895 files changed, 36024 insertions, 45222 deletions
@@ -3054,6 +3054,7 @@ D: PLX USB338x driver D: PCA9634 driver D: Option GTM671WFS D: Fintek F81216A +D: AD5761 iio driver D: Various kernel hacks S: Qtechnology A/S S: Valby Langgade 142 diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 0439c2aaf741..3c6624881375 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -496,8 +496,11 @@ Description: 1kohm_to_gnd: connected to ground via an 1kOhm resistor, 6kohm_to_gnd: connected to ground via a 6kOhm resistor, 20kohm_to_gnd: connected to ground via a 20kOhm resistor, + 90kohm_to_gnd: connected to ground via a 90kOhm resistor, 100kohm_to_gnd: connected to ground via an 100kOhm resistor, + 125kohm_to_gnd: connected to ground via an 125kOhm resistor, 500kohm_to_gnd: connected to ground via a 500kOhm resistor, + 640kohm_to_gnd: connected to ground via a 640kOhm resistor, three_state: left floating. For a list of available output power down options read outX_powerdown_mode_available. If Y is not present the @@ -1491,3 +1494,10 @@ Description: This ABI is especially applicable for humidity sensors to heatup the device and get rid of any condensation in some humidity environment + +What: /sys/bus/iio/devices/iio:deviceX/in_ph_raw +KernelVersion: 4.5 +Contact: linux-iio@vger.kernel.org +Description: + Raw (unscaled no offset etc.) pH reading of a substance as a negative + base-10 logarithm of hydrodium ions in a litre of water. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x b/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x new file mode 100644 index 000000000000..3740f253d406 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-health-afe440x @@ -0,0 +1,54 @@ +What: /sys/bus/iio/devices/iio:deviceX/tia_resistanceY + /sys/bus/iio/devices/iio:deviceX/tia_capacitanceY +Date: December 2015 +KernelVersion: +Contact: Andrew F. Davis <afd@ti.com> +Description: + Get and set the resistance and the capacitance settings for the + Transimpedance Amplifier. Y is 1 for Rf1 and Cf1, Y is 2 for + Rf2 and Cf2 values. + +What: /sys/bus/iio/devices/iio:deviceX/tia_separate_en +Date: December 2015 +KernelVersion: +Contact: Andrew F. Davis <afd@ti.com> +Description: + Enable or disable separate settings for the TransImpedance + Amplifier above, when disabled both values are set by the + first channel. + +What: /sys/bus/iio/devices/iio:deviceX/in_intensity_ledY_raw + /sys/bus/iio/devices/iio:deviceX/in_intensity_ledY_ambient_raw +Date: December 2015 +KernelVersion: +Contact: Andrew F. Davis <afd@ti.com> +Description: + Get measured values from the ADC for these stages. Y is the + specific LED number. The values are expressed in 24-bit twos + complement. + +What: /sys/bus/iio/devices/iio:deviceX/in_intensity_ledY-ledY_ambient_raw +Date: December 2015 +KernelVersion: +Contact: Andrew F. Davis <afd@ti.com> +Description: + Get differential values from the ADC for these stages. Y is the + specific LED number. The values are expressed in 24-bit twos + complement for the specified LEDs. + +What: /sys/bus/iio/devices/iio:deviceX/out_current_ledY_offset + /sys/bus/iio/devices/iio:deviceX/out_current_ledY_ambient_offset +Date: December 2015 +KernelVersion: +Contact: Andrew F. Davis <afd@ti.com> +Description: + Get and set the offset cancellation DAC setting for these + stages. The values are expressed in 5-bit sign-magnitude. + +What: /sys/bus/iio/devices/iio:deviceX/out_current_ledY_raw +Date: December 2015 +KernelVersion: +Contact: Andrew F. 
Davis <afd@ti.com> +Description: + Get and set the LED current for the specified LED. Y is the + specific LED number. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-magnetometer-hmc5843 b/Documentation/ABI/testing/sysfs-bus-iio-magnetometer-hmc5843 new file mode 100644 index 000000000000..6275e9f56e6c --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-magnetometer-hmc5843 @@ -0,0 +1,15 @@ +What: /sys/bus/iio/devices/iio:deviceX/meas_conf +What: /sys/bus/iio/devices/iio:deviceX/meas_conf_available +KernelVersion: 4.5 +Contact: linux-iio@vger.kernel.org +Description: + Current configuration and available configurations + for the bias current. + normal - Normal measurement configurations (default) + positivebias - Positive bias configuration + negativebias - Negative bias configuration + disabled - Only available on HMC5983. Disables magnetic + sensor and enables temperature sensor. + Note: The effect of this configuration may vary + according to the device. For exact documentation + check the device's datasheet. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-vf610 b/Documentation/ABI/testing/sysfs-bus-iio-vf610 index ecbc1f4af921..308a6756d3bf 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-vf610 +++ b/Documentation/ABI/testing/sysfs-bus-iio-vf610 @@ -5,3 +5,12 @@ Description: Specifies the hardware conversion mode used. The three available modes are "normal", "high-speed" and "low-power", where the last is the default mode. + + +What: /sys/bus/iio/devices/iio:deviceX/out_conversion_mode +KernelVersion: 4.6 +Contact: linux-iio@vger.kernel.org +Description: + Specifies the hardware conversion mode used within DAC. + The two available modes are "high-power" and "low-power", + where "low-power" mode is the default mode. diff --git a/Documentation/devicetree/bindings/goldfish/audio.txt b/Documentation/devicetree/bindings/goldfish/audio.txt new file mode 100644 index 000000000000..d043fda433ba --- /dev/null +++ b/Documentation/devicetree/bindings/goldfish/audio.txt @@ -0,0 +1,17 @@ +Android Goldfish Audio + +Android goldfish audio device generated by android emulator. + +Required properties: + +- compatible : should contain "google,goldfish-audio" to match emulator +- reg : <registers mapping> +- interrupts : <interrupt mapping> + +Example: + + goldfish_audio@9030000 { + compatible = "google,goldfish-audio"; + reg = <0x9030000 0x100>; + interrupts = <0x4>; + }; diff --git a/Documentation/devicetree/bindings/iio/accel/mma8452.txt b/Documentation/devicetree/bindings/iio/accel/mma8452.txt index 3c10e8581144..165937e1ac1c 100644 --- a/Documentation/devicetree/bindings/iio/accel/mma8452.txt +++ b/Documentation/devicetree/bindings/iio/accel/mma8452.txt @@ -1,8 +1,10 @@ -Freescale MMA8452Q, MMA8453Q, MMA8652FC or MMA8653FC triaxial accelerometer +Freescale MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC or MMA8653FC +triaxial accelerometer Required properties: - compatible: should contain one of + * "fsl,mma8451" * "fsl,mma8452" * "fsl,mma8453" * "fsl,mma8652" diff --git a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt new file mode 100644 index 000000000000..3223684a643b --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt @@ -0,0 +1,28 @@ +* AT91 SAMA5D2 Analog to Digital Converter (ADC) + +Required properties: + - compatible: Should be "atmel,sama5d2-adc". + - reg: Should contain ADC registers location and length. + - interrupts: Should contain the IRQ line for the ADC. 
+ - clocks: phandle to device clock. + - clock-names: Must be "adc_clk". + - vref-supply: Supply used as reference for conversions. + - vddana-supply: Supply for the adc device. + - atmel,min-sample-rate-hz: Minimum sampling rate, it depends on SoC. + - atmel,max-sample-rate-hz: Maximum sampling rate, it depends on SoC. + - atmel,startup-time-ms: Startup time expressed in ms, it depends on SoC. + +Example: + +adc: adc@fc030000 { + compatible = "atmel,sama5d2-adc"; + reg = <0xfc030000 0x100>; + interrupts = <40 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&adc_clk>; + clock-names = "adc_clk"; + atmel,min-sample-rate-hz = <200000>; + atmel,max-sample-rate-hz = <20000000>; + atmel,startup-time-ms = <4>; + vddana-supply = <&vdd_3v3_lp_reg>; + vref-supply = <&vdd_3v3_lp_reg>; +} diff --git a/Documentation/devicetree/bindings/iio/adc/mcp3422.txt b/Documentation/devicetree/bindings/iio/adc/mcp3422.txt index dcae4ccfcc52..82bcce07255d 100644 --- a/Documentation/devicetree/bindings/iio/adc/mcp3422.txt +++ b/Documentation/devicetree/bindings/iio/adc/mcp3422.txt @@ -6,6 +6,7 @@ Required properties: "microchip,mcp3422" or "microchip,mcp3423" or "microchip,mcp3424" or + "microchip,mcp3425" or "microchip,mcp3426" or "microchip,mcp3427" or "microchip,mcp3428" diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc0832.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc0832.txt new file mode 100644 index 000000000000..d91130587d01 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/ti-adc0832.txt @@ -0,0 +1,19 @@ +* Texas Instruments' ADC0831/ADC0832/ADC0832/ADC0838 + +Required properties: + - compatible: Should be one of + * "ti,adc0831" + * "ti,adc0832" + * "ti,adc0834" + * "ti,adc0838" + - reg: spi chip select number for the device + - vref-supply: The regulator supply for ADC reference voltage + - spi-max-frequency: Max SPI frequency to use (< 400000) + +Example: +adc@0 { + compatible = "ti,adc0832"; + reg = <0>; + vref-supply = <&vdd_supply>; + spi-max-frequency = <200000>; +}; diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt new file mode 100644 index 000000000000..cffa1907463a --- /dev/null +++ b/Documentation/devicetree/bindings/iio/chemical/atlas,ph-sm.txt @@ -0,0 +1,22 @@ +* Atlas Scientific pH-SM OEM sensor + +http://www.atlas-scientific.com/_files/_datasheets/_oem/pH_oem_datasheet.pdf + +Required properties: + + - compatible: must be "atlas,ph-sm" + - reg: the I2C address of the sensor + - interrupt-parent: should be the phandle for the interrupt controller + - interrupts: the sole interrupt generated by the device + + Refer to interrupt-controller/interrupts.txt for generic interrupt client + node bindings. + +Example: + +atlas@65 { + compatible = "atlas,ph-sm"; + reg = <0x65>; + interrupt-parent = <&gpio1>; + interrupts = <16 2>; +}; diff --git a/Documentation/devicetree/bindings/iio/dac/vf610-dac.txt b/Documentation/devicetree/bindings/iio/dac/vf610-dac.txt new file mode 100644 index 000000000000..20c6c7ae9687 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/dac/vf610-dac.txt @@ -0,0 +1,20 @@ +Freescale vf610 Digital to Analog Converter bindings + +The devicetree bindings are for the new DAC driver written for +vf610 SoCs from Freescale. 
+ +Required properties: +- compatible: Should contain "fsl,vf610-dac" +- reg: Offset and length of the register set for the device +- interrupts: Should contain the interrupt for the device +- clocks: The clock is needed by the DAC controller +- clock-names: Must contain "dac" matching entry in the clocks property. + +Example: +dac0: dac@400cc000 { + compatible = "fsl,vf610-dac"; + reg = <0x400cc000 0x1000>; + interrupts = <55 IRQ_TYPE_LEVEL_HIGH>; + clock-names = "dac"; + clocks = <&clks VF610_CLK_DAC0>; +}; diff --git a/Documentation/devicetree/bindings/iio/health/afe4403.txt b/Documentation/devicetree/bindings/iio/health/afe4403.txt new file mode 100644 index 000000000000..2fffd70336ba --- /dev/null +++ b/Documentation/devicetree/bindings/iio/health/afe4403.txt @@ -0,0 +1,34 @@ +Texas Instruments AFE4403 Heart rate and Pulse Oximeter + +Required properties: + - compatible : Should be "ti,afe4403". + - reg : SPI chip select address of device. + - tx-supply : Regulator supply to transmitting LEDs. + - interrupt-parent : Phandle to he parent interrupt controller. + - interrupts : The interrupt line the device ADC_RDY pin is + connected to. For details refer to, + ../../interrupt-controller/interrupts.txt. + +Optional properties: + - reset-gpios : GPIO used to reset the device. + For details refer to, ../../gpio/gpio.txt. + +For other required and optional properties of SPI slave nodes +please refer to ../../spi/spi-bus.txt. + +Example: + +&spi0 { + heart_mon@0 { + compatible = "ti,afe4403"; + reg = <0>; + spi-max-frequency = <10000000>; + + tx-supply = <&vbat>; + + interrupt-parent = <&gpio1>; + interrupts = <28 IRQ_TYPE_EDGE_RISING>; + + reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; + }; +}; diff --git a/Documentation/devicetree/bindings/iio/health/afe4404.txt b/Documentation/devicetree/bindings/iio/health/afe4404.txt new file mode 100644 index 000000000000..de69f203edfa --- /dev/null +++ b/Documentation/devicetree/bindings/iio/health/afe4404.txt @@ -0,0 +1,30 @@ +Texas Instruments AFE4404 Heart rate and Pulse Oximeter + +Required properties: + - compatible : Should be "ti,afe4404". + - reg : I2C address of the device. + - tx-supply : Regulator supply to transmitting LEDs. + - interrupt-parent : Phandle to he parent interrupt controller. + - interrupts : The interrupt line the device ADC_RDY pin is + connected to. For details refer to, + ../interrupt-controller/interrupts.txt. + +Optional properties: + - reset-gpios : GPIO used to reset the device. + For details refer to, ../gpio/gpio.txt. + +Example: + +&i2c2 { + heart_mon@58 { + compatible = "ti,afe4404"; + reg = <0x58>; + + tx-supply = <&vbat>; + + interrupt-parent = <&gpio1>; + interrupts = <28 IRQ_TYPE_EDGE_RISING>; + + reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; + }; +}; diff --git a/Documentation/devicetree/bindings/iio/health/max30100.txt b/Documentation/devicetree/bindings/iio/health/max30100.txt index f6fbac66ad06..295a9edfa4fd 100644 --- a/Documentation/devicetree/bindings/iio/health/max30100.txt +++ b/Documentation/devicetree/bindings/iio/health/max30100.txt @@ -11,11 +11,19 @@ Required properties: Refer to interrupt-controller/interrupts.txt for generic interrupt client node bindings. +Optional properties: + - maxim,led-current-microamp: configuration for LED current in microamperes + while the engine is running. First indexed value is the configuration for + the RED LED, and second value is for the IR LED. + + Refer to the datasheet for the allowed current values. 
+ Example: max30100@057 { compatible = "maxim,max30100"; reg = <57>; + maxim,led-current-microamp = <24000 50000>; interrupt-parent = <&gpio1>; interrupts = <16 2>; }; diff --git a/Documentation/devicetree/bindings/iio/light/opt3001.txt b/Documentation/devicetree/bindings/iio/light/opt3001.txt new file mode 100644 index 000000000000..eac30d508849 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/light/opt3001.txt @@ -0,0 +1,26 @@ +* Texas Instruments OPT3001 Ambient Light Sensor + +The driver supports interrupt-driven and interrupt-less operation, depending +on whether an interrupt property has been populated into the DT. Note that +the optional generation of IIO events on rising/falling light threshold changes +requires the use of interrupts. Without interrupts, only the simple reading +of the current light value is supported through the IIO API. + +http://www.ti.com/product/opt3001 + +Required properties: + - compatible: should be "ti,opt3001" + - reg: the I2C address of the sensor + +Optional properties: + - interrupt-parent: should be the phandle for the interrupt controller + - interrupts: interrupt mapping for GPIO IRQ (configure for falling edge) + +Example: + +opt3001@44 { + compatible = "ti,opt3001"; + reg = <0x44>; + interrupt-parent = <&gpio1>; + interrupts = <28 IRQ_TYPE_EDGE_FALLING>; +}; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index bd33a268fd1b..ee66defcdd8b 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -28,6 +28,7 @@ arm ARM Ltd. armadeus ARMadeus Systems SARL artesyn Artesyn Embedded Technologies Inc. asahi-kasei Asahi Kasei Corp. +atlas Atlas Scientific LLC atmel Atmel Corporation auo AU Optronics Corporation avago Avago Technologies diff --git a/Documentation/isdn/00-INDEX b/Documentation/isdn/00-INDEX index e87e336f590e..2d1889b6c1fa 100644 --- a/Documentation/isdn/00-INDEX +++ b/Documentation/isdn/00-INDEX @@ -16,8 +16,6 @@ README.FAQ - general info for FAQ. README.HiSax - info on the HiSax driver which replaces the old teles. -README.act2000 - - info on driver for IBM ACT-2000 card. README.audio - info for running audio over ISDN. README.avmb1 @@ -34,14 +32,8 @@ README.hfc-pci - info on hfc-pci based cards. README.hysdn - info on driver for Hypercope active HYSDN cards -README.icn - - info on the ICN-ISDN-card and its driver. README.mISDN - info on the Modular ISDN subsystem (mISDN) -README.pcbit - - info on the PCBIT-D ISDN adapter and driver. -README.sc - - info on driver for Spellcaster cards. README.syncppp - info on running Sync PPP over ISDN. 
README.x25 diff --git a/drivers/staging/panel/lcd-panel-cgram.txt b/Documentation/misc-devices/lcd-panel-cgram.txt index 7f82c905763d..7f82c905763d 100644 --- a/drivers/staging/panel/lcd-panel-cgram.txt +++ b/Documentation/misc-devices/lcd-panel-cgram.txt diff --git a/MAINTAINERS b/MAINTAINERS index 145289e3eb7a..15b4c417211f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -775,6 +775,12 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: sound/aoa/ +APEX EMBEDDED SYSTEMS STX104 DAC DRIVER +M: William Breathitt Gray <vilhelm.gray@gmail.com> +L: linux-iio@vger.kernel.org +S: Maintained +F: drivers/iio/dac/stx104.c + APM DRIVER M: Jiri Kosina <jikos@kernel.org> S: Odd fixes @@ -1962,6 +1968,12 @@ M: Nicolas Ferre <nicolas.ferre@atmel.com> S: Supported F: drivers/tty/serial/atmel_serial.c +ATMEL SAMA5D2 ADC DRIVER +M: Ludovic Desroches <ludovic.desroches@atmel.com> +L: linux-iio@vger.kernel.org +S: Supported +F: drivers/iio/adc/at91-sama5d2_adc.c + ATMEL Audio ALSA driver M: Nicolas Ferre <nicolas.ferre@atmel.com> L: alsa-devel@alsa-project.org (moderated for non-subscribers) @@ -3548,13 +3560,6 @@ L: driverdev-devel@linuxdriverproject.org S: Maintained F: drivers/staging/dgnc/ -DIGI EPCA PCI PRODUCTS -M: Lidza Louina <lidza.louina@gmail.com> -M: Daeseok Youn <daeseok.youn@gmail.com> -L: driverdev-devel@linuxdriverproject.org -S: Maintained -F: drivers/staging/dgap/ - DIOLAN U2C-12 I2C DRIVER M: Guenter Roeck <linux@roeck-us.net> L: linux-i2c@vger.kernel.org @@ -8202,6 +8207,13 @@ S: Maintained F: Documentation/mn10300/ F: arch/mn10300/ +PARALLEL LCD/KEYPAD PANEL DRIVER +M: Willy Tarreau <willy@haproxy.com> +M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com> +S: Odd Fixes +F: Documentation/misc-devices/lcd-panel-cgram.txt +F: drivers/misc/panel.c + PARALLEL PORT SUBSYSTEM M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> M: Sudip Mukherjee <sudip@vectorindia.org> @@ -10455,19 +10467,6 @@ L: linux-tegra@vger.kernel.org S: Maintained F: drivers/staging/nvec/ -STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON) -M: Jens Frederich <jfrederich@gmail.com> -M: Daniel Drake <dsd@laptop.org> -M: Jon Nettleton <jon.nettleton@gmail.com> -W: http://wiki.laptop.org/go/DCON -S: Maintained -F: drivers/staging/olpc_dcon/ - -STAGING - PARALLEL LCD/KEYPAD PANEL DRIVER -M: Willy Tarreau <willy@meta-x.org> -S: Odd Fixes -F: drivers/staging/panel/ - STAGING - REALTEK RTL8712U DRIVERS M: Larry Finger <Larry.Finger@lwfinger.net> M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>. 
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 57f52a2afa35..16288e777ec3 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2744,6 +2744,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ + if (unlikely(current->mm != proc->vma_vm_mm)) { + pr_err("current mm mismatch proc mm\n"); + return -EINVAL; + } trace_binder_ioctl(cmd, arg); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); @@ -2958,6 +2962,7 @@ static int binder_open(struct inode *nodp, struct file *filp) return -ENOMEM; get_task_struct(current); proc->tsk = current; + proc->vma_vm_mm = current->mm; INIT_LIST_HEAD(&proc->todo); init_waitqueue_head(&proc->wait); proc->default_priority = task_nice(current); diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index 833ea9dd4464..b0d3ecf3318b 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -143,7 +143,7 @@ config MMA8452 select IIO_TRIGGERED_BUFFER help Say yes here to build support for the following Freescale 3-axis - accelerometers: MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC. + accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC. To compile this driver as a module, choose M here: the module will be called mma8452. diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index ccc632a7cf01..7f4994f32a90 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1,6 +1,7 @@ /* * mma8452.c - Support for following Freescale 3-axis accelerometers: * + * MMA8451Q (14 bit) * MMA8452Q (12 bit) * MMA8453Q (10 bit) * MMA8652FC (12 bit) @@ -15,7 +16,7 @@ * * 7-bit I2C slave address 0x1c/0x1d (pin selectable) * - * TODO: orientation / freefall events, autosleep + * TODO: orientation events, autosleep */ #include <linux/module.h> @@ -85,8 +86,9 @@ #define MMA8452_INT_FF_MT BIT(2) #define MMA8452_INT_TRANS BIT(5) -#define MMA8452_DEVICE_ID 0x2a -#define MMA8453_DEVICE_ID 0x3a +#define MMA8451_DEVICE_ID 0x1a +#define MMA8452_DEVICE_ID 0x2a +#define MMA8453_DEVICE_ID 0x3a #define MMA8652_DEVICE_ID 0x4a #define MMA8653_DEVICE_ID 0x5a @@ -416,6 +418,51 @@ fail: return ret; } +/* returns >0 if in freefall mode, 0 if not or <0 if an error occured */ +static int mma8452_freefall_mode_enabled(struct mma8452_data *data) +{ + int val; + const struct mma_chip_info *chip = data->chip_info; + + val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg); + if (val < 0) + return val; + + return !(val & MMA8452_FF_MT_CFG_OAE); +} + +static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state) +{ + int val; + const struct mma_chip_info *chip = data->chip_info; + + if ((state && mma8452_freefall_mode_enabled(data)) || + (!state && !(mma8452_freefall_mode_enabled(data)))) + return 0; + + val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg); + if (val < 0) + return val; + + if (state) { + val |= BIT(idx_x + chip->ev_cfg_chan_shift); + val |= BIT(idx_y + chip->ev_cfg_chan_shift); + val |= BIT(idx_z + chip->ev_cfg_chan_shift); + val &= ~MMA8452_FF_MT_CFG_OAE; + } else { + val &= ~BIT(idx_x + chip->ev_cfg_chan_shift); + val &= ~BIT(idx_y + chip->ev_cfg_chan_shift); + val &= ~BIT(idx_z + chip->ev_cfg_chan_shift); + val |= MMA8452_FF_MT_CFG_OAE; + } + + val = mma8452_change_config(data, chip->ev_cfg, val); + if (val) + return val; + + return 0; +} + static int mma8452_set_hp_filter_frequency(struct mma8452_data *data, 
int val, int val2) { @@ -609,12 +656,22 @@ static int mma8452_read_event_config(struct iio_dev *indio_dev, const struct mma_chip_info *chip = data->chip_info; int ret; - ret = i2c_smbus_read_byte_data(data->client, - data->chip_info->ev_cfg); - if (ret < 0) - return ret; + switch (dir) { + case IIO_EV_DIR_FALLING: + return mma8452_freefall_mode_enabled(data); + case IIO_EV_DIR_RISING: + if (mma8452_freefall_mode_enabled(data)) + return 0; + + ret = i2c_smbus_read_byte_data(data->client, + data->chip_info->ev_cfg); + if (ret < 0) + return ret; - return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift)); + return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift)); + default: + return -EINVAL; + } } static int mma8452_write_event_config(struct iio_dev *indio_dev, @@ -627,19 +684,35 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev, const struct mma_chip_info *chip = data->chip_info; int val; - val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg); - if (val < 0) - return val; + switch (dir) { + case IIO_EV_DIR_FALLING: + return mma8452_set_freefall_mode(data, state); + case IIO_EV_DIR_RISING: + val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg); + if (val < 0) + return val; + + if (state) { + if (mma8452_freefall_mode_enabled(data)) { + val &= ~BIT(idx_x + chip->ev_cfg_chan_shift); + val &= ~BIT(idx_y + chip->ev_cfg_chan_shift); + val &= ~BIT(idx_z + chip->ev_cfg_chan_shift); + val |= MMA8452_FF_MT_CFG_OAE; + } + val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift); + } else { + if (mma8452_freefall_mode_enabled(data)) + return 0; - if (state) - val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift); - else - val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift); + val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift); + } - val |= chip->ev_cfg_ele; - val |= MMA8452_FF_MT_CFG_OAE; + val |= chip->ev_cfg_ele; - return mma8452_change_config(data, chip->ev_cfg, val); + return mma8452_change_config(data, chip->ev_cfg, val); + default: + return -EINVAL; + } } static void mma8452_transient_interrupt(struct iio_dev *indio_dev) @@ -652,6 +725,16 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev) if (src < 0) return; + if (mma8452_freefall_mode_enabled(data)) { + iio_push_event(indio_dev, + IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, + IIO_MOD_X_AND_Y_AND_Z, + IIO_EV_TYPE_MAG, + IIO_EV_DIR_FALLING), + ts); + return; + } + if (src & data->chip_info->ev_src_xe) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, @@ -745,6 +828,27 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev, return 0; } +static const struct iio_event_spec mma8452_freefall_event[] = { + { + .type = IIO_EV_TYPE_MAG, + .dir = IIO_EV_DIR_FALLING, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_PERIOD) | + BIT(IIO_EV_INFO_HIGH_PASS_FILTER_3DB) + }, +}; + +static const struct iio_event_spec mma8652_freefall_event[] = { + { + .type = IIO_EV_TYPE_MAG, + .dir = IIO_EV_DIR_FALLING, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_PERIOD) + }, +}; + static const struct iio_event_spec mma8452_transient_event[] = { { .type = IIO_EV_TYPE_MAG, @@ -781,6 +885,24 @@ static struct attribute_group mma8452_event_attribute_group = { .attrs = mma8452_event_attributes, }; +#define MMA8452_FREEFALL_CHANNEL(modifier) { \ + .type = IIO_ACCEL, \ + .modified = 1, \ + .channel2 = modifier, \ + .scan_index = -1, \ + .event_spec = 
mma8452_freefall_event, \ + .num_event_specs = ARRAY_SIZE(mma8452_freefall_event), \ +} + +#define MMA8652_FREEFALL_CHANNEL(modifier) { \ + .type = IIO_ACCEL, \ + .modified = 1, \ + .channel2 = modifier, \ + .scan_index = -1, \ + .event_spec = mma8652_freefall_event, \ + .num_event_specs = ARRAY_SIZE(mma8652_freefall_event), \ +} + #define MMA8452_CHANNEL(axis, idx, bits) { \ .type = IIO_ACCEL, \ .modified = 1, \ @@ -822,11 +944,20 @@ static struct attribute_group mma8452_event_attribute_group = { .num_event_specs = ARRAY_SIZE(mma8452_motion_event), \ } +static const struct iio_chan_spec mma8451_channels[] = { + MMA8452_CHANNEL(X, idx_x, 14), + MMA8452_CHANNEL(Y, idx_y, 14), + MMA8452_CHANNEL(Z, idx_z, 14), + IIO_CHAN_SOFT_TIMESTAMP(idx_ts), + MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z), +}; + static const struct iio_chan_spec mma8452_channels[] = { MMA8452_CHANNEL(X, idx_x, 12), MMA8452_CHANNEL(Y, idx_y, 12), MMA8452_CHANNEL(Z, idx_z, 12), IIO_CHAN_SOFT_TIMESTAMP(idx_ts), + MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z), }; static const struct iio_chan_spec mma8453_channels[] = { @@ -834,6 +965,7 @@ static const struct iio_chan_spec mma8453_channels[] = { MMA8452_CHANNEL(Y, idx_y, 10), MMA8452_CHANNEL(Z, idx_z, 10), IIO_CHAN_SOFT_TIMESTAMP(idx_ts), + MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z), }; static const struct iio_chan_spec mma8652_channels[] = { @@ -841,6 +973,7 @@ static const struct iio_chan_spec mma8652_channels[] = { MMA8652_CHANNEL(Y, idx_y, 12), MMA8652_CHANNEL(Z, idx_z, 12), IIO_CHAN_SOFT_TIMESTAMP(idx_ts), + MMA8652_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z), }; static const struct iio_chan_spec mma8653_channels[] = { @@ -848,9 +981,11 @@ static const struct iio_chan_spec mma8653_channels[] = { MMA8652_CHANNEL(Y, idx_y, 10), MMA8652_CHANNEL(Z, idx_z, 10), IIO_CHAN_SOFT_TIMESTAMP(idx_ts), + MMA8652_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z), }; enum { + mma8451, mma8452, mma8453, mma8652, @@ -858,17 +993,34 @@ enum { }; static const struct mma_chip_info mma_chip_info_table[] = { - [mma8452] = { - .chip_id = MMA8452_DEVICE_ID, - .channels = mma8452_channels, - .num_channels = ARRAY_SIZE(mma8452_channels), + [mma8451] = { + .chip_id = MMA8451_DEVICE_ID, + .channels = mma8451_channels, + .num_channels = ARRAY_SIZE(mma8451_channels), /* * Hardware has fullscale of -2G, -4G, -8G corresponding to - * raw value -2048 for 12 bit or -512 for 10 bit. + * raw value -8192 for 14 bit, -2048 for 12 bit or -512 for 10 + * bit. 
* The userspace interface uses m/s^2 and we declare micro units * So scale factor for 12 bit here is given by: - * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665 + * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665 */ + .mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} }, + .ev_cfg = MMA8452_TRANSIENT_CFG, + .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE, + .ev_cfg_chan_shift = 1, + .ev_src = MMA8452_TRANSIENT_SRC, + .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE, + .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE, + .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE, + .ev_ths = MMA8452_TRANSIENT_THS, + .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK, + .ev_count = MMA8452_TRANSIENT_COUNT, + }, + [mma8452] = { + .chip_id = MMA8452_DEVICE_ID, + .channels = mma8452_channels, + .num_channels = ARRAY_SIZE(mma8452_channels), .mma_scales = { {0, 9577}, {0, 19154}, {0, 38307} }, .ev_cfg = MMA8452_TRANSIENT_CFG, .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE, @@ -1049,6 +1201,7 @@ static int mma8452_reset(struct i2c_client *client) } static const struct of_device_id mma8452_dt_ids[] = { + { .compatible = "fsl,mma8451", .data = &mma_chip_info_table[mma8451] }, { .compatible = "fsl,mma8452", .data = &mma_chip_info_table[mma8452] }, { .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] }, { .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] }, @@ -1085,6 +1238,7 @@ static int mma8452_probe(struct i2c_client *client, return ret; switch (ret) { + case MMA8451_DEVICE_ID: case MMA8452_DEVICE_ID: case MMA8453_DEVICE_ID: case MMA8652_DEVICE_ID: @@ -1190,6 +1344,10 @@ static int mma8452_probe(struct i2c_client *client, if (ret < 0) goto buffer_cleanup; + ret = mma8452_set_freefall_mode(data, false); + if (ret) + return ret; + return 0; buffer_cleanup: diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 70f042797f15..a03a1417dd63 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -67,6 +67,8 @@ #define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22 #define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10 #define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08 +#define ST_ACCEL_1_IHL_IRQ_ADDR 0x25 +#define ST_ACCEL_1_IHL_IRQ_MASK 0x02 #define ST_ACCEL_1_MULTIREAD_BIT true /* CUSTOM VALUES FOR SENSOR 2 */ @@ -92,6 +94,8 @@ #define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22 #define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02 #define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10 +#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22 +#define ST_ACCEL_2_IHL_IRQ_MASK 0x80 #define ST_ACCEL_2_MULTIREAD_BIT true /* CUSTOM VALUES FOR SENSOR 3 */ @@ -125,6 +129,8 @@ #define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23 #define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80 #define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00 +#define ST_ACCEL_3_IHL_IRQ_ADDR 0x23 +#define ST_ACCEL_3_IHL_IRQ_MASK 0x40 #define ST_ACCEL_3_IG1_EN_ADDR 0x23 #define ST_ACCEL_3_IG1_EN_MASK 0x08 #define ST_ACCEL_3_MULTIREAD_BIT false @@ -169,6 +175,8 @@ #define ST_ACCEL_5_DRDY_IRQ_ADDR 0x22 #define ST_ACCEL_5_DRDY_IRQ_INT1_MASK 0x04 #define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20 +#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22 +#define ST_ACCEL_5_IHL_IRQ_MASK 0x80 #define ST_ACCEL_5_IG1_EN_ADDR 0x21 #define ST_ACCEL_5_IG1_EN_MASK 0x08 #define ST_ACCEL_5_MULTIREAD_BIT false @@ -292,6 +300,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .addr = ST_ACCEL_1_DRDY_IRQ_ADDR, .mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK, .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK, + .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR, + .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK, }, .multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT, 
.bootime = 2, @@ -355,6 +365,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .addr = ST_ACCEL_2_DRDY_IRQ_ADDR, .mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK, .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK, + .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR, + .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK, }, .multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT, .bootime = 2, @@ -430,6 +442,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .addr = ST_ACCEL_3_DRDY_IRQ_ADDR, .mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK, .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK, + .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR, + .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK, .ig1 = { .en_addr = ST_ACCEL_3_IG1_EN_ADDR, .en_mask = ST_ACCEL_3_IG1_EN_MASK, @@ -537,6 +551,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .addr = ST_ACCEL_5_DRDY_IRQ_ADDR, .mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK, .mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK, + .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR, + .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK, }, .multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT, .bootime = 2, /* guess */ diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 283ded7747a9..932de1f9d1e7 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -131,6 +131,16 @@ config AT91_ADC To compile this driver as a module, choose M here: the module will be called at91_adc. +config AT91_SAMA5D2_ADC + tristate "Atmel AT91 SAMA5D2 ADC" + depends on ARCH_AT91 || COMPILE_TEST + help + Say yes here to build support for Atmel SAMA5D2 ADC which is + available on SAMA5D2 SoC family. + + To compile this driver as a module, choose M here: the module will be + called at91-sama5d2_adc. + config AXP288_ADC tristate "X-Powers AXP288 ADC driver" depends on MFD_AXP20X @@ -267,11 +277,11 @@ config MCP320X called mcp320x. config MCP3422 - tristate "Microchip Technology MCP3422/3/4/6/7/8 driver" + tristate "Microchip Technology MCP3421/2/3/4/5/6/7/8 driver" depends on I2C help - Say yes here to build support for Microchip Technology's - MCP3422, MCP3423, MCP3424, MCP3426, MCP3427 or MCP3428 + Say yes here to build support for Microchip Technology's MCP3421 + MCP3422, MCP3423, MCP3424, MCP3425, MCP3426, MCP3427 or MCP3428 analog to digital converters. This driver can also be built as a module. If so, the module will be @@ -287,6 +297,20 @@ config MEN_Z188_ADC This driver can also be built as a module. If so, the module will be called men_z188_adc. +config MXS_LRADC + tristate "Freescale i.MX23/i.MX28 LRADC" + depends on (ARCH_MXS || COMPILE_TEST) && HAS_IOMEM + depends on INPUT + select STMP_DEVICE + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + Say yes here to build support for i.MX23/i.MX28 LRADC convertor + built into these chips. + + To compile this driver as a module, choose M here: the + module will be called mxs-lradc. + config NAU7802 tristate "Nuvoton NAU7802 ADC driver" depends on I2C @@ -352,6 +376,16 @@ config TI_ADC081C This driver can also be built as a module. If so, the module will be called ti-adc081c. +config TI_ADC0832 + tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838" + depends on SPI + help + If you say yes here you get support for Texas Instruments ADC0831, + ADC0832, ADC0834, ADC0838 ADC chips. + + This driver can also be built as a module. If so, the module will be + called ti-adc0832. + config TI_ADC128S052 tristate "Texas Instruments ADC128S052/ADC122S021/ADC124S021" depends on SPI @@ -362,6 +396,19 @@ config TI_ADC128S052 This driver can also be built as a module. 
If so, the module will be called ti-adc128s052. +config TI_ADS1015 + tristate "Texas Instruments ADS1015 ADC" + depends on I2C && !SENSORS_ADS1015 + select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + If you say yes here you get support for Texas Instruments ADS1015 + ADC chip. + + This driver can also be built as a module. If so, the module will be + called ti-ads1015. + config TI_ADS8688 tristate "Texas Instruments ADS8688" depends on SPI && OF diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index 6435780e9b71..b1aa456e6af3 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_AD7793) += ad7793.o obj-$(CONFIG_AD7887) += ad7887.o obj-$(CONFIG_AD799X) += ad799x.o obj-$(CONFIG_AT91_ADC) += at91_adc.o +obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o obj-$(CONFIG_AXP288_ADC) += axp288_adc.o obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o @@ -28,13 +29,16 @@ obj-$(CONFIG_MAX1363) += max1363.o obj-$(CONFIG_MCP320X) += mcp320x.o obj-$(CONFIG_MCP3422) += mcp3422.o obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o +obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o obj-$(CONFIG_NAU7802) += nau7802.o obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o +obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o +obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c new file mode 100644 index 000000000000..dbee13ad33a3 --- /dev/null +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -0,0 +1,508 @@ +/* + * Atmel ADC driver for SAMA5D2 devices and compatible. + * + * Copyright (C) 2015 Atmel, + * 2015 Ludovic Desroches <ludovic.desroches@atmel.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/regulator/consumer.h> + +/* Control Register */ +#define AT91_SAMA5D2_CR 0x00 +/* Software Reset */ +#define AT91_SAMA5D2_CR_SWRST BIT(0) +/* Start Conversion */ +#define AT91_SAMA5D2_CR_START BIT(1) +/* Touchscreen Calibration */ +#define AT91_SAMA5D2_CR_TSCALIB BIT(2) +/* Comparison Restart */ +#define AT91_SAMA5D2_CR_CMPRST BIT(4) + +/* Mode Register */ +#define AT91_SAMA5D2_MR 0x04 +/* Trigger Selection */ +#define AT91_SAMA5D2_MR_TRGSEL(v) ((v) << 1) +/* ADTRG */ +#define AT91_SAMA5D2_MR_TRGSEL_TRIG0 0 +/* TIOA0 */ +#define AT91_SAMA5D2_MR_TRGSEL_TRIG1 1 +/* TIOA1 */ +#define AT91_SAMA5D2_MR_TRGSEL_TRIG2 2 +/* TIOA2 */ +#define AT91_SAMA5D2_MR_TRGSEL_TRIG3 3 +/* PWM event line 0 */ +#define AT91_SAMA5D2_MR_TRGSEL_TRIG4 4 +/* PWM event line 1 */ +#define AT91_SAMA5D2_MR_TRGSEL_TRIG5 5 +/* TIOA3 */ +#define AT91_SAMA5D2_MR_TRGSEL_TRIG6 6 +/* RTCOUT0 */ +#define AT91_SAMA5D2_MR_TRGSEL_TRIG7 7 +/* Sleep Mode */ +#define AT91_SAMA5D2_MR_SLEEP BIT(5) +/* Fast Wake Up */ +#define AT91_SAMA5D2_MR_FWUP BIT(6) +/* Prescaler Rate Selection */ +#define AT91_SAMA5D2_MR_PRESCAL(v) ((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET) +#define AT91_SAMA5D2_MR_PRESCAL_OFFSET 8 +#define AT91_SAMA5D2_MR_PRESCAL_MAX 0xff +/* Startup Time */ +#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16) +/* Analog Change */ +#define AT91_SAMA5D2_MR_ANACH BIT(23) +/* Tracking Time */ +#define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24) +#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff +/* Transfer Time */ +#define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28) +#define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3 +/* Use Sequence Enable */ +#define AT91_SAMA5D2_MR_USEQ BIT(31) + +/* Channel Sequence Register 1 */ +#define AT91_SAMA5D2_SEQR1 0x08 +/* Channel Sequence Register 2 */ +#define AT91_SAMA5D2_SEQR2 0x0c +/* Channel Enable Register */ +#define AT91_SAMA5D2_CHER 0x10 +/* Channel Disable Register */ +#define AT91_SAMA5D2_CHDR 0x14 +/* Channel Status Register */ +#define AT91_SAMA5D2_CHSR 0x18 +/* Last Converted Data Register */ +#define AT91_SAMA5D2_LCDR 0x20 +/* Interrupt Enable Register */ +#define AT91_SAMA5D2_IER 0x24 +/* Interrupt Disable Register */ +#define AT91_SAMA5D2_IDR 0x28 +/* Interrupt Mask Register */ +#define AT91_SAMA5D2_IMR 0x2c +/* Interrupt Status Register */ +#define AT91_SAMA5D2_ISR 0x30 +/* Last Channel Trigger Mode Register */ +#define AT91_SAMA5D2_LCTMR 0x34 +/* Last Channel Compare Window Register */ +#define AT91_SAMA5D2_LCCWR 0x38 +/* Overrun Status Register */ +#define AT91_SAMA5D2_OVER 0x3c +/* Extended Mode Register */ +#define AT91_SAMA5D2_EMR 0x40 +/* Compare Window Register */ +#define AT91_SAMA5D2_CWR 0x44 +/* Channel Gain Register */ +#define AT91_SAMA5D2_CGR 0x48 +/* Channel Offset Register */ +#define AT91_SAMA5D2_COR 0x4c +/* Channel Data Register 0 */ +#define AT91_SAMA5D2_CDR0 0x50 +/* Analog Control Register */ +#define AT91_SAMA5D2_ACR 0x94 +/* Touchscreen Mode Register */ +#define AT91_SAMA5D2_TSMR 0xb0 +/* Touchscreen X Position Register */ +#define AT91_SAMA5D2_XPOSR 0xb4 +/* Touchscreen Y Position Register */ +#define AT91_SAMA5D2_YPOSR 0xb8 +/* Touchscreen Pressure Register */ +#define AT91_SAMA5D2_PRESSR 0xbc +/* Trigger Register */ +#define AT91_SAMA5D2_TRGR 0xc0 +/* Correction Select 
Register */ +#define AT91_SAMA5D2_COSR 0xd0 +/* Correction Value Register */ +#define AT91_SAMA5D2_CVR 0xd4 +/* Channel Error Correction Register */ +#define AT91_SAMA5D2_CECR 0xd8 +/* Write Protection Mode Register */ +#define AT91_SAMA5D2_WPMR 0xe4 +/* Write Protection Status Register */ +#define AT91_SAMA5D2_WPSR 0xe8 +/* Version Register */ +#define AT91_SAMA5D2_VERSION 0xfc + +#define AT91_AT91_SAMA5D2_CHAN(num, addr) \ + { \ + .type = IIO_VOLTAGE, \ + .channel = num, \ + .address = addr, \ + .scan_type = { \ + .sign = 'u', \ + .realbits = 12, \ + }, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\ + .datasheet_name = "CH"#num, \ + .indexed = 1, \ + } + +#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg) +#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg) + +struct at91_adc_soc_info { + unsigned startup_time; + unsigned min_sample_rate; + unsigned max_sample_rate; +}; + +struct at91_adc_state { + void __iomem *base; + int irq; + struct clk *per_clk; + struct regulator *reg; + struct regulator *vref; + int vref_uv; + const struct iio_chan_spec *chan; + bool conversion_done; + u32 conversion_value; + struct at91_adc_soc_info soc_info; + wait_queue_head_t wq_data_available; + /* + * lock to prevent concurrent 'single conversion' requests through + * sysfs. + */ + struct mutex lock; +}; + +static const struct iio_chan_spec at91_adc_channels[] = { + AT91_AT91_SAMA5D2_CHAN(0, 0x50), + AT91_AT91_SAMA5D2_CHAN(1, 0x54), + AT91_AT91_SAMA5D2_CHAN(2, 0x58), + AT91_AT91_SAMA5D2_CHAN(3, 0x5c), + AT91_AT91_SAMA5D2_CHAN(4, 0x60), + AT91_AT91_SAMA5D2_CHAN(5, 0x64), + AT91_AT91_SAMA5D2_CHAN(6, 0x68), + AT91_AT91_SAMA5D2_CHAN(7, 0x6c), + AT91_AT91_SAMA5D2_CHAN(8, 0x70), + AT91_AT91_SAMA5D2_CHAN(9, 0x74), + AT91_AT91_SAMA5D2_CHAN(10, 0x78), + AT91_AT91_SAMA5D2_CHAN(11, 0x7c), +}; + +static unsigned at91_adc_startup_time(unsigned startup_time_min, + unsigned adc_clk_khz) +{ + const unsigned startup_lookup[] = { + 0, 8, 16, 24, + 64, 80, 96, 112, + 512, 576, 640, 704, + 768, 832, 896, 960 + }; + unsigned ticks_min, i; + + /* + * Since the adc frequency is checked before, there is no reason + * to not meet the startup time constraint. 
+ */ + + ticks_min = startup_time_min * adc_clk_khz / 1000; + for (i = 0; i < ARRAY_SIZE(startup_lookup); i++) + if (startup_lookup[i] > ticks_min) + break; + + return i; +} + +static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq) +{ + struct iio_dev *indio_dev = iio_priv_to_dev(st); + unsigned f_per, prescal, startup; + + f_per = clk_get_rate(st->per_clk); + prescal = (f_per / (2 * freq)) - 1; + + startup = at91_adc_startup_time(st->soc_info.startup_time, + freq / 1000); + + at91_adc_writel(st, AT91_SAMA5D2_MR, + AT91_SAMA5D2_MR_TRANSFER(2) + | AT91_SAMA5D2_MR_STARTUP(startup) + | AT91_SAMA5D2_MR_PRESCAL(prescal)); + + dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n", + freq, startup, prescal); +} + +static unsigned at91_adc_get_sample_freq(struct at91_adc_state *st) +{ + unsigned f_adc, f_per = clk_get_rate(st->per_clk); + unsigned mr, prescal; + + mr = at91_adc_readl(st, AT91_SAMA5D2_MR); + prescal = (mr >> AT91_SAMA5D2_MR_PRESCAL_OFFSET) + & AT91_SAMA5D2_MR_PRESCAL_MAX; + f_adc = f_per / (2 * (prescal + 1)); + + return f_adc; +} + +static irqreturn_t at91_adc_interrupt(int irq, void *private) +{ + struct iio_dev *indio = private; + struct at91_adc_state *st = iio_priv(indio); + u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR); + u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR); + + if (status & imr) { + st->conversion_value = at91_adc_readl(st, st->chan->address); + st->conversion_done = true; + wake_up_interruptible(&st->wq_data_available); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int at91_adc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct at91_adc_state *st = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&st->lock); + + st->chan = chan; + + at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel)); + at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel)); + at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START); + + ret = wait_event_interruptible_timeout(st->wq_data_available, + st->conversion_done, + msecs_to_jiffies(1000)); + if (ret == 0) + ret = -ETIMEDOUT; + + if (ret > 0) { + *val = st->conversion_value; + ret = IIO_VAL_INT; + st->conversion_done = false; + } + + at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel)); + at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel)); + + mutex_unlock(&st->lock); + return ret; + + case IIO_CHAN_INFO_SCALE: + *val = st->vref_uv / 1000; + *val2 = chan->scan_type.realbits; + return IIO_VAL_FRACTIONAL_LOG2; + + case IIO_CHAN_INFO_SAMP_FREQ: + *val = at91_adc_get_sample_freq(st); + return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + +static int at91_adc_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct at91_adc_state *st = iio_priv(indio_dev); + + if (mask != IIO_CHAN_INFO_SAMP_FREQ) + return -EINVAL; + + if (val < st->soc_info.min_sample_rate || + val > st->soc_info.max_sample_rate) + return -EINVAL; + + at91_adc_setup_samp_freq(st, val); + + return 0; +} + +static const struct iio_info at91_adc_info = { + .read_raw = &at91_adc_read_raw, + .write_raw = &at91_adc_write_raw, + .driver_module = THIS_MODULE, +}; + +static int at91_adc_probe(struct platform_device *pdev) +{ + struct iio_dev *indio_dev; + struct at91_adc_state *st; + struct resource *res; + int ret; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + 
indio_dev->dev.parent = &pdev->dev; + indio_dev->name = dev_name(&pdev->dev); + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &at91_adc_info; + indio_dev->channels = at91_adc_channels; + indio_dev->num_channels = ARRAY_SIZE(at91_adc_channels); + + st = iio_priv(indio_dev); + + ret = of_property_read_u32(pdev->dev.of_node, + "atmel,min-sample-rate-hz", + &st->soc_info.min_sample_rate); + if (ret) { + dev_err(&pdev->dev, + "invalid or missing value for atmel,min-sample-rate-hz\n"); + return ret; + } + + ret = of_property_read_u32(pdev->dev.of_node, + "atmel,max-sample-rate-hz", + &st->soc_info.max_sample_rate); + if (ret) { + dev_err(&pdev->dev, + "invalid or missing value for atmel,max-sample-rate-hz\n"); + return ret; + } + + ret = of_property_read_u32(pdev->dev.of_node, "atmel,startup-time-ms", + &st->soc_info.startup_time); + if (ret) { + dev_err(&pdev->dev, + "invalid or missing value for atmel,startup-time-ms\n"); + return ret; + } + + init_waitqueue_head(&st->wq_data_available); + mutex_init(&st->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + st->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(st->base)) + return PTR_ERR(st->base); + + st->irq = platform_get_irq(pdev, 0); + if (st->irq <= 0) { + if (!st->irq) + st->irq = -ENXIO; + + return st->irq; + } + + st->per_clk = devm_clk_get(&pdev->dev, "adc_clk"); + if (IS_ERR(st->per_clk)) + return PTR_ERR(st->per_clk); + + st->reg = devm_regulator_get(&pdev->dev, "vddana"); + if (IS_ERR(st->reg)) + return PTR_ERR(st->reg); + + st->vref = devm_regulator_get(&pdev->dev, "vref"); + if (IS_ERR(st->vref)) + return PTR_ERR(st->vref); + + ret = devm_request_irq(&pdev->dev, st->irq, at91_adc_interrupt, 0, + pdev->dev.driver->name, indio_dev); + if (ret) + return ret; + + ret = regulator_enable(st->reg); + if (ret) + return ret; + + ret = regulator_enable(st->vref); + if (ret) + goto reg_disable; + + st->vref_uv = regulator_get_voltage(st->vref); + if (st->vref_uv <= 0) { + ret = -EINVAL; + goto vref_disable; + } + + at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST); + at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff); + + at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate); + + ret = clk_prepare_enable(st->per_clk); + if (ret) + goto vref_disable; + + ret = iio_device_register(indio_dev); + if (ret < 0) + goto per_clk_disable_unprepare; + + dev_info(&pdev->dev, "version: %x\n", + readl_relaxed(st->base + AT91_SAMA5D2_VERSION)); + + return 0; + +per_clk_disable_unprepare: + clk_disable_unprepare(st->per_clk); +vref_disable: + regulator_disable(st->vref); +reg_disable: + regulator_disable(st->reg); + return ret; +} + +static int at91_adc_remove(struct platform_device *pdev) +{ + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct at91_adc_state *st = iio_priv(indio_dev); + + iio_device_unregister(indio_dev); + + clk_disable_unprepare(st->per_clk); + + regulator_disable(st->vref); + regulator_disable(st->reg); + + return 0; +} + +static const struct of_device_id at91_adc_dt_match[] = { + { + .compatible = "atmel,sama5d2-adc", + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, at91_adc_dt_match); + +static struct platform_driver at91_adc_driver = { + .probe = at91_adc_probe, + .remove = at91_adc_remove, + .driver = { + .name = "at91-sama5d2_adc", + .of_match_table = at91_adc_dt_match, + }, +}; +module_platform_driver(at91_adc_driver) + +MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>"); +MODULE_DESCRIPTION("Atmel AT91 SAMA5D2 
ADC"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 0c904edd6c00..7fd24949c0c1 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -46,7 +46,7 @@ struct axp288_adc_info { struct regmap *regmap; }; -static const struct iio_chan_spec const axp288_adc_channels[] = { +static const struct iio_chan_spec axp288_adc_channels[] = { { .indexed = 1, .type = IIO_TEMP, diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index d803e5018a42..65909d5858b1 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -19,17 +19,18 @@ * * Configurable 7-bit I2C slave address from 0x40 to 0x4F */ -#include <linux/module.h> -#include <linux/kthread.h> + #include <linux/delay.h> +#include <linux/i2c.h> #include <linux/iio/kfifo_buf.h> #include <linux/iio/sysfs.h> -#include <linux/i2c.h> +#include <linux/kthread.h> +#include <linux/module.h> #include <linux/regmap.h> -#include <linux/platform_data/ina2xx.h> - #include <linux/util_macros.h> +#include <linux/platform_data/ina2xx.h> + /* INA2XX registers definition */ #define INA2XX_CONFIG 0x00 #define INA2XX_SHUNT_VOLTAGE 0x01 /* readonly */ @@ -38,7 +39,7 @@ #define INA2XX_CURRENT 0x04 /* readonly */ #define INA2XX_CALIBRATION 0x05 -#define INA226_ALERT_MASK 0x06 +#define INA226_ALERT_MASK GENMASK(2, 1) #define INA266_CVRF BIT(3) #define INA2XX_MAX_REGISTERS 8 @@ -113,7 +114,7 @@ struct ina2xx_chip_info { struct mutex state_lock; unsigned int shunt_resistor; int avg; - s64 prev_ns; /* track buffer capture time, check for underruns*/ + s64 prev_ns; /* track buffer capture time, check for underruns */ int int_time_vbus; /* Bus voltage integration time uS */ int int_time_vshunt; /* Shunt voltage integration time uS */ bool allow_async_readout; @@ -121,21 +122,21 @@ struct ina2xx_chip_info { static const struct ina2xx_config ina2xx_config[] = { [ina219] = { - .config_default = INA219_CONFIG_DEFAULT, - .calibration_factor = 40960000, - .shunt_div = 100, - .bus_voltage_shift = 3, - .bus_voltage_lsb = 4000, - .power_lsb = 20000, - }, + .config_default = INA219_CONFIG_DEFAULT, + .calibration_factor = 40960000, + .shunt_div = 100, + .bus_voltage_shift = 3, + .bus_voltage_lsb = 4000, + .power_lsb = 20000, + }, [ina226] = { - .config_default = INA226_CONFIG_DEFAULT, - .calibration_factor = 5120000, - .shunt_div = 400, - .bus_voltage_shift = 0, - .bus_voltage_lsb = 1250, - .power_lsb = 25000, - }, + .config_default = INA226_CONFIG_DEFAULT, + .calibration_factor = 5120000, + .shunt_div = 400, + .bus_voltage_shift = 0, + .bus_voltage_lsb = 1250, + .power_lsb = 25000, + }, }; static int ina2xx_read_raw(struct iio_dev *indio_dev, @@ -149,7 +150,7 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_RAW: ret = regmap_read(chip->regmap, chan->address, ®val); - if (ret < 0) + if (ret) return ret; if (is_signed_reg(chan->address)) @@ -251,7 +252,7 @@ static int ina226_set_int_time_vbus(struct ina2xx_chip_info *chip, return -EINVAL; bits = find_closest(val_us, ina226_conv_time_tab, - ARRAY_SIZE(ina226_conv_time_tab)); + ARRAY_SIZE(ina226_conv_time_tab)); chip->int_time_vbus = ina226_conv_time_tab[bits]; @@ -270,7 +271,7 @@ static int ina226_set_int_time_vshunt(struct ina2xx_chip_info *chip, return -EINVAL; bits = find_closest(val_us, ina226_conv_time_tab, - ARRAY_SIZE(ina226_conv_time_tab)); + ARRAY_SIZE(ina226_conv_time_tab)); chip->int_time_vshunt = ina226_conv_time_tab[bits]; @@ -285,8 +286,8 @@ static int 
ina2xx_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct ina2xx_chip_info *chip = iio_priv(indio_dev); - int ret; unsigned int config, tmp; + int ret; if (iio_buffer_enabled(indio_dev)) return -EBUSY; @@ -294,8 +295,8 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev, mutex_lock(&chip->state_lock); ret = regmap_read(chip->regmap, INA2XX_CONFIG, &config); - if (ret < 0) - goto _err; + if (ret) + goto err; tmp = config; @@ -310,19 +311,19 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev, else ret = ina226_set_int_time_vbus(chip, val2, &tmp); break; + default: ret = -EINVAL; } if (!ret && (tmp != config)) ret = regmap_write(chip->regmap, INA2XX_CONFIG, tmp); -_err: +err: mutex_unlock(&chip->state_lock); return ret; } - static ssize_t ina2xx_allow_async_readout_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -355,6 +356,7 @@ static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val) return -EINVAL; chip->shunt_resistor = val; + return 0; } @@ -438,7 +440,6 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) struct ina2xx_chip_info *chip = iio_priv(indio_dev); unsigned short data[8]; int bit, ret, i = 0; - unsigned long buffer_us, elapsed_us; s64 time_a, time_b; unsigned int alert; @@ -462,8 +463,6 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) return ret; alert &= INA266_CVRF; - trace_printk("Conversion ready: %d\n", !!alert); - } while (!alert); /* @@ -488,19 +487,14 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev) iio_push_to_buffers_with_timestamp(indio_dev, (unsigned int *)data, time_a); - buffer_us = (unsigned long)(time_b - time_a) / 1000; - elapsed_us = (unsigned long)(time_a - chip->prev_ns) / 1000; - - trace_printk("uS: elapsed: %lu, buf: %lu\n", elapsed_us, buffer_us); - chip->prev_ns = time_a; - return buffer_us; + return (unsigned long)(time_b - time_a) / 1000; }; static int ina2xx_capture_thread(void *data) { - struct iio_dev *indio_dev = (struct iio_dev *)data; + struct iio_dev *indio_dev = data; struct ina2xx_chip_info *chip = iio_priv(indio_dev); unsigned int sampling_us = SAMPLING_PERIOD(chip); int buffer_us; @@ -530,12 +524,13 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev) struct ina2xx_chip_info *chip = iio_priv(indio_dev); unsigned int sampling_us = SAMPLING_PERIOD(chip); - trace_printk("Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n", - (unsigned int)(*indio_dev->active_scan_mask), - 1000000/sampling_us, chip->avg); + dev_dbg(&indio_dev->dev, "Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n", + (unsigned int)(*indio_dev->active_scan_mask), + 1000000 / sampling_us, chip->avg); - trace_printk("Expected work period: %u us\n", sampling_us); - trace_printk("Async readout mode: %d\n", chip->allow_async_readout); + dev_dbg(&indio_dev->dev, "Expected work period: %u us\n", sampling_us); + dev_dbg(&indio_dev->dev, "Async readout mode: %d\n", + chip->allow_async_readout); chip->prev_ns = iio_get_time_ns(); @@ -575,8 +570,7 @@ static int ina2xx_debug_reg(struct iio_dev *indio_dev, } /* Possible integration times for vshunt and vbus */ -static IIO_CONST_ATTR_INT_TIME_AVAIL \ - ("0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244"); +static IIO_CONST_ATTR_INT_TIME_AVAIL("0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244"); static IIO_DEVICE_ATTR(in_allow_async_readout, S_IRUGO | S_IWUSR, ina2xx_allow_async_readout_show, @@ -598,21 +592,23 @@ static const struct attribute_group ina2xx_attribute_group 
= { }; static const struct iio_info ina2xx_info = { - .debugfs_reg_access = &ina2xx_debug_reg, - .read_raw = &ina2xx_read_raw, - .write_raw = &ina2xx_write_raw, - .attrs = &ina2xx_attribute_group, .driver_module = THIS_MODULE, + .attrs = &ina2xx_attribute_group, + .read_raw = ina2xx_read_raw, + .write_raw = ina2xx_write_raw, + .debugfs_reg_access = ina2xx_debug_reg, }; /* Initialize the configuration and calibration registers. */ static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config) { u16 regval; - int ret = regmap_write(chip->regmap, INA2XX_CONFIG, config); + int ret; - if (ret < 0) + ret = regmap_write(chip->regmap, INA2XX_CONFIG, config); + if (ret) return ret; + /* * Set current LSB to 1mA, shunt is in uOhms * (equation 13 in datasheet). We hardcode a Current_LSB @@ -621,7 +617,7 @@ static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config) * to the user for now. */ regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor, - chip->shunt_resistor); + chip->shunt_resistor); return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval); } @@ -632,8 +628,8 @@ static int ina2xx_probe(struct i2c_client *client, struct ina2xx_chip_info *chip; struct iio_dev *indio_dev; struct iio_buffer *buffer; - int ret; unsigned int val; + int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); if (!indio_dev) @@ -641,8 +637,19 @@ static int ina2xx_probe(struct i2c_client *client, chip = iio_priv(indio_dev); + /* This is only used for device removal purposes. */ + i2c_set_clientdata(client, indio_dev); + + chip->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config); + if (IS_ERR(chip->regmap)) { + dev_err(&client->dev, "failed to allocate register map\n"); + return PTR_ERR(chip->regmap); + } + chip->config = &ina2xx_config[id->driver_data]; + mutex_init(&chip->state_lock); + if (of_property_read_u32(client->dev.of_node, "shunt-resistor", &val) < 0) { struct ina2xx_platform_data *pdata = @@ -658,25 +665,6 @@ static int ina2xx_probe(struct i2c_client *client, if (ret) return ret; - mutex_init(&chip->state_lock); - - /* This is only used for device removal purposes. */ - i2c_set_clientdata(client, indio_dev); - - indio_dev->name = id->name; - indio_dev->channels = ina2xx_channels; - indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels); - - indio_dev->dev.parent = &client->dev; - indio_dev->info = &ina2xx_info; - indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE; - - chip->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config); - if (IS_ERR(chip->regmap)) { - dev_err(&client->dev, "failed to allocate register map\n"); - return PTR_ERR(chip->regmap); - } - /* Patch the current config register with default. 
*/ val = chip->config->config_default; @@ -687,24 +675,28 @@ static int ina2xx_probe(struct i2c_client *client, } ret = ina2xx_init(chip, val); - if (ret < 0) { - dev_err(&client->dev, "error configuring the device: %d\n", - ret); - return -ENODEV; + if (ret) { + dev_err(&client->dev, "error configuring the device\n"); + return ret; } + indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE; + indio_dev->dev.parent = &client->dev; + indio_dev->channels = ina2xx_channels; + indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels); + indio_dev->name = id->name; + indio_dev->info = &ina2xx_info; + indio_dev->setup_ops = &ina2xx_setup_ops; + buffer = devm_iio_kfifo_allocate(&indio_dev->dev); if (!buffer) return -ENOMEM; - indio_dev->setup_ops = &ina2xx_setup_ops; - iio_device_attach_buffer(indio_dev, buffer); return iio_device_register(indio_dev); } - static int ina2xx_remove(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); @@ -717,7 +709,6 @@ static int ina2xx_remove(struct i2c_client *client) INA2XX_MODE_MASK, 0); } - static const struct i2c_device_id ina2xx_id[] = { {"ina219", ina219}, {"ina220", ina219}, @@ -726,7 +717,6 @@ static const struct i2c_device_id ina2xx_id[] = { {"ina231", ina226}, {} }; - MODULE_DEVICE_TABLE(i2c, ina2xx_id); static struct i2c_driver ina2xx_driver = { @@ -737,7 +727,6 @@ static struct i2c_driver ina2xx_driver = { .remove = ina2xx_remove, .id_table = ina2xx_id, }; - module_i2c_driver(ina2xx_driver); MODULE_AUTHOR("Marc Titinger <marc.titinger@baylibre.com>"); diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index d1c05f6eed18..a850ca7d1eda 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -187,26 +187,27 @@ out: .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \ } -#define MCP320X_VOLTAGE_CHANNEL_DIFF(num) \ +#define MCP320X_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \ { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ - .channel = (num * 2), \ - .channel2 = (num * 2 + 1), \ - .address = (num * 2), \ + .channel = (chan1), \ + .channel2 = (chan2), \ + .address = (chan1), \ .differential = 1, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \ } static const struct iio_chan_spec mcp3201_channels[] = { - MCP320X_VOLTAGE_CHANNEL_DIFF(0), + MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1), }; static const struct iio_chan_spec mcp3202_channels[] = { MCP320X_VOLTAGE_CHANNEL(0), MCP320X_VOLTAGE_CHANNEL(1), - MCP320X_VOLTAGE_CHANNEL_DIFF(0), + MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1), + MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0), }; static const struct iio_chan_spec mcp3204_channels[] = { @@ -214,8 +215,10 @@ static const struct iio_chan_spec mcp3204_channels[] = { MCP320X_VOLTAGE_CHANNEL(1), MCP320X_VOLTAGE_CHANNEL(2), MCP320X_VOLTAGE_CHANNEL(3), - MCP320X_VOLTAGE_CHANNEL_DIFF(0), - MCP320X_VOLTAGE_CHANNEL_DIFF(1), + MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1), + MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0), + MCP320X_VOLTAGE_CHANNEL_DIFF(2, 3), + MCP320X_VOLTAGE_CHANNEL_DIFF(3, 2), }; static const struct iio_chan_spec mcp3208_channels[] = { @@ -227,10 +230,14 @@ static const struct iio_chan_spec mcp3208_channels[] = { MCP320X_VOLTAGE_CHANNEL(5), MCP320X_VOLTAGE_CHANNEL(6), MCP320X_VOLTAGE_CHANNEL(7), - MCP320X_VOLTAGE_CHANNEL_DIFF(0), - MCP320X_VOLTAGE_CHANNEL_DIFF(1), - MCP320X_VOLTAGE_CHANNEL_DIFF(2), - MCP320X_VOLTAGE_CHANNEL_DIFF(3), + MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1), + MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0), + MCP320X_VOLTAGE_CHANNEL_DIFF(2, 3), + MCP320X_VOLTAGE_CHANNEL_DIFF(3, 2), + 
MCP320X_VOLTAGE_CHANNEL_DIFF(4, 5), + MCP320X_VOLTAGE_CHANNEL_DIFF(5, 4), + MCP320X_VOLTAGE_CHANNEL_DIFF(6, 7), + MCP320X_VOLTAGE_CHANNEL_DIFF(7, 6), }; static const struct iio_info mcp320x_info = { diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 6eca7aea8a37..d7b36efd2f3c 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -1,11 +1,12 @@ /* - * mcp3422.c - driver for the Microchip mcp3422/3/4/6/7/8 chip family + * mcp3422.c - driver for the Microchip mcp3421/2/3/4/5/6/7/8 chip family * * Copyright (C) 2013, Angelo Compagnucci * Author: Angelo Compagnucci <angelo.compagnucci@gmail.com> * * Datasheet: http://ww1.microchip.com/downloads/en/devicedoc/22088b.pdf * http://ww1.microchip.com/downloads/en/DeviceDoc/22226a.pdf + * http://ww1.microchip.com/downloads/en/DeviceDoc/22072b.pdf * * This driver exports the value of analog input voltage to sysfs, the * voltage unit is nV. @@ -338,7 +339,7 @@ static int mcp3422_probe(struct i2c_client *client, u8 config; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) - return -ENODEV; + return -EOPNOTSUPP; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adc)); if (!indio_dev) @@ -357,6 +358,7 @@ static int mcp3422_probe(struct i2c_client *client, switch (adc->id) { case 1: + case 5: indio_dev->channels = mcp3421_channels; indio_dev->num_channels = ARRAY_SIZE(mcp3421_channels); break; @@ -395,6 +397,7 @@ static const struct i2c_device_id mcp3422_id[] = { { "mcp3422", 2 }, { "mcp3423", 3 }, { "mcp3424", 4 }, + { "mcp3425", 5 }, { "mcp3426", 6 }, { "mcp3427", 7 }, { "mcp3428", 8 }, @@ -421,5 +424,5 @@ static struct i2c_driver mcp3422_driver = { module_i2c_driver(mcp3422_driver); MODULE_AUTHOR("Angelo Compagnucci <angelo.compagnucci@gmail.com>"); -MODULE_DESCRIPTION("Microchip mcp3422/3/4/6/7/8 driver"); +MODULE_DESCRIPTION("Microchip mcp3421/2/3/4/5/6/7/8 driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c index bb1f15224ac8..33051b87aac2 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/iio/adc/mxs-lradc.c @@ -443,7 +443,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1), LRADC_CH(ch)); - /* from the datasheet: + /* + * from the datasheet: * "Software must clear this register in preparation for a * multi-cycle accumulation. */ @@ -504,7 +505,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1, mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch1)); mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch2)); - /* from the datasheet: + /* + * from the datasheet: * "Software must clear this register in preparation for a * multi-cycle accumulation. 
*/ @@ -914,7 +916,8 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev, case IIO_CHAN_INFO_SCALE: if (chan->type == IIO_TEMP) { - /* From the datasheet, we have to multiply by 1.012 and + /* + * From the datasheet, we have to multiply by 1.012 and * divide by 4 */ *val = 0; @@ -929,7 +932,8 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev, case IIO_CHAN_INFO_OFFSET: if (chan->type == IIO_TEMP) { - /* The calculated value from the ADC is in Kelvin, we + /* + * The calculated value from the ADC is in Kelvin, we * want Celsius for hwmon so the offset is -273.15 * The offset is applied before scaling so it is * actually -213.15 * 4 / 1.012 = -1079.644268 @@ -1750,6 +1754,7 @@ static int mxs_lradc_remove(struct platform_device *pdev) iio_triggered_buffer_cleanup(iio); clk_disable_unprepare(lradc->clk); + return 0; } diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c index f42eb8a7d21f..2bbf0c521beb 100644 --- a/drivers/iio/adc/palmas_gpadc.c +++ b/drivers/iio/adc/palmas_gpadc.c @@ -534,7 +534,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev) } ret = request_threaded_irq(adc->irq, NULL, palmas_gpadc_irq, - IRQF_ONESHOT | IRQF_EARLY_RESUME, dev_name(adc->dev), + IRQF_ONESHOT, dev_name(adc->dev), adc); if (ret < 0) { dev_err(adc->dev, @@ -549,7 +549,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev) adc->irq_auto_0 = platform_get_irq(pdev, 1); ret = request_threaded_irq(adc->irq_auto_0, NULL, palmas_gpadc_irq_auto, - IRQF_ONESHOT | IRQF_EARLY_RESUME, + IRQF_ONESHOT, "palmas-adc-auto-0", adc); if (ret < 0) { dev_err(adc->dev, "request auto0 irq %d failed: %d\n", @@ -565,7 +565,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev) adc->irq_auto_1 = platform_get_irq(pdev, 2); ret = request_threaded_irq(adc->irq_auto_1, NULL, palmas_gpadc_irq_auto, - IRQF_ONESHOT | IRQF_EARLY_RESUME, + IRQF_ONESHOT, "palmas-adc-auto-1", adc); if (ret < 0) { dev_err(adc->dev, "request auto1 irq %d failed: %d\n", diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index 2c8374f86252..ecbc12138d58 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -73,7 +73,7 @@ static int adc081c_probe(struct i2c_client *client, int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) - return -ENODEV; + return -EOPNOTSUPP; iio = devm_iio_device_alloc(&client->dev, sizeof(*adc)); if (!iio) diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c new file mode 100644 index 000000000000..0afeac0c9bad --- /dev/null +++ b/drivers/iio/adc/ti-adc0832.c @@ -0,0 +1,288 @@ +/* + * ADC0831/ADC0832/ADC0834/ADC0838 8-bit ADC driver + * + * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com> + * + * This file is subject to the terms and conditions of version 2 of + * the GNU General Public License. See the file COPYING in the main + * directory of this archive for more details. 
+ * + * Datasheet: http://www.ti.com/lit/ds/symlink/adc0832-n.pdf + */ + +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/iio/iio.h> +#include <linux/regulator/consumer.h> + +enum { + adc0831, + adc0832, + adc0834, + adc0838, +}; + +struct adc0832 { + struct spi_device *spi; + struct regulator *reg; + struct mutex lock; + u8 mux_bits; + + u8 tx_buf[2] ____cacheline_aligned; + u8 rx_buf[2]; +}; + +#define ADC0832_VOLTAGE_CHANNEL(chan) \ + { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .channel = chan, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \ + } + +#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \ + { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .channel = (chan1), \ + .channel2 = (chan2), \ + .differential = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \ + } + +static const struct iio_chan_spec adc0831_channels[] = { + ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1), +}; + +static const struct iio_chan_spec adc0832_channels[] = { + ADC0832_VOLTAGE_CHANNEL(0), + ADC0832_VOLTAGE_CHANNEL(1), + ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1), + ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0), +}; + +static const struct iio_chan_spec adc0834_channels[] = { + ADC0832_VOLTAGE_CHANNEL(0), + ADC0832_VOLTAGE_CHANNEL(1), + ADC0832_VOLTAGE_CHANNEL(2), + ADC0832_VOLTAGE_CHANNEL(3), + ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1), + ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0), + ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3), + ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2), +}; + +static const struct iio_chan_spec adc0838_channels[] = { + ADC0832_VOLTAGE_CHANNEL(0), + ADC0832_VOLTAGE_CHANNEL(1), + ADC0832_VOLTAGE_CHANNEL(2), + ADC0832_VOLTAGE_CHANNEL(3), + ADC0832_VOLTAGE_CHANNEL(4), + ADC0832_VOLTAGE_CHANNEL(5), + ADC0832_VOLTAGE_CHANNEL(6), + ADC0832_VOLTAGE_CHANNEL(7), + ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1), + ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0), + ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3), + ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2), + ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5), + ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4), + ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7), + ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6), +}; + +static int adc0831_adc_conversion(struct adc0832 *adc) +{ + struct spi_device *spi = adc->spi; + int ret; + + ret = spi_read(spi, &adc->rx_buf, 2); + if (ret) + return ret; + + /* + * Skip TRI-STATE and a leading zero + */ + return (adc->rx_buf[0] << 2 & 0xff) | (adc->rx_buf[1] >> 6); +} + +static int adc0832_adc_conversion(struct adc0832 *adc, int channel, + bool differential) +{ + struct spi_device *spi = adc->spi; + struct spi_transfer xfer = { + .tx_buf = adc->tx_buf, + .rx_buf = adc->rx_buf, + .len = 2, + }; + int ret; + + if (!adc->mux_bits) + return adc0831_adc_conversion(adc); + + /* start bit */ + adc->tx_buf[0] = 1 << (adc->mux_bits + 1); + /* single-ended or differential */ + adc->tx_buf[0] |= differential ? 
0 : (1 << adc->mux_bits); + /* odd / sign */ + adc->tx_buf[0] |= (channel % 2) << (adc->mux_bits - 1); + /* select */ + if (adc->mux_bits > 1) + adc->tx_buf[0] |= channel / 2; + + /* align Data output BIT7 (MSB) to 8-bit boundary */ + adc->tx_buf[0] <<= 1; + + ret = spi_sync_transfer(spi, &xfer, 1); + if (ret) + return ret; + + return adc->rx_buf[1]; +} + +static int adc0832_read_raw(struct iio_dev *iio, + struct iio_chan_spec const *channel, int *value, + int *shift, long mask) +{ + struct adc0832 *adc = iio_priv(iio); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&adc->lock); + *value = adc0832_adc_conversion(adc, channel->channel, + channel->differential); + mutex_unlock(&adc->lock); + if (*value < 0) + return *value; + + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *value = regulator_get_voltage(adc->reg); + if (*value < 0) + return *value; + + /* convert regulator output voltage to mV */ + *value /= 1000; + *shift = 8; + + return IIO_VAL_FRACTIONAL_LOG2; + } + + return -EINVAL; +} + +static const struct iio_info adc0832_info = { + .read_raw = adc0832_read_raw, + .driver_module = THIS_MODULE, +}; + +static int adc0832_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct adc0832 *adc; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc)); + if (!indio_dev) + return -ENOMEM; + + adc = iio_priv(indio_dev); + adc->spi = spi; + mutex_init(&adc->lock); + + indio_dev->name = spi_get_device_id(spi)->name; + indio_dev->dev.parent = &spi->dev; + indio_dev->info = &adc0832_info; + indio_dev->modes = INDIO_DIRECT_MODE; + + switch (spi_get_device_id(spi)->driver_data) { + case adc0831: + adc->mux_bits = 0; + indio_dev->channels = adc0831_channels; + indio_dev->num_channels = ARRAY_SIZE(adc0831_channels); + break; + case adc0832: + adc->mux_bits = 1; + indio_dev->channels = adc0832_channels; + indio_dev->num_channels = ARRAY_SIZE(adc0832_channels); + break; + case adc0834: + adc->mux_bits = 2; + indio_dev->channels = adc0834_channels; + indio_dev->num_channels = ARRAY_SIZE(adc0834_channels); + break; + case adc0838: + adc->mux_bits = 3; + indio_dev->channels = adc0838_channels; + indio_dev->num_channels = ARRAY_SIZE(adc0838_channels); + break; + default: + return -EINVAL; + } + + adc->reg = devm_regulator_get(&spi->dev, "vref"); + if (IS_ERR(adc->reg)) + return PTR_ERR(adc->reg); + + ret = regulator_enable(adc->reg); + if (ret) + return ret; + + spi_set_drvdata(spi, indio_dev); + + ret = iio_device_register(indio_dev); + if (ret) + regulator_disable(adc->reg); + + return ret; +} + +static int adc0832_remove(struct spi_device *spi) +{ + struct iio_dev *indio_dev = spi_get_drvdata(spi); + struct adc0832 *adc = iio_priv(indio_dev); + + iio_device_unregister(indio_dev); + regulator_disable(adc->reg); + + return 0; +} + +#ifdef CONFIG_OF + +static const struct of_device_id adc0832_dt_ids[] = { + { .compatible = "ti,adc0831", }, + { .compatible = "ti,adc0832", }, + { .compatible = "ti,adc0834", }, + { .compatible = "ti,adc0838", }, + {} +}; +MODULE_DEVICE_TABLE(of, adc0832_dt_ids); + +#endif + +static const struct spi_device_id adc0832_id[] = { + { "adc0831", adc0831 }, + { "adc0832", adc0832 }, + { "adc0834", adc0834 }, + { "adc0838", adc0838 }, + {} +}; +MODULE_DEVICE_TABLE(spi, adc0832_id); + +static struct spi_driver adc0832_driver = { + .driver = { + .name = "adc0832", + .of_match_table = of_match_ptr(adc0832_dt_ids), + }, + .probe = adc0832_probe, + .remove = adc0832_remove, + .id_table = adc0832_id, +}; +module_spi_driver(adc0832_driver); + 
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); +MODULE_DESCRIPTION("ADC0831/ADC0832/ADC0834/ADC0838 driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c new file mode 100644 index 000000000000..73cbf0b54e54 --- /dev/null +++ b/drivers/iio/adc/ti-ads1015.c @@ -0,0 +1,612 @@ +/* + * ADS1015 - Texas Instruments Analog-to-Digital Converter + * + * Copyright (c) 2016, Intel Corporation. + * + * This file is subject to the terms and conditions of version 2 of + * the GNU General Public License. See the file COPYING in the main + * directory of this archive for more details. + * + * IIO driver for ADS1015 ADC 7-bit I2C slave address: + * * 0x48 - ADDR connected to Ground + * * 0x49 - ADDR connected to Vdd + * * 0x4A - ADDR connected to SDA + * * 0x4B - ADDR connected to SCL + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include <linux/pm_runtime.h> +#include <linux/mutex.h> +#include <linux/delay.h> + +#include <linux/i2c/ads1015.h> + +#include <linux/iio/iio.h> +#include <linux/iio/types.h> +#include <linux/iio/sysfs.h> +#include <linux/iio/buffer.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/iio/trigger_consumer.h> + +#define ADS1015_DRV_NAME "ads1015" + +#define ADS1015_CONV_REG 0x00 +#define ADS1015_CFG_REG 0x01 + +#define ADS1015_CFG_DR_SHIFT 5 +#define ADS1015_CFG_MOD_SHIFT 8 +#define ADS1015_CFG_PGA_SHIFT 9 +#define ADS1015_CFG_MUX_SHIFT 12 + +#define ADS1015_CFG_DR_MASK GENMASK(7, 5) +#define ADS1015_CFG_MOD_MASK BIT(8) +#define ADS1015_CFG_PGA_MASK GENMASK(11, 9) +#define ADS1015_CFG_MUX_MASK GENMASK(14, 12) + +/* device operating modes */ +#define ADS1015_CONTINUOUS 0 +#define ADS1015_SINGLESHOT 1 + +#define ADS1015_SLEEP_DELAY_MS 2000 +#define ADS1015_DEFAULT_PGA 2 +#define ADS1015_DEFAULT_DATA_RATE 4 +#define ADS1015_DEFAULT_CHAN 0 + +enum ads1015_channels { + ADS1015_AIN0_AIN1 = 0, + ADS1015_AIN0_AIN3, + ADS1015_AIN1_AIN3, + ADS1015_AIN2_AIN3, + ADS1015_AIN0, + ADS1015_AIN1, + ADS1015_AIN2, + ADS1015_AIN3, + ADS1015_TIMESTAMP, +}; + +static const unsigned int ads1015_data_rate[] = { + 128, 250, 490, 920, 1600, 2400, 3300, 3300 +}; + +static const struct { + int scale; + int uscale; +} ads1015_scale[] = { + {3, 0}, + {2, 0}, + {1, 0}, + {0, 500000}, + {0, 250000}, + {0, 125000}, + {0, 125000}, + {0, 125000}, +}; + +#define ADS1015_V_CHAN(_chan, _addr) { \ + .type = IIO_VOLTAGE, \ + .indexed = 1, \ + .address = _addr, \ + .channel = _chan, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .scan_index = _addr, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 12, \ + .storagebits = 16, \ + .shift = 4, \ + .endianness = IIO_CPU, \ + }, \ +} + +#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \ + .type = IIO_VOLTAGE, \ + .differential = 1, \ + .indexed = 1, \ + .address = _addr, \ + .channel = _chan, \ + .channel2 = _chan2, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .scan_index = _addr, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 12, \ + .storagebits = 16, \ + .shift = 4, \ + .endianness = IIO_CPU, \ + }, \ +} + +struct ads1015_data { + struct regmap *regmap; + /* + * Protects ADC ops, e.g: concurrent sysfs/buffered + * data reads, configuration updates + */ + struct mutex lock; + struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; +}; + +static bool 
ads1015_is_writeable_reg(struct device *dev, unsigned int reg) +{ + return (reg == ADS1015_CFG_REG); +} + +static const struct regmap_config ads1015_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .max_register = ADS1015_CFG_REG, + .writeable_reg = ads1015_is_writeable_reg, +}; + +static const struct iio_chan_spec ads1015_channels[] = { + ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1), + ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3), + ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3), + ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3), + ADS1015_V_CHAN(0, ADS1015_AIN0), + ADS1015_V_CHAN(1, ADS1015_AIN1), + ADS1015_V_CHAN(2, ADS1015_AIN2), + ADS1015_V_CHAN(3, ADS1015_AIN3), + IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), +}; + +static int ads1015_set_power_state(struct ads1015_data *data, bool on) +{ + int ret; + struct device *dev = regmap_get_device(data->regmap); + + if (on) { + ret = pm_runtime_get_sync(dev); + if (ret < 0) + pm_runtime_put_noidle(dev); + } else { + pm_runtime_mark_last_busy(dev); + ret = pm_runtime_put_autosuspend(dev); + } + + return ret; +} + +static +int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) +{ + int ret, pga, dr, conv_time; + bool change; + + if (chan < 0 || chan >= ADS1015_CHANNELS) + return -EINVAL; + + pga = data->channel_data[chan].pga; + dr = data->channel_data[chan].data_rate; + + ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG, + ADS1015_CFG_MUX_MASK | + ADS1015_CFG_PGA_MASK, + chan << ADS1015_CFG_MUX_SHIFT | + pga << ADS1015_CFG_PGA_SHIFT, + &change); + if (ret < 0) + return ret; + + if (change) { + conv_time = DIV_ROUND_UP(USEC_PER_SEC, ads1015_data_rate[dr]); + usleep_range(conv_time, conv_time + 1); + } + + return regmap_read(data->regmap, ADS1015_CONV_REG, val); +} + +static irqreturn_t ads1015_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ads1015_data *data = iio_priv(indio_dev); + s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding + 4x s16 timestamp */ + int chan, ret, res; + + memset(buf, 0, sizeof(buf)); + + mutex_lock(&data->lock); + chan = find_first_bit(indio_dev->active_scan_mask, + indio_dev->masklength); + ret = ads1015_get_adc_result(data, chan, &res); + if (ret < 0) { + mutex_unlock(&data->lock); + goto err; + } + + buf[0] = res; + mutex_unlock(&data->lock); + + iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + +err: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static int ads1015_set_scale(struct ads1015_data *data, int chan, + int scale, int uscale) +{ + int i, ret, rindex = -1; + + for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++) + if (ads1015_scale[i].scale == scale && + ads1015_scale[i].uscale == uscale) { + rindex = i; + break; + } + if (rindex < 0) + return -EINVAL; + + ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG, + ADS1015_CFG_PGA_MASK, + rindex << ADS1015_CFG_PGA_SHIFT); + if (ret < 0) + return ret; + + data->channel_data[chan].pga = rindex; + + return 0; +} + +static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate) +{ + int i, ret, rindex = -1; + + for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) + if (ads1015_data_rate[i] == rate) { + rindex = i; + break; + } + if (rindex < 0) + return -EINVAL; + + ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG, + ADS1015_CFG_DR_MASK, + rindex << ADS1015_CFG_DR_SHIFT); + if (ret < 0) + return ret; + + data->channel_data[chan].data_rate = rindex; + + return 0; +} + +static int 
ads1015_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + int ret, idx; + struct ads1015_data *data = iio_priv(indio_dev); + + mutex_lock(&indio_dev->mlock); + mutex_lock(&data->lock); + switch (mask) { + case IIO_CHAN_INFO_RAW: + if (iio_buffer_enabled(indio_dev)) { + ret = -EBUSY; + break; + } + + ret = ads1015_set_power_state(data, true); + if (ret < 0) + break; + + ret = ads1015_get_adc_result(data, chan->address, val); + if (ret < 0) { + ads1015_set_power_state(data, false); + break; + } + + /* 12 bit res, D0 is bit 4 in conversion register */ + *val = sign_extend32(*val >> 4, 11); + + ret = ads1015_set_power_state(data, false); + if (ret < 0) + break; + + ret = IIO_VAL_INT; + break; + case IIO_CHAN_INFO_SCALE: + idx = data->channel_data[chan->address].pga; + *val = ads1015_scale[idx].scale; + *val2 = ads1015_scale[idx].uscale; + ret = IIO_VAL_INT_PLUS_MICRO; + break; + case IIO_CHAN_INFO_SAMP_FREQ: + idx = data->channel_data[chan->address].data_rate; + *val = ads1015_data_rate[idx]; + ret = IIO_VAL_INT; + break; + default: + ret = -EINVAL; + break; + } + mutex_unlock(&data->lock); + mutex_unlock(&indio_dev->mlock); + + return ret; +} + +static int ads1015_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, + int val2, long mask) +{ + struct ads1015_data *data = iio_priv(indio_dev); + int ret; + + mutex_lock(&data->lock); + switch (mask) { + case IIO_CHAN_INFO_SCALE: + ret = ads1015_set_scale(data, chan->address, val, val2); + break; + case IIO_CHAN_INFO_SAMP_FREQ: + ret = ads1015_set_data_rate(data, chan->address, val); + break; + default: + ret = -EINVAL; + break; + } + mutex_unlock(&data->lock); + + return ret; +} + +static int ads1015_buffer_preenable(struct iio_dev *indio_dev) +{ + return ads1015_set_power_state(iio_priv(indio_dev), true); +} + +static int ads1015_buffer_postdisable(struct iio_dev *indio_dev) +{ + return ads1015_set_power_state(iio_priv(indio_dev), false); +} + +static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = { + .preenable = ads1015_buffer_preenable, + .postenable = iio_triggered_buffer_postenable, + .predisable = iio_triggered_buffer_predisable, + .postdisable = ads1015_buffer_postdisable, + .validate_scan_mask = &iio_validate_scan_mask_onehot, +}; + +static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125"); +static IIO_CONST_ATTR(sampling_frequency_available, + "128 250 490 920 1600 2400 3300"); + +static struct attribute *ads1015_attributes[] = { + &iio_const_attr_scale_available.dev_attr.attr, + &iio_const_attr_sampling_frequency_available.dev_attr.attr, + NULL, +}; + +static const struct attribute_group ads1015_attribute_group = { + .attrs = ads1015_attributes, +}; + +static const struct iio_info ads1015_info = { + .driver_module = THIS_MODULE, + .read_raw = ads1015_read_raw, + .write_raw = ads1015_write_raw, + .attrs = &ads1015_attribute_group, +}; + +#ifdef CONFIG_OF +static int ads1015_get_channels_config_of(struct i2c_client *client) +{ + struct ads1015_data *data = i2c_get_clientdata(client); + struct device_node *node; + + if (!client->dev.of_node || + !of_get_next_child(client->dev.of_node, NULL)) + return -EINVAL; + + for_each_child_of_node(client->dev.of_node, node) { + u32 pval; + unsigned int channel; + unsigned int pga = ADS1015_DEFAULT_PGA; + unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE; + + if (of_property_read_u32(node, "reg", &pval)) { + dev_err(&client->dev, "invalid reg on %s\n", + node->full_name); + continue; + } + + 
channel = pval; + if (channel >= ADS1015_CHANNELS) { + dev_err(&client->dev, + "invalid channel index %d on %s\n", + channel, node->full_name); + continue; + } + + if (!of_property_read_u32(node, "ti,gain", &pval)) { + pga = pval; + if (pga > 6) { + dev_err(&client->dev, "invalid gain on %s\n", + node->full_name); + return -EINVAL; + } + } + + if (!of_property_read_u32(node, "ti,datarate", &pval)) { + data_rate = pval; + if (data_rate > 7) { + dev_err(&client->dev, + "invalid data_rate on %s\n", + node->full_name); + return -EINVAL; + } + } + + data->channel_data[channel].pga = pga; + data->channel_data[channel].data_rate = data_rate; + } + + return 0; +} +#endif + +static void ads1015_get_channels_config(struct i2c_client *client) +{ + unsigned int k; + + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct ads1015_data *data = iio_priv(indio_dev); + struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev); + + /* prefer platform data */ + if (pdata) { + memcpy(data->channel_data, pdata->channel_data, + sizeof(data->channel_data)); + return; + } + +#ifdef CONFIG_OF + if (!ads1015_get_channels_config_of(client)) + return; +#endif + /* fallback on default configuration */ + for (k = 0; k < ADS1015_CHANNELS; ++k) { + data->channel_data[k].pga = ADS1015_DEFAULT_PGA; + data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE; + } +} + +static int ads1015_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct iio_dev *indio_dev; + struct ads1015_data *data; + int ret; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); + + mutex_init(&data->lock); + + indio_dev->dev.parent = &client->dev; + indio_dev->info = &ads1015_info; + indio_dev->name = ADS1015_DRV_NAME; + indio_dev->channels = ads1015_channels; + indio_dev->num_channels = ARRAY_SIZE(ads1015_channels); + indio_dev->modes = INDIO_DIRECT_MODE; + + /* we need to keep this ABI the same as used by hwmon ADS1015 driver */ + ads1015_get_channels_config(client); + + data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config); + if (IS_ERR(data->regmap)) { + dev_err(&client->dev, "Failed to allocate register map\n"); + return PTR_ERR(data->regmap); + } + + ret = iio_triggered_buffer_setup(indio_dev, NULL, + ads1015_trigger_handler, + &ads1015_buffer_setup_ops); + if (ret < 0) { + dev_err(&client->dev, "iio triggered buffer setup failed\n"); + return ret; + } + ret = pm_runtime_set_active(&client->dev); + if (ret) + goto err_buffer_cleanup; + pm_runtime_set_autosuspend_delay(&client->dev, ADS1015_SLEEP_DELAY_MS); + pm_runtime_use_autosuspend(&client->dev); + pm_runtime_enable(&client->dev); + + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&client->dev, "Failed to register IIO device\n"); + goto err_buffer_cleanup; + } + + return 0; + +err_buffer_cleanup: + iio_triggered_buffer_cleanup(indio_dev); + + return ret; +} + +static int ads1015_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct ads1015_data *data = iio_priv(indio_dev); + + iio_device_unregister(indio_dev); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + pm_runtime_put_noidle(&client->dev); + + iio_triggered_buffer_cleanup(indio_dev); + + /* power down single shot mode */ + return regmap_update_bits(data->regmap, ADS1015_CFG_REG, + ADS1015_CFG_MOD_MASK, + ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT); +} + 
+#ifdef CONFIG_PM +static int ads1015_runtime_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); + struct ads1015_data *data = iio_priv(indio_dev); + + return regmap_update_bits(data->regmap, ADS1015_CFG_REG, + ADS1015_CFG_MOD_MASK, + ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT); +} + +static int ads1015_runtime_resume(struct device *dev) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); + struct ads1015_data *data = iio_priv(indio_dev); + + return regmap_update_bits(data->regmap, ADS1015_CFG_REG, + ADS1015_CFG_MOD_MASK, + ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT); +} +#endif + +static const struct dev_pm_ops ads1015_pm_ops = { + SET_RUNTIME_PM_OPS(ads1015_runtime_suspend, + ads1015_runtime_resume, NULL) +}; + +static const struct i2c_device_id ads1015_id[] = { + {"ads1015", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, ads1015_id); + +static struct i2c_driver ads1015_driver = { + .driver = { + .name = ADS1015_DRV_NAME, + .pm = &ads1015_pm_ops, + }, + .probe = ads1015_probe, + .remove = ads1015_remove, + .id_table = ads1015_id, +}; + +module_i2c_driver(ads1015_driver); + +MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>"); +MODULE_DESCRIPTION("Texas Instruments ADS1015 ADC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index ebdb838d3a1c..9fabed47053d 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -93,12 +93,7 @@ static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue) struct dmaengine_buffer *dmaengine_buffer = iio_buffer_to_dmaengine_buffer(&queue->buffer); - dmaengine_terminate_all(dmaengine_buffer->chan); - /* FIXME: There is a slight chance of a race condition here. - * dmaengine_terminate_all() does not guarantee that all transfer - * callbacks have finished running. Need to introduce a - * dmaengine_terminate_all_sync(). - */ + dmaengine_terminate_sync(dmaengine_buffer->chan); iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active); } diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index f16de61be46d..f73290f84c90 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -4,6 +4,20 @@ menu "Chemical Sensors" +config ATLAS_PH_SENSOR + tristate "Atlas Scientific OEM pH-SM sensor" + depends on I2C + select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + select IRQ_WORK + help + Say Y here to build I2C interface support for the Atlas + Scientific OEM pH-SM sensor. + + To compile this driver as module, choose M here: the + module will be called atlas-ph-sensor. 
+ config IAQCORE tristate "AMS iAQ-Core VOC sensors" depends on I2C diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile index 167861fadfab..b02202b41289 100644 --- a/drivers/iio/chemical/Makefile +++ b/drivers/iio/chemical/Makefile @@ -3,5 +3,6 @@ # # When adding new entries keep the list in alphabetical order +obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o obj-$(CONFIG_IAQCORE) += ams-iaq-core.o obj-$(CONFIG_VZ89X) += vz89x.o diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c new file mode 100644 index 000000000000..62b37cd8fb56 --- /dev/null +++ b/drivers/iio/chemical/atlas-ph-sensor.c @@ -0,0 +1,509 @@ +/* + * atlas-ph-sensor.c - Support for Atlas Scientific OEM pH-SM sensor + * + * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/err.h> +#include <linux/irq.h> +#include <linux/irq_work.h> +#include <linux/gpio.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include <linux/iio/iio.h> +#include <linux/iio/buffer.h> +#include <linux/iio/trigger.h> +#include <linux/iio/trigger_consumer.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/pm_runtime.h> + +#define ATLAS_REGMAP_NAME "atlas_ph_regmap" +#define ATLAS_DRV_NAME "atlas_ph" + +#define ATLAS_REG_DEV_TYPE 0x00 +#define ATLAS_REG_DEV_VERSION 0x01 + +#define ATLAS_REG_INT_CONTROL 0x04 +#define ATLAS_REG_INT_CONTROL_EN BIT(3) + +#define ATLAS_REG_PWR_CONTROL 0x06 + +#define ATLAS_REG_CALIB_STATUS 0x0d +#define ATLAS_REG_CALIB_STATUS_MASK 0x07 +#define ATLAS_REG_CALIB_STATUS_LOW BIT(0) +#define ATLAS_REG_CALIB_STATUS_MID BIT(1) +#define ATLAS_REG_CALIB_STATUS_HIGH BIT(2) + +#define ATLAS_REG_TEMP_DATA 0x0e +#define ATLAS_REG_PH_DATA 0x16 + +#define ATLAS_PH_INT_TIME_IN_US 450000 + +struct atlas_data { + struct i2c_client *client; + struct iio_trigger *trig; + struct regmap *regmap; + struct irq_work work; + + __be32 buffer[4]; /* 32-bit pH data + 32-bit pad + 64-bit timestamp */ +}; + +static const struct regmap_range atlas_volatile_ranges[] = { + regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL), + regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4), +}; + +static const struct regmap_access_table atlas_volatile_table = { + .yes_ranges = atlas_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(atlas_volatile_ranges), +}; + +static const struct regmap_config atlas_regmap_config = { + .name = ATLAS_REGMAP_NAME, + + .reg_bits = 8, + .val_bits = 8, + + .volatile_table = &atlas_volatile_table, + .max_register = ATLAS_REG_PH_DATA + 4, + .cache_type = REGCACHE_RBTREE, +}; + +static const struct iio_chan_spec atlas_channels[] = { + { + .type = IIO_PH, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 0, + .scan_type = { + .sign = 'u', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_BE, + }, + }, + IIO_CHAN_SOFT_TIMESTAMP(1), + { + 
.type = IIO_TEMP, + .address = ATLAS_REG_TEMP_DATA, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .output = 1, + .scan_index = -1 + }, +}; + +static int atlas_set_powermode(struct atlas_data *data, int on) +{ + return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on); +} + +static int atlas_set_interrupt(struct atlas_data *data, bool state) +{ + return regmap_update_bits(data->regmap, ATLAS_REG_INT_CONTROL, + ATLAS_REG_INT_CONTROL_EN, + state ? ATLAS_REG_INT_CONTROL_EN : 0); +} + +static int atlas_buffer_postenable(struct iio_dev *indio_dev) +{ + struct atlas_data *data = iio_priv(indio_dev); + int ret; + + ret = iio_triggered_buffer_postenable(indio_dev); + if (ret) + return ret; + + ret = pm_runtime_get_sync(&data->client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&data->client->dev); + return ret; + } + + return atlas_set_interrupt(data, true); +} + +static int atlas_buffer_predisable(struct iio_dev *indio_dev) +{ + struct atlas_data *data = iio_priv(indio_dev); + int ret; + + ret = iio_triggered_buffer_predisable(indio_dev); + if (ret) + return ret; + + ret = atlas_set_interrupt(data, false); + if (ret) + return ret; + + pm_runtime_mark_last_busy(&data->client->dev); + return pm_runtime_put_autosuspend(&data->client->dev); +} + +static const struct iio_trigger_ops atlas_interrupt_trigger_ops = { + .owner = THIS_MODULE, +}; + +static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = { + .postenable = atlas_buffer_postenable, + .predisable = atlas_buffer_predisable, +}; + +static void atlas_work_handler(struct irq_work *work) +{ + struct atlas_data *data = container_of(work, struct atlas_data, work); + + iio_trigger_poll(data->trig); +} + +static irqreturn_t atlas_trigger_handler(int irq, void *private) +{ + struct iio_poll_func *pf = private; + struct iio_dev *indio_dev = pf->indio_dev; + struct atlas_data *data = iio_priv(indio_dev); + int ret; + + ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA, + (u8 *) &data->buffer, sizeof(data->buffer[0])); + + if (!ret) + iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, + iio_get_time_ns()); + + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static irqreturn_t atlas_interrupt_handler(int irq, void *private) +{ + struct iio_dev *indio_dev = private; + struct atlas_data *data = iio_priv(indio_dev); + + irq_work_queue(&data->work); + + return IRQ_HANDLED; +} + +static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val) +{ + struct device *dev = &data->client->dev; + int suspended = pm_runtime_suspended(dev); + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + if (suspended) + usleep_range(ATLAS_PH_INT_TIME_IN_US, + ATLAS_PH_INT_TIME_IN_US + 100000); + + ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA, + (u8 *) val, sizeof(*val)); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static int atlas_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct atlas_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: { + int ret; + __be32 reg; + + switch (chan->type) { + case IIO_TEMP: + ret = regmap_bulk_read(data->regmap, chan->address, + (u8 *) &reg, sizeof(reg)); + break; + case IIO_PH: + mutex_lock(&indio_dev->mlock); + + if (iio_buffer_enabled(indio_dev)) + ret = -EBUSY; + else + ret = atlas_read_ph_measurement(data, &reg); + +
mutex_unlock(&indio_dev->mlock); + break; + default: + ret = -EINVAL; + } + + if (!ret) { + *val = be32_to_cpu(reg); + ret = IIO_VAL_INT; + } + return ret; + } + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_TEMP: + *val = 1; /* 0.01 */ + *val2 = 100; + break; + case IIO_PH: + *val = 1; /* 0.001 */ + *val2 = 1000; + break; + default: + return -EINVAL; + } + return IIO_VAL_FRACTIONAL; + } + + return -EINVAL; +} + +static int atlas_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct atlas_data *data = iio_priv(indio_dev); + __be32 reg = cpu_to_be32(val); + + if (val2 != 0 || val < 0 || val > 20000) + return -EINVAL; + + if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_TEMP) + return -EINVAL; + + return regmap_bulk_write(data->regmap, chan->address, + &reg, sizeof(reg)); +} + +static const struct iio_info atlas_info = { + .driver_module = THIS_MODULE, + .read_raw = atlas_read_raw, + .write_raw = atlas_write_raw, +}; + +static int atlas_check_calibration(struct atlas_data *data) +{ + struct device *dev = &data->client->dev; + int ret; + unsigned int val; + + ret = regmap_read(data->regmap, ATLAS_REG_CALIB_STATUS, &val); + if (ret) + return ret; + + if (!(val & ATLAS_REG_CALIB_STATUS_MASK)) { + dev_warn(dev, "device has not been calibrated\n"); + return 0; + } + + if (!(val & ATLAS_REG_CALIB_STATUS_LOW)) + dev_warn(dev, "device missing low point calibration\n"); + + if (!(val & ATLAS_REG_CALIB_STATUS_MID)) + dev_warn(dev, "device missing mid point calibration\n"); + + if (!(val & ATLAS_REG_CALIB_STATUS_HIGH)) + dev_warn(dev, "device missing high point calibration\n"); + + return 0; +}; + +static int atlas_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct atlas_data *data; + struct iio_trigger *trig; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + indio_dev->info = &atlas_info; + indio_dev->name = ATLAS_DRV_NAME; + indio_dev->channels = atlas_channels; + indio_dev->num_channels = ARRAY_SIZE(atlas_channels); + indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE; + indio_dev->dev.parent = &client->dev; + + trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d", + indio_dev->name, indio_dev->id); + + if (!trig) + return -ENOMEM; + + data = iio_priv(indio_dev); + data->client = client; + data->trig = trig; + trig->dev.parent = indio_dev->dev.parent; + trig->ops = &atlas_interrupt_trigger_ops; + iio_trigger_set_drvdata(trig, indio_dev); + + i2c_set_clientdata(client, indio_dev); + + data->regmap = devm_regmap_init_i2c(client, &atlas_regmap_config); + if (IS_ERR(data->regmap)) { + dev_err(&client->dev, "regmap initialization failed\n"); + return PTR_ERR(data->regmap); + } + + ret = pm_runtime_set_active(&client->dev); + if (ret) + return ret; + + if (client->irq <= 0) { + dev_err(&client->dev, "no valid irq defined\n"); + return -EINVAL; + } + + ret = atlas_check_calibration(data); + if (ret) + return ret; + + ret = iio_trigger_register(trig); + if (ret) { + dev_err(&client->dev, "failed to register trigger\n"); + return ret; + } + + ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + &atlas_trigger_handler, &atlas_buffer_setup_ops); + if (ret) { + dev_err(&client->dev, "cannot setup iio trigger\n"); + goto unregister_trigger; + } + + init_irq_work(&data->work, atlas_work_handler); + + /* interrupt pin toggles on new conversion */ + ret =
devm_request_threaded_irq(&client->dev, client->irq, + NULL, atlas_interrupt_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "atlas_irq", + indio_dev); + if (ret) { + dev_err(&client->dev, "request irq (%d) failed\n", client->irq); + goto unregister_buffer; + } + + ret = atlas_set_powermode(data, 1); + if (ret) { + dev_err(&client->dev, "cannot power device on"); + goto unregister_buffer; + } + + pm_runtime_enable(&client->dev); + pm_runtime_set_autosuspend_delay(&client->dev, 2500); + pm_runtime_use_autosuspend(&client->dev); + + ret = iio_device_register(indio_dev); + if (ret) { + dev_err(&client->dev, "unable to register device\n"); + goto unregister_pm; + } + + return 0; + +unregister_pm: + pm_runtime_disable(&client->dev); + atlas_set_powermode(data, 0); + +unregister_buffer: + iio_triggered_buffer_cleanup(indio_dev); + +unregister_trigger: + iio_trigger_unregister(data->trig); + + return ret; +} + +static int atlas_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct atlas_data *data = iio_priv(indio_dev); + + iio_device_unregister(indio_dev); + iio_triggered_buffer_cleanup(indio_dev); + iio_trigger_unregister(data->trig); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + pm_runtime_put_noidle(&client->dev); + + return atlas_set_powermode(data, 0); +} + +#ifdef CONFIG_PM +static int atlas_runtime_suspend(struct device *dev) +{ + struct atlas_data *data = + iio_priv(i2c_get_clientdata(to_i2c_client(dev))); + + return atlas_set_powermode(data, 0); +} + +static int atlas_runtime_resume(struct device *dev) +{ + struct atlas_data *data = + iio_priv(i2c_get_clientdata(to_i2c_client(dev))); + + return atlas_set_powermode(data, 1); +} +#endif + +static const struct dev_pm_ops atlas_pm_ops = { + SET_RUNTIME_PM_OPS(atlas_runtime_suspend, + atlas_runtime_resume, NULL) +}; + +static const struct i2c_device_id atlas_id[] = { + { "atlas-ph-sm", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, atlas_id); + +static const struct of_device_id atlas_dt_ids[] = { + { .compatible = "atlas,ph-sm" }, + { } +}; +MODULE_DEVICE_TABLE(of, atlas_dt_ids); + +static struct i2c_driver atlas_driver = { + .driver = { + .name = ATLAS_DRV_NAME, + .of_match_table = of_match_ptr(atlas_dt_ids), + .pm = &atlas_pm_ops, + }, + .probe = atlas_probe, + .remove = atlas_remove, + .id_table = atlas_id, +}; +module_i2c_driver(atlas_driver); + +MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>"); +MODULE_DESCRIPTION("Atlas Scientific pH-SM sensor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c index b8b804923230..652649da500f 100644 --- a/drivers/iio/chemical/vz89x.c +++ b/drivers/iio/chemical/vz89x.c @@ -249,7 +249,7 @@ static int vz89x_probe(struct i2c_client *client, I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE)) data->xfer = vz89x_smbus_xfer; else - return -ENOTSUPP; + return -EOPNOTSUPP; i2c_set_clientdata(client, indio_dev); data->client = client; diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 8447c31e27f2..f5a2d445d0c0 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -18,13 +18,15 @@ #include <asm/unaligned.h> #include <linux/iio/common/st_sensors.h> +#include "st_sensors_core.h" + static inline u32 st_sensors_get_unaligned_le24(const u8 *p) { return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8; } -static int 
st_sensors_write_data_with_mask(struct iio_dev *indio_dev, - u8 reg_addr, u8 mask, u8 data) +int st_sensors_write_data_with_mask(struct iio_dev *indio_dev, + u8 reg_addr, u8 mask, u8 data) { int err; u8 new_data; diff --git a/drivers/iio/common/st_sensors/st_sensors_core.h b/drivers/iio/common/st_sensors/st_sensors_core.h new file mode 100644 index 000000000000..cd88098ff6f1 --- /dev/null +++ b/drivers/iio/common/st_sensors/st_sensors_core.h @@ -0,0 +1,8 @@ +/* + * Local functions in the ST Sensors core + */ +#ifndef __ST_SENSORS_CORE_H +#define __ST_SENSORS_CORE_H +int st_sensors_write_data_with_mask(struct iio_dev *indio_dev, + u8 reg_addr, u8 mask, u8 data); +#endif diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index 3e907040c2c7..6a8c98327945 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -14,32 +14,65 @@ #include <linux/iio/iio.h> #include <linux/iio/trigger.h> #include <linux/interrupt.h> - #include <linux/iio/common/st_sensors.h> - +#include "st_sensors_core.h" int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops) { - int err; + int err, irq; struct st_sensor_data *sdata = iio_priv(indio_dev); + unsigned long irq_trig; sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name); if (sdata->trig == NULL) { - err = -ENOMEM; dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n"); - goto iio_trigger_alloc_error; + return -ENOMEM; } - err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev), + irq = sdata->get_irq_data_ready(indio_dev); + irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); + /* + * If the IRQ is triggered on falling edge, we need to mark the + * interrupt as active low, if the hardware supports this. + */ + if (irq_trig == IRQF_TRIGGER_FALLING) { + if (!sdata->sensor_settings->drdy_irq.addr_ihl) { + dev_err(&indio_dev->dev, + "falling edge specified for IRQ but hardware " + "only support rising edge, will request " + "rising edge\n"); + irq_trig = IRQF_TRIGGER_RISING; + } else { + /* Set up INT active low i.e. 
falling edge */ + err = st_sensors_write_data_with_mask(indio_dev, + sdata->sensor_settings->drdy_irq.addr_ihl, + sdata->sensor_settings->drdy_irq.mask_ihl, 1); + if (err < 0) + goto iio_trigger_free; + dev_info(&indio_dev->dev, + "interrupts on the falling edge\n"); + } + } else if (irq_trig == IRQF_TRIGGER_RISING) { + dev_info(&indio_dev->dev, + "interrupts on the rising edge\n"); + + } else { + dev_err(&indio_dev->dev, + "unsupported IRQ trigger specified (%lx), only " + "rising and falling edges supported, enforce " + "rising edge\n", irq_trig); + irq_trig = IRQF_TRIGGER_RISING; + } + err = request_threaded_irq(irq, iio_trigger_generic_data_rdy_poll, NULL, - IRQF_TRIGGER_RISING, + irq_trig, sdata->trig->name, sdata->trig); if (err) { dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n"); - goto request_irq_error; + goto iio_trigger_free; } iio_trigger_set_drvdata(sdata->trig, indio_dev); @@ -57,9 +90,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, iio_trigger_register_error: free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig); -request_irq_error: +iio_trigger_free: iio_trigger_free(sdata->trig); -iio_trigger_alloc_error: return err; } EXPORT_SYMBOL(st_sensors_allocate_trigger); diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index e701e28fb1cd..a995139f907c 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -10,8 +10,10 @@ config AD5064 depends on (SPI_MASTER && I2C!=m) || I2C help Say yes here to build support for Analog Devices AD5024, AD5025, AD5044, - AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668, - AD5669R Digital to Analog Converter. + AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R, AD5627, AD5627R, + AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R, AD5666, + AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616, + LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to Analog Converter. To compile this driver as a module, choose M here: the module will be called ad5064. @@ -111,6 +113,16 @@ config AD5755 To compile this driver as a module, choose M here: the module will be called ad5755. +config AD5761 + tristate "Analog Devices AD5761/61R/21/21R DAC driver" + depends on SPI_MASTER + help + Say yes here to build support for Analog Devices AD5761, AD5761R, AD5721, + AD5721R Digital to Analog Converter. + + To compile this driver as a module, choose M here: the + module will be called ad5761. + config AD5764 tristate "Analog Devices AD5764/64R/44/44R DAC driver" depends on SPI_MASTER @@ -176,11 +188,11 @@ config MAX5821 10 bits DAC. config MCP4725 - tristate "MCP4725 DAC driver" + tristate "MCP4725/6 DAC driver" depends on I2C ---help--- Say Y here if you want to build a driver for the Microchip - MCP 4725 12-bit digital-to-analog converter (DAC) with I2C + MCP 4725/6 12-bit digital-to-analog converter (DAC) with I2C interface. To compile this driver as a module, choose M here: the module @@ -196,4 +208,23 @@ config MCP4922 To compile this driver as a module, choose M here: the module will be called mcp4922. +config STX104 + tristate "Apex Embedded Systems STX104 DAC driver" + depends on ISA + help + Say yes here to build support for the 2-channel DAC on the Apex + Embedded Systems STX104 integrated analog PC/104 card. The base port + addresses for the devices may be configured via the "base" module + parameter array. 
+ +config VF610_DAC + tristate "Vybrid vf610 DAC driver" + depends on OF + depends on HAS_IOMEM + help + Say yes here to support Vybrid board digital-to-analog converter. + + This driver can also be built as a module. If so, the module will + be called vf610_dac. + endmenu diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 63ae05633e0c..67b48429686d 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_AD5504) += ad5504.o obj-$(CONFIG_AD5446) += ad5446.o obj-$(CONFIG_AD5449) += ad5449.o obj-$(CONFIG_AD5755) += ad5755.o +obj-$(CONFIG_AD5761) += ad5761.o obj-$(CONFIG_AD5764) += ad5764.o obj-$(CONFIG_AD5791) += ad5791.o obj-$(CONFIG_AD5686) += ad5686.o @@ -21,3 +22,5 @@ obj-$(CONFIG_MAX517) += max517.o obj-$(CONFIG_MAX5821) += max5821.o obj-$(CONFIG_MCP4725) += mcp4725.o obj-$(CONFIG_MCP4922) += mcp4922.o +obj-$(CONFIG_STX104) += stx104.o +obj-$(CONFIG_VF610_DAC) += vf610_dac.o diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index 81ca0081a019..6803e4a137cd 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c @@ -1,6 +1,9 @@ /* - * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, - * AD5648, AD5666, AD5668, AD5669R Digital to analog converters driver + * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R, + * AD5627, AD5627R, AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R, + * AD5666, AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616, + * LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to analog converters + * driver * * Copyright 2011 Analog Devices Inc. * @@ -39,6 +42,9 @@ #define AD5064_CMD_RESET 0x7 #define AD5064_CMD_CONFIG 0x8 +#define AD5064_CMD_RESET_V2 0x5 +#define AD5064_CMD_CONFIG_V2 0x7 + #define AD5064_CONFIG_DAISY_CHAIN_ENABLE BIT(1) #define AD5064_CONFIG_INT_VREF_ENABLE BIT(0) @@ -48,12 +54,25 @@ #define AD5064_LDAC_PWRDN_3STATE 0x3 /** + * enum ad5064_regmap_type - Register layout variant + * @AD5064_REGMAP_ADI: Old Analog Devices register map layout + * @AD5064_REGMAP_ADI2: New Analog Devices register map layout + * @AD5064_REGMAP_LTC: LTC register map layout + */ +enum ad5064_regmap_type { + AD5064_REGMAP_ADI, + AD5064_REGMAP_ADI2, + AD5064_REGMAP_LTC, +}; + +/** * struct ad5064_chip_info - chip specific information * @shared_vref: whether the vref supply is shared between channels - * @internal_vref: internal reference voltage. 0 if the chip has no internal - * vref. + * @internal_vref: internal reference voltage. 0 if the chip has no + internal vref. 
* @channel: channel specification * @num_channels: number of channels + * @regmap_type: register map layout variant */ struct ad5064_chip_info { @@ -61,6 +80,7 @@ struct ad5064_chip_info { unsigned long internal_vref; const struct iio_chan_spec *channels; unsigned int num_channels; + enum ad5064_regmap_type regmap_type; }; struct ad5064_state; @@ -111,18 +131,43 @@ enum ad5064_type { ID_AD5064, ID_AD5064_1, ID_AD5065, + ID_AD5625, + ID_AD5625R_1V25, + ID_AD5625R_2V5, + ID_AD5627, + ID_AD5627R_1V25, + ID_AD5627R_2V5, ID_AD5628_1, ID_AD5628_2, ID_AD5629_1, ID_AD5629_2, + ID_AD5645R_1V25, + ID_AD5645R_2V5, + ID_AD5647R_1V25, + ID_AD5647R_2V5, ID_AD5648_1, ID_AD5648_2, + ID_AD5665, + ID_AD5665R_1V25, + ID_AD5665R_2V5, ID_AD5666_1, ID_AD5666_2, + ID_AD5667, + ID_AD5667R_1V25, + ID_AD5667R_2V5, ID_AD5668_1, ID_AD5668_2, ID_AD5669_1, ID_AD5669_2, + ID_LTC2606, + ID_LTC2607, + ID_LTC2609, + ID_LTC2616, + ID_LTC2617, + ID_LTC2619, + ID_LTC2626, + ID_LTC2627, + ID_LTC2629, }; static int ad5064_write(struct ad5064_state *st, unsigned int cmd, @@ -136,15 +181,27 @@ static int ad5064_write(struct ad5064_state *st, unsigned int cmd, static int ad5064_sync_powerdown_mode(struct ad5064_state *st, const struct iio_chan_spec *chan) { - unsigned int val; + unsigned int val, address; + unsigned int shift; int ret; - val = (0x1 << chan->address); + if (st->chip_info->regmap_type == AD5064_REGMAP_LTC) { + val = 0; + address = chan->address; + } else { + if (st->chip_info->regmap_type == AD5064_REGMAP_ADI2) + shift = 4; + else + shift = 8; + + val = (0x1 << chan->address); + address = 0; - if (st->pwr_down[chan->channel]) - val |= st->pwr_down_mode[chan->channel] << 8; + if (st->pwr_down[chan->channel]) + val |= st->pwr_down_mode[chan->channel] << shift; + } - ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0); + ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, address, val, 0); return ret; } @@ -155,6 +212,10 @@ static const char * const ad5064_powerdown_modes[] = { "three_state", }; +static const char * const ltc2617_powerdown_modes[] = { + "90kohm_to_gnd", +}; + static int ad5064_get_powerdown_mode(struct iio_dev *indio_dev, const struct iio_chan_spec *chan) { @@ -185,6 +246,13 @@ static const struct iio_enum ad5064_powerdown_mode_enum = { .set = ad5064_set_powerdown_mode, }; +static const struct iio_enum ltc2617_powerdown_mode_enum = { + .items = ltc2617_powerdown_modes, + .num_items = ARRAY_SIZE(ltc2617_powerdown_modes), + .get = ad5064_get_powerdown_mode, + .set = ad5064_set_powerdown_mode, +}; + static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev, uintptr_t private, const struct iio_chan_spec *chan, char *buf) { @@ -295,7 +363,19 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { { }, }; -#define AD5064_CHANNEL(chan, addr, bits, _shift) { \ +static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = { + { + .name = "powerdown", + .read = ad5064_read_dac_powerdown, + .write = ad5064_write_dac_powerdown, + .shared = IIO_SEPARATE, + }, + IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ltc2617_powerdown_mode_enum), + IIO_ENUM_AVAILABLE("powerdown_mode", &ltc2617_powerdown_mode_enum), + { }, +}; + +#define AD5064_CHANNEL(chan, addr, bits, _shift, _ext_info) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ @@ -309,145 +389,340 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { .storagebits = 16, \ .shift = (_shift), \ }, \ - .ext_info = ad5064_ext_info, \ + .ext_info = (_ext_info), \ } -#define DECLARE_AD5064_CHANNELS(name, bits, shift) \ +#define
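/*
 * Summary of the three register-map layouts, as encoded by
 * ad5064_sync_powerdown_mode() above and ad5064_i2c_write() further down:
 *
 *   AD5064_REGMAP_ADI  - original parts: the power-down command carries a
 *                        per-channel bitmask (val = BIT(chan->address)) and,
 *                        when powered down, the mode code shifted up by 8
 *                        (bits [9:8]); the command's address field stays 0.
 *   AD5064_REGMAP_ADI2 - the newly added AD5625(R)/AD5627(R)/AD5645R/
 *                        AD5647R/AD5665(R)/AD5667(R) families: same scheme,
 *                        but the mode code sits at bits [5:4] (shift = 4),
 *                        and the command is shifted by only 3 bits when the
 *                        I2C command byte is built (cmd_shift = 3 in
 *                        ad5064_i2c_write()).
 *   AD5064_REGMAP_LTC  - the LTC parts: only one power-down state exists
 *                        ("90kohm_to_gnd"), so val is 0 and the channel
 *                        index is passed in the command's address field
 *                        instead.
 */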
DECLARE_AD5064_CHANNELS(name, bits, shift, ext_info) \ const struct iio_chan_spec name[] = { \ - AD5064_CHANNEL(0, 0, bits, shift), \ - AD5064_CHANNEL(1, 1, bits, shift), \ - AD5064_CHANNEL(2, 2, bits, shift), \ - AD5064_CHANNEL(3, 3, bits, shift), \ - AD5064_CHANNEL(4, 4, bits, shift), \ - AD5064_CHANNEL(5, 5, bits, shift), \ - AD5064_CHANNEL(6, 6, bits, shift), \ - AD5064_CHANNEL(7, 7, bits, shift), \ + AD5064_CHANNEL(0, 0, bits, shift, ext_info), \ + AD5064_CHANNEL(1, 1, bits, shift, ext_info), \ + AD5064_CHANNEL(2, 2, bits, shift, ext_info), \ + AD5064_CHANNEL(3, 3, bits, shift, ext_info), \ + AD5064_CHANNEL(4, 4, bits, shift, ext_info), \ + AD5064_CHANNEL(5, 5, bits, shift, ext_info), \ + AD5064_CHANNEL(6, 6, bits, shift, ext_info), \ + AD5064_CHANNEL(7, 7, bits, shift, ext_info), \ } -#define DECLARE_AD5065_CHANNELS(name, bits, shift) \ +#define DECLARE_AD5065_CHANNELS(name, bits, shift, ext_info) \ const struct iio_chan_spec name[] = { \ - AD5064_CHANNEL(0, 0, bits, shift), \ - AD5064_CHANNEL(1, 3, bits, shift), \ + AD5064_CHANNEL(0, 0, bits, shift, ext_info), \ + AD5064_CHANNEL(1, 3, bits, shift, ext_info), \ } -static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8); -static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6); -static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4); +static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8, ad5064_ext_info); +static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6, ad5064_ext_info); +static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4, ad5064_ext_info); + +static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8, ad5064_ext_info); +static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6, ad5064_ext_info); +static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4, ad5064_ext_info); -static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8); -static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6); -static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4); +static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4, ad5064_ext_info); +static DECLARE_AD5064_CHANNELS(ad5645_channels, 14, 2, ad5064_ext_info); +static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0, ad5064_ext_info); -static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4); -static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0); +static DECLARE_AD5064_CHANNELS(ltc2607_channels, 16, 0, ltc2617_ext_info); +static DECLARE_AD5064_CHANNELS(ltc2617_channels, 14, 2, ltc2617_ext_info); +static DECLARE_AD5064_CHANNELS(ltc2627_channels, 12, 4, ltc2617_ext_info); static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { [ID_AD5024] = { .shared_vref = false, .channels = ad5024_channels, .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5025] = { .shared_vref = false, .channels = ad5025_channels, .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5044] = { .shared_vref = false, .channels = ad5044_channels, .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5045] = { .shared_vref = false, .channels = ad5045_channels, .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5064] = { .shared_vref = false, .channels = ad5064_channels, .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5064_1] = { .shared_vref = true, .channels = ad5064_channels, .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5065] = { .shared_vref = false, .channels = ad5065_channels, .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI, + }, + [ID_AD5625] = { + .shared_vref = true, + .channels = ad5629_channels, + .num_channels = 4, + .regmap_type = 
AD5064_REGMAP_ADI2 + }, + [ID_AD5625R_1V25] = { + .shared_vref = true, + .internal_vref = 1250000, + .channels = ad5629_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5625R_2V5] = { + .shared_vref = true, + .internal_vref = 2500000, + .channels = ad5629_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5627] = { + .shared_vref = true, + .channels = ad5629_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5627R_1V25] = { + .shared_vref = true, + .internal_vref = 1250000, + .channels = ad5629_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5627R_2V5] = { + .shared_vref = true, + .internal_vref = 2500000, + .channels = ad5629_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI2 }, [ID_AD5628_1] = { .shared_vref = true, .internal_vref = 2500000, .channels = ad5024_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5628_2] = { .shared_vref = true, .internal_vref = 5000000, .channels = ad5024_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5629_1] = { .shared_vref = true, .internal_vref = 2500000, .channels = ad5629_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5629_2] = { .shared_vref = true, .internal_vref = 5000000, .channels = ad5629_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, + }, + [ID_AD5645R_1V25] = { + .shared_vref = true, + .internal_vref = 1250000, + .channels = ad5645_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5645R_2V5] = { + .shared_vref = true, + .internal_vref = 2500000, + .channels = ad5645_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5647R_1V25] = { + .shared_vref = true, + .internal_vref = 1250000, + .channels = ad5645_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5647R_2V5] = { + .shared_vref = true, + .internal_vref = 2500000, + .channels = ad5645_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI2 }, [ID_AD5648_1] = { .shared_vref = true, .internal_vref = 2500000, .channels = ad5044_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5648_2] = { .shared_vref = true, .internal_vref = 5000000, .channels = ad5044_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, + }, + [ID_AD5665] = { + .shared_vref = true, + .channels = ad5669_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5665R_1V25] = { + .shared_vref = true, + .internal_vref = 1250000, + .channels = ad5669_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5665R_2V5] = { + .shared_vref = true, + .internal_vref = 2500000, + .channels = ad5669_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI2 }, [ID_AD5666_1] = { .shared_vref = true, .internal_vref = 2500000, .channels = ad5064_channels, .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5666_2] = { .shared_vref = true, .internal_vref = 5000000, .channels = ad5064_channels, .num_channels = 4, + .regmap_type = AD5064_REGMAP_ADI, + }, + [ID_AD5667] = { + .shared_vref = true, + .channels = ad5669_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5667R_1V25] = { + .shared_vref = true, + .internal_vref = 1250000, + .channels = ad5669_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI2 + }, + [ID_AD5667R_2V5] = { + .shared_vref = true, + .internal_vref = 
2500000, + .channels = ad5669_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_ADI2 }, [ID_AD5668_1] = { .shared_vref = true, .internal_vref = 2500000, .channels = ad5064_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5668_2] = { .shared_vref = true, .internal_vref = 5000000, .channels = ad5064_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5669_1] = { .shared_vref = true, .internal_vref = 2500000, .channels = ad5669_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, }, [ID_AD5669_2] = { .shared_vref = true, .internal_vref = 5000000, .channels = ad5669_channels, .num_channels = 8, + .regmap_type = AD5064_REGMAP_ADI, + }, + [ID_LTC2606] = { + .shared_vref = true, + .internal_vref = 0, + .channels = ltc2607_channels, + .num_channels = 1, + .regmap_type = AD5064_REGMAP_LTC, + }, + [ID_LTC2607] = { + .shared_vref = true, + .internal_vref = 0, + .channels = ltc2607_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_LTC, + }, + [ID_LTC2609] = { + .shared_vref = false, + .internal_vref = 0, + .channels = ltc2607_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_LTC, + }, + [ID_LTC2616] = { + .shared_vref = true, + .internal_vref = 0, + .channels = ltc2617_channels, + .num_channels = 1, + .regmap_type = AD5064_REGMAP_LTC, + }, + [ID_LTC2617] = { + .shared_vref = true, + .internal_vref = 0, + .channels = ltc2617_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_LTC, + }, + [ID_LTC2619] = { + .shared_vref = false, + .internal_vref = 0, + .channels = ltc2617_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_LTC, + }, + [ID_LTC2626] = { + .shared_vref = true, + .internal_vref = 0, + .channels = ltc2627_channels, + .num_channels = 1, + .regmap_type = AD5064_REGMAP_LTC, + }, + [ID_LTC2627] = { + .shared_vref = true, + .internal_vref = 0, + .channels = ltc2627_channels, + .num_channels = 2, + .regmap_type = AD5064_REGMAP_LTC, + }, + [ID_LTC2629] = { + .shared_vref = false, + .internal_vref = 0, + .channels = ltc2627_channels, + .num_channels = 4, + .regmap_type = AD5064_REGMAP_LTC, }, }; @@ -469,6 +744,22 @@ static const char * const ad5064_vref_name(struct ad5064_state *st, return st->chip_info->shared_vref ? 
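/*
 * The LTC entries above follow a regular naming pattern: LTC2606/2607/2609
 * are 16-bit, LTC2616/2617/2619 are 14-bit and LTC2626/2627/2629 are 12-bit
 * parts, which is why they reuse the ltc2607/ltc2617/ltc2627 channel specs;
 * within each trio the first is a single-channel, the second a dual and the
 * third a quad device with per-channel references (shared_vref = false).
 */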
"vref" : ad5064_vref_names[vref]; } +static int ad5064_set_config(struct ad5064_state *st, unsigned int val) +{ + unsigned int cmd; + + switch (st->chip_info->regmap_type) { + case AD5064_REGMAP_ADI2: + cmd = AD5064_CMD_CONFIG_V2; + break; + default: + cmd = AD5064_CMD_CONFIG; + break; + } + + return ad5064_write(st, cmd, 0, val, 0); +} + static int ad5064_probe(struct device *dev, enum ad5064_type type, const char *name, ad5064_write_func write) { @@ -498,8 +789,7 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type, if (!st->chip_info->internal_vref) return ret; st->use_internal_vref = true; - ret = ad5064_write(st, AD5064_CMD_CONFIG, 0, - AD5064_CONFIG_INT_VREF_ENABLE, 0); + ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE); if (ret) { dev_err(dev, "Failed to enable internal vref: %d\n", ret); @@ -628,9 +918,19 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd, unsigned int addr, unsigned int val) { struct i2c_client *i2c = to_i2c_client(st->dev); + unsigned int cmd_shift; int ret; - st->data.i2c[0] = (cmd << 4) | addr; + switch (st->chip_info->regmap_type) { + case AD5064_REGMAP_ADI2: + cmd_shift = 3; + break; + default: + cmd_shift = 4; + break; + } + + st->data.i2c[0] = (cmd << cmd_shift) | addr; put_unaligned_be16(val, &st->data.i2c[1]); ret = i2c_master_send(i2c, st->data.i2c, 3); @@ -653,12 +953,35 @@ static int ad5064_i2c_remove(struct i2c_client *i2c) } static const struct i2c_device_id ad5064_i2c_ids[] = { + {"ad5625", ID_AD5625 }, + {"ad5625r-1v25", ID_AD5625R_1V25 }, + {"ad5625r-2v5", ID_AD5625R_2V5 }, + {"ad5627", ID_AD5627 }, + {"ad5627r-1v25", ID_AD5627R_1V25 }, + {"ad5627r-2v5", ID_AD5627R_2V5 }, {"ad5629-1", ID_AD5629_1}, {"ad5629-2", ID_AD5629_2}, {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */ + {"ad5645r-1v25", ID_AD5645R_1V25 }, + {"ad5645r-2v5", ID_AD5645R_2V5 }, + {"ad5665", ID_AD5665 }, + {"ad5665r-1v25", ID_AD5665R_1V25 }, + {"ad5665r-2v5", ID_AD5665R_2V5 }, + {"ad5667", ID_AD5667 }, + {"ad5667r-1v25", ID_AD5667R_1V25 }, + {"ad5667r-2v5", ID_AD5667R_2V5 }, {"ad5669-1", ID_AD5669_1}, {"ad5669-2", ID_AD5669_2}, {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */ + {"ltc2606", ID_LTC2606}, + {"ltc2607", ID_LTC2607}, + {"ltc2609", ID_LTC2609}, + {"ltc2616", ID_LTC2616}, + {"ltc2617", ID_LTC2617}, + {"ltc2619", ID_LTC2619}, + {"ltc2626", ID_LTC2626}, + {"ltc2627", ID_LTC2627}, + {"ltc2629", ID_LTC2629}, {} }; MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c new file mode 100644 index 000000000000..d6510d6928b3 --- /dev/null +++ b/drivers/iio/dac/ad5761.c @@ -0,0 +1,430 @@ +/* + * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter + * + * Copyright 2016 Qtechnology A/S + * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com> + * + * Licensed under the GPL-2. 
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/bitops.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/regulator/consumer.h> +#include <linux/platform_data/ad5761.h> + +#define AD5761_ADDR(addr) ((addr & 0xf) << 16) +#define AD5761_ADDR_NOOP 0x0 +#define AD5761_ADDR_DAC_WRITE 0x3 +#define AD5761_ADDR_CTRL_WRITE_REG 0x4 +#define AD5761_ADDR_SW_DATA_RESET 0x7 +#define AD5761_ADDR_DAC_READ 0xb +#define AD5761_ADDR_CTRL_READ_REG 0xc +#define AD5761_ADDR_SW_FULL_RESET 0xf + +#define AD5761_CTRL_USE_INTVREF BIT(5) +#define AD5761_CTRL_ETS BIT(6) + +/** + * struct ad5761_chip_info - chip specific information + * @int_vref: Value of the internal reference voltage in mV - 0 if external + * reference voltage is used + * @channel: channel specification +*/ + +struct ad5761_chip_info { + unsigned long int_vref; + const struct iio_chan_spec channel; +}; + +struct ad5761_range_params { + int m; + int c; +}; + +enum ad5761_supported_device_ids { + ID_AD5721, + ID_AD5721R, + ID_AD5761, + ID_AD5761R, +}; + +/** + * struct ad5761_state - driver instance specific data + * @spi: spi_device + * @vref_reg: reference voltage regulator + * @use_intref: true when the internal voltage reference is used + * @vref: actual voltage reference in mVolts + * @range: output range mode used + * @data: cache aligned spi buffer + */ +struct ad5761_state { + struct spi_device *spi; + struct regulator *vref_reg; + + bool use_intref; + int vref; + enum ad5761_voltage_range range; + + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + union { + __be32 d32; + u8 d8[4]; + } data[3] ____cacheline_aligned; +}; + +static const struct ad5761_range_params ad5761_range_params[] = { + [AD5761_VOLTAGE_RANGE_M10V_10V] = { + .m = 80, + .c = 40, + }, + [AD5761_VOLTAGE_RANGE_0V_10V] = { + .m = 40, + .c = 0, + }, + [AD5761_VOLTAGE_RANGE_M5V_5V] = { + .m = 40, + .c = 20, + }, + [AD5761_VOLTAGE_RANGE_0V_5V] = { + .m = 20, + .c = 0, + }, + [AD5761_VOLTAGE_RANGE_M2V5_7V5] = { + .m = 40, + .c = 10, + }, + [AD5761_VOLTAGE_RANGE_M3V_3V] = { + .m = 24, + .c = 12, + }, + [AD5761_VOLTAGE_RANGE_0V_16V] = { + .m = 64, + .c = 0, + }, + [AD5761_VOLTAGE_RANGE_0V_20V] = { + .m = 80, + .c = 0, + }, +}; + +static int _ad5761_spi_write(struct ad5761_state *st, u8 addr, u16 val) +{ + st->data[0].d32 = cpu_to_be32(AD5761_ADDR(addr) | val); + + return spi_write(st->spi, &st->data[0].d8[1], 3); +} + +static int ad5761_spi_write(struct iio_dev *indio_dev, u8 addr, u16 val) +{ + struct ad5761_state *st = iio_priv(indio_dev); + int ret; + + mutex_lock(&indio_dev->mlock); + ret = _ad5761_spi_write(st, addr, val); + mutex_unlock(&indio_dev->mlock); + + return ret; +} + +static int _ad5761_spi_read(struct ad5761_state *st, u8 addr, u16 *val) +{ + int ret; + struct spi_transfer xfers[] = { + { + .tx_buf = &st->data[0].d8[1], + .bits_per_word = 8, + .len = 3, + .cs_change = true, + }, { + .tx_buf = &st->data[1].d8[1], + .rx_buf = &st->data[2].d8[1], + .bits_per_word = 8, + .len = 3, + }, + }; + + st->data[0].d32 = cpu_to_be32(AD5761_ADDR(addr)); + st->data[1].d32 = cpu_to_be32(AD5761_ADDR(AD5761_ADDR_NOOP)); + + ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers)); + + *val = be32_to_cpu(st->data[2].d32); + + return ret; +} + +static int ad5761_spi_read(struct iio_dev *indio_dev, u8 addr, u16 *val) +{ + struct ad5761_state *st = iio_priv(indio_dev); + int ret; + + mutex_lock(&indio_dev->mlock); + ret = 
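/*
 * Wire format used by _ad5761_spi_write() and _ad5761_spi_read() above: the
 * 32-bit word is built as AD5761_ADDR(addr) | val and sent big-endian
 * starting at d8[1], i.e. three bytes on the bus - the 4-bit register
 * address in the low nibble of the first byte, then the 16 data bits MSB
 * first. Writing 0x8000 to the DAC register (address 0x3), for example,
 * produces the byte sequence 0x03 0x80 0x00.
 */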
_ad5761_spi_read(st, addr, val); + mutex_unlock(&indio_dev->mlock); + + return ret; +} + +static int ad5761_spi_set_range(struct ad5761_state *st, + enum ad5761_voltage_range range) +{ + u16 aux; + int ret; + + aux = (range & 0x7) | AD5761_CTRL_ETS; + + if (st->use_intref) + aux |= AD5761_CTRL_USE_INTVREF; + + ret = _ad5761_spi_write(st, AD5761_ADDR_SW_FULL_RESET, 0); + if (ret) + return ret; + + ret = _ad5761_spi_write(st, AD5761_ADDR_CTRL_WRITE_REG, aux); + if (ret) + return ret; + + st->range = range; + + return 0; +} + +static int ad5761_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, + int *val2, + long mask) +{ + struct ad5761_state *st; + int ret; + u16 aux; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = ad5761_spi_read(indio_dev, AD5761_ADDR_DAC_READ, &aux); + if (ret) + return ret; + *val = aux >> chan->scan_type.shift; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + st = iio_priv(indio_dev); + *val = st->vref * ad5761_range_params[st->range].m; + *val /= 10; + *val2 = chan->scan_type.realbits; + return IIO_VAL_FRACTIONAL_LOG2; + case IIO_CHAN_INFO_OFFSET: + st = iio_priv(indio_dev); + *val = -(1 << chan->scan_type.realbits); + *val *= ad5761_range_params[st->range].c; + *val /= ad5761_range_params[st->range].m; + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int ad5761_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, + int val2, + long mask) +{ + u16 aux; + + if (mask != IIO_CHAN_INFO_RAW) + return -EINVAL; + + if (val2 || (val << chan->scan_type.shift) > 0xffff || val < 0) + return -EINVAL; + + aux = val << chan->scan_type.shift; + + return ad5761_spi_write(indio_dev, AD5761_ADDR_DAC_WRITE, aux); +} + +static const struct iio_info ad5761_info = { + .read_raw = &ad5761_read_raw, + .write_raw = &ad5761_write_raw, + .driver_module = THIS_MODULE, +}; + +#define AD5761_CHAN(_bits) { \ + .type = IIO_VOLTAGE, \ + .output = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .scan_type = { \ + .sign = 'u', \ + .realbits = (_bits), \ + .storagebits = 16, \ + .shift = 16 - (_bits), \ + }, \ +} + +static const struct ad5761_chip_info ad5761_chip_infos[] = { + [ID_AD5721] = { + .int_vref = 0, + .channel = AD5761_CHAN(12), + }, + [ID_AD5721R] = { + .int_vref = 2500, + .channel = AD5761_CHAN(12), + }, + [ID_AD5761] = { + .int_vref = 0, + .channel = AD5761_CHAN(16), + }, + [ID_AD5761R] = { + .int_vref = 2500, + .channel = AD5761_CHAN(16), + }, +}; + +static int ad5761_get_vref(struct ad5761_state *st, + const struct ad5761_chip_info *chip_info) +{ + int ret; + + st->vref_reg = devm_regulator_get_optional(&st->spi->dev, "vref"); + if (PTR_ERR(st->vref_reg) == -ENODEV) { + /* Use Internal regulator */ + if (!chip_info->int_vref) { + dev_err(&st->spi->dev, + "Voltage reference not found\n"); + return -EIO; + } + + st->use_intref = true; + st->vref = chip_info->int_vref; + return 0; + } + + if (IS_ERR(st->vref_reg)) { + dev_err(&st->spi->dev, + "Error getting voltage reference regulator\n"); + return PTR_ERR(st->vref_reg); + } + + ret = regulator_enable(st->vref_reg); + if (ret) { + dev_err(&st->spi->dev, + "Failed to enable voltage reference\n"); + return ret; + } + + ret = regulator_get_voltage(st->vref_reg); + if (ret < 0) { + dev_err(&st->spi->dev, + "Failed to get voltage reference value\n"); + goto disable_regulator_vref; + } + + if (ret < 2000000 || ret > 3000000) { + dev_warn(&st->spi->dev, + 
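/*
 * Worked example for the scale and offset reported by ad5761_read_raw()
 * above, assuming an AD5761R running from its 2.5 V internal reference in
 * the +/-10 V range (m = 80, c = 40, 16-bit channel):
 *   scale  = 2500 mV * 80 / 10 = 20000 mV over 2^16 codes
 *          ~ 0.3052 mV per LSB (returned as IIO_VAL_FRACTIONAL_LOG2)
 *   offset = -(1 << 16) * 40 / 80 = -32768 codes
 * so raw code 0x0000 corresponds to (0 - 32768) * 0.3052 mV ~ -10 V and
 * 0xFFFF to ~ +10 V, matching the selected span.
 */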
"Invalid external voltage ref. value %d uV\n", ret); + ret = -EIO; + goto disable_regulator_vref; + } + + st->vref = ret / 1000; + st->use_intref = false; + + return 0; + +disable_regulator_vref: + regulator_disable(st->vref_reg); + st->vref_reg = NULL; + return ret; +} + +static int ad5761_probe(struct spi_device *spi) +{ + struct iio_dev *iio_dev; + struct ad5761_state *st; + int ret; + const struct ad5761_chip_info *chip_info = + &ad5761_chip_infos[spi_get_device_id(spi)->driver_data]; + enum ad5761_voltage_range voltage_range = AD5761_VOLTAGE_RANGE_0V_5V; + struct ad5761_platform_data *pdata = dev_get_platdata(&spi->dev); + + iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (!iio_dev) + return -ENOMEM; + + st = iio_priv(iio_dev); + + st->spi = spi; + spi_set_drvdata(spi, iio_dev); + + ret = ad5761_get_vref(st, chip_info); + if (ret) + return ret; + + if (pdata) + voltage_range = pdata->voltage_range; + + ret = ad5761_spi_set_range(st, voltage_range); + if (ret) + goto disable_regulator_err; + + iio_dev->dev.parent = &spi->dev; + iio_dev->info = &ad5761_info; + iio_dev->modes = INDIO_DIRECT_MODE; + iio_dev->channels = &chip_info->channel; + iio_dev->num_channels = 1; + iio_dev->name = spi_get_device_id(st->spi)->name; + ret = iio_device_register(iio_dev); + if (ret) + goto disable_regulator_err; + + return 0; + +disable_regulator_err: + if (!IS_ERR_OR_NULL(st->vref_reg)) + regulator_disable(st->vref_reg); + + return ret; +} + +static int ad5761_remove(struct spi_device *spi) +{ + struct iio_dev *iio_dev = spi_get_drvdata(spi); + struct ad5761_state *st = iio_priv(iio_dev); + + iio_device_unregister(iio_dev); + + if (!IS_ERR_OR_NULL(st->vref_reg)) + regulator_disable(st->vref_reg); + + return 0; +} + +static const struct spi_device_id ad5761_id[] = { + {"ad5721", ID_AD5721}, + {"ad5721r", ID_AD5721R}, + {"ad5761", ID_AD5761}, + {"ad5761r", ID_AD5761R}, + {} +}; +MODULE_DEVICE_TABLE(spi, ad5761_id); + +static struct spi_driver ad5761_driver = { + .driver = { + .name = "ad5761", + }, + .probe = ad5761_probe, + .remove = ad5761_remove, + .id_table = ad5761_id, +}; +module_spi_driver(ad5761_driver); + +MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>"); +MODULE_DESCRIPTION("Analog Devices AD5721, AD5721R, AD5761, AD5761R driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index b4dde8315210..cca935c06f2b 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -1,5 +1,5 @@ /* - * mcp4725.c - Support for Microchip MCP4725 + * mcp4725.c - Support for Microchip MCP4725/6 * * Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net> * @@ -134,6 +134,12 @@ static const char * const mcp4725_powerdown_modes[] = { "500kohm_to_gnd" }; +static const char * const mcp4726_powerdown_modes[] = { + "1kohm_to_gnd", + "125kohm_to_gnd", + "640kohm_to_gnd" +}; + static int mcp4725_get_powerdown_mode(struct iio_dev *indio_dev, const struct iio_chan_spec *chan) { @@ -182,11 +188,24 @@ static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev, return len; } -static const struct iio_enum mcp4725_powerdown_mode_enum = { - .items = mcp4725_powerdown_modes, - .num_items = ARRAY_SIZE(mcp4725_powerdown_modes), - .get = mcp4725_get_powerdown_mode, - .set = mcp4725_set_powerdown_mode, +enum { + MCP4725, + MCP4726, +}; + +static const struct iio_enum mcp472x_powerdown_mode_enum[] = { + [MCP4725] = { + .items = mcp4725_powerdown_modes, + .num_items = ARRAY_SIZE(mcp4725_powerdown_modes), + .get = mcp4725_get_powerdown_mode, + .set 
= mcp4725_set_powerdown_mode, + }, + [MCP4726] = { + .items = mcp4726_powerdown_modes, + .num_items = ARRAY_SIZE(mcp4726_powerdown_modes), + .get = mcp4725_get_powerdown_mode, + .set = mcp4725_set_powerdown_mode, + }, }; static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = { @@ -196,19 +215,46 @@ static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = { .write = mcp4725_write_powerdown, .shared = IIO_SEPARATE, }, - IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp4725_powerdown_mode_enum), - IIO_ENUM_AVAILABLE("powerdown_mode", &mcp4725_powerdown_mode_enum), + IIO_ENUM("powerdown_mode", IIO_SEPARATE, + &mcp472x_powerdown_mode_enum[MCP4725]), + IIO_ENUM_AVAILABLE("powerdown_mode", + &mcp472x_powerdown_mode_enum[MCP4725]), + { }, +}; + +static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = { + { + .name = "powerdown", + .read = mcp4725_read_powerdown, + .write = mcp4725_write_powerdown, + .shared = IIO_SEPARATE, + }, + IIO_ENUM("powerdown_mode", IIO_SEPARATE, + &mcp472x_powerdown_mode_enum[MCP4726]), + IIO_ENUM_AVAILABLE("powerdown_mode", + &mcp472x_powerdown_mode_enum[MCP4726]), { }, }; -static const struct iio_chan_spec mcp4725_channel = { - .type = IIO_VOLTAGE, - .indexed = 1, - .output = 1, - .channel = 0, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), - .ext_info = mcp4725_ext_info, +static const struct iio_chan_spec mcp472x_channel[] = { + [MCP4725] = { + .type = IIO_VOLTAGE, + .indexed = 1, + .output = 1, + .channel = 0, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .ext_info = mcp4725_ext_info, + }, + [MCP4726] = { + .type = IIO_VOLTAGE, + .indexed = 1, + .output = 1, + .channel = 0, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .ext_info = mcp4726_ext_info, + }, }; static int mcp4725_set_value(struct iio_dev *indio_dev, int val) @@ -302,7 +348,7 @@ static int mcp4725_probe(struct i2c_client *client, indio_dev->dev.parent = &client->dev; indio_dev->name = id->name; indio_dev->info = &mcp4725_info; - indio_dev->channels = &mcp4725_channel; + indio_dev->channels = &mcp472x_channel[id->driver_data]; indio_dev->num_channels = 1; indio_dev->modes = INDIO_DIRECT_MODE; @@ -316,7 +362,7 @@ static int mcp4725_probe(struct i2c_client *client, } pd = (inbuf[0] >> 1) & 0x3; data->powerdown = pd > 0 ? true : false; - data->powerdown_mode = pd ? pd-1 : 2; /* 500kohm_to_gnd */ + data->powerdown_mode = pd ? 
pd - 1 : 2; /* largest resistor to gnd */ data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4); return iio_device_register(indio_dev); @@ -329,7 +375,8 @@ static int mcp4725_remove(struct i2c_client *client) } static const struct i2c_device_id mcp4725_id[] = { - { "mcp4725", 0 }, + { "mcp4725", MCP4725 }, + { "mcp4726", MCP4726 }, { } }; MODULE_DEVICE_TABLE(i2c, mcp4725_id); @@ -346,5 +393,5 @@ static struct i2c_driver mcp4725_driver = { module_i2c_driver(mcp4725_driver); MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>"); -MODULE_DESCRIPTION("MCP4725 12-bit DAC"); +MODULE_DESCRIPTION("MCP4725/6 12-bit DAC"); MODULE_LICENSE("GPL"); diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c new file mode 100644 index 000000000000..174f4b75ceed --- /dev/null +++ b/drivers/iio/dac/stx104.c @@ -0,0 +1,152 @@ +/* + * DAC driver for the Apex Embedded Systems STX104 + * Copyright (C) 2016 William Breathitt Gray + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/iio/iio.h> +#include <linux/iio/types.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/isa.h> +#include <linux/module.h> +#include <linux/moduleparam.h> + +#define STX104_NUM_CHAN 2 + +#define STX104_CHAN(chan) { \ + .type = IIO_VOLTAGE, \ + .channel = chan, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .indexed = 1, \ + .output = 1 \ +} + +#define STX104_EXTENT 16 +/** + * The highest base address possible for an ISA device is 0x3FF; this results in + * 1024 possible base addresses. Dividing the number of possible base addresses + * by the address extent taken by each device results in the maximum number of + * devices on a system.
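 * With STX104_EXTENT at 16 ports per card this works out to 1024 / 16 = 64
 * devices, which is what the MAX_NUM_STX104 define below evaluates to.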
+ */ +#define MAX_NUM_STX104 (1024 / STX104_EXTENT) + +static unsigned base[MAX_NUM_STX104]; +static unsigned num_stx104; +module_param_array(base, uint, &num_stx104, 0); +MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses"); + +/** + * struct stx104_iio - IIO device private data structure + * @chan_out_states: channels' output states + * @base: base port address of the IIO device + */ +struct stx104_iio { + unsigned chan_out_states[STX104_NUM_CHAN]; + unsigned base; +}; + +static int stx104_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, int *val2, long mask) +{ + struct stx104_iio *const priv = iio_priv(indio_dev); + + if (mask != IIO_CHAN_INFO_RAW) + return -EINVAL; + + *val = priv->chan_out_states[chan->channel]; + + return IIO_VAL_INT; +} + +static int stx104_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, int val2, long mask) +{ + struct stx104_iio *const priv = iio_priv(indio_dev); + const unsigned chan_addr_offset = 2 * chan->channel; + + if (mask != IIO_CHAN_INFO_RAW) + return -EINVAL; + + priv->chan_out_states[chan->channel] = val; + outw(val, priv->base + 4 + chan_addr_offset); + + return 0; +} + +static const struct iio_info stx104_info = { + .driver_module = THIS_MODULE, + .read_raw = stx104_read_raw, + .write_raw = stx104_write_raw +}; + +static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = { + STX104_CHAN(0), + STX104_CHAN(1) +}; + +static int stx104_probe(struct device *dev, unsigned int id) +{ + struct iio_dev *indio_dev; + struct stx104_iio *priv; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*priv)); + if (!indio_dev) + return -ENOMEM; + + if (!devm_request_region(dev, base[id], STX104_EXTENT, + dev_name(dev))) { + dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n", + base[id], base[id] + STX104_EXTENT); + return -EBUSY; + } + + indio_dev->info = &stx104_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = stx104_channels; + indio_dev->num_channels = STX104_NUM_CHAN; + indio_dev->name = dev_name(dev); + + priv = iio_priv(indio_dev); + priv->base = base[id]; + + /* initialize DAC output to 0V */ + outw(0, base[id] + 4); + outw(0, base[id] + 6); + + return devm_iio_device_register(dev, indio_dev); +} + +static struct isa_driver stx104_driver = { + .probe = stx104_probe, + .driver = { + .name = "stx104" + } +}; + +static void __exit stx104_exit(void) +{ + isa_unregister_driver(&stx104_driver); +} + +static int __init stx104_init(void) +{ + return isa_register_driver(&stx104_driver, num_stx104); +} + +module_init(stx104_init); +module_exit(stx104_exit); + +MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>"); +MODULE_DESCRIPTION("Apex Embedded Systems STX104 DAC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c new file mode 100644 index 000000000000..c4ec7779b394 --- /dev/null +++ b/drivers/iio/dac/vf610_dac.c @@ -0,0 +1,298 @@ +/* + * Freescale Vybrid vf610 DAC driver + * + * Copyright 2016 Toradex AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> + +#define VF610_DACx_STATCTRL 0x20 + +#define VF610_DAC_DACEN BIT(15) +#define VF610_DAC_DACRFS BIT(14) +#define VF610_DAC_LPEN BIT(11) + +#define VF610_DAC_DAT0(x) ((x) & 0xFFF) + +enum vf610_conversion_mode_sel { + VF610_DAC_CONV_HIGH_POWER, + VF610_DAC_CONV_LOW_POWER, +}; + +struct vf610_dac { + struct clk *clk; + struct device *dev; + enum vf610_conversion_mode_sel conv_mode; + void __iomem *regs; +}; + +static void vf610_dac_init(struct vf610_dac *info) +{ + int val; + + info->conv_mode = VF610_DAC_CONV_LOW_POWER; + val = VF610_DAC_DACEN | VF610_DAC_DACRFS | + VF610_DAC_LPEN; + writel(val, info->regs + VF610_DACx_STATCTRL); +} + +static void vf610_dac_exit(struct vf610_dac *info) +{ + int val; + + val = readl(info->regs + VF610_DACx_STATCTRL); + val &= ~VF610_DAC_DACEN; + writel(val, info->regs + VF610_DACx_STATCTRL); +} + +static int vf610_set_conversion_mode(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + unsigned int mode) +{ + struct vf610_dac *info = iio_priv(indio_dev); + int val; + + mutex_lock(&indio_dev->mlock); + info->conv_mode = mode; + val = readl(info->regs + VF610_DACx_STATCTRL); + if (mode) + val |= VF610_DAC_LPEN; + else + val &= ~VF610_DAC_LPEN; + writel(val, info->regs + VF610_DACx_STATCTRL); + mutex_unlock(&indio_dev->mlock); + + return 0; +} + +static int vf610_get_conversion_mode(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +{ + struct vf610_dac *info = iio_priv(indio_dev); + + return info->conv_mode; +} + +static const char * const vf610_conv_modes[] = { "high-power", "low-power" }; + +static const struct iio_enum vf610_conversion_mode = { + .items = vf610_conv_modes, + .num_items = ARRAY_SIZE(vf610_conv_modes), + .get = vf610_get_conversion_mode, + .set = vf610_set_conversion_mode, +}; + +static const struct iio_chan_spec_ext_info vf610_ext_info[] = { + IIO_ENUM("conversion_mode", IIO_SHARED_BY_DIR, + &vf610_conversion_mode), + {}, +}; + +#define VF610_DAC_CHAN(_chan_type) { \ + .type = (_chan_type), \ + .output = 1, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .ext_info = vf610_ext_info, \ +} + +static const struct iio_chan_spec vf610_dac_iio_channels[] = { + VF610_DAC_CHAN(IIO_VOLTAGE), +}; + +static int vf610_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, + long mask) +{ + struct vf610_dac *info = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + *val = VF610_DAC_DAT0(readl(info->regs)); + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + /* + * DACRFS is always 1 for valid reference and typical + * reference voltage as per Vybrid datasheet is 3.3V + * from section 9.1.2.1 of Vybrid datasheet + */ + *val = 3300 /* mV */; + *val2 = 12; + return IIO_VAL_FRACTIONAL_LOG2; + + default: + return -EINVAL; + } +} + +static int vf610_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, + long mask) +{ + struct vf610_dac *info = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + mutex_lock(&indio_dev->mlock); + writel(VF610_DAC_DAT0(val), info->regs); + 
mutex_unlock(&indio_dev->mlock); + return 0; + + default: + return -EINVAL; + } +} + +static const struct iio_info vf610_dac_iio_info = { + .driver_module = THIS_MODULE, + .read_raw = &vf610_read_raw, + .write_raw = &vf610_write_raw, +}; + +static const struct of_device_id vf610_dac_match[] = { + { .compatible = "fsl,vf610-dac", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, vf610_dac_match); + +static int vf610_dac_probe(struct platform_device *pdev) +{ + struct iio_dev *indio_dev; + struct vf610_dac *info; + struct resource *mem; + int ret; + + indio_dev = devm_iio_device_alloc(&pdev->dev, + sizeof(struct vf610_dac)); + if (!indio_dev) { + dev_err(&pdev->dev, "Failed allocating iio device\n"); + return -ENOMEM; + } + + info = iio_priv(indio_dev); + info->dev = &pdev->dev; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + info->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(info->regs)) + return PTR_ERR(info->regs); + + info->clk = devm_clk_get(&pdev->dev, "dac"); + if (IS_ERR(info->clk)) { + dev_err(&pdev->dev, "Failed getting clock, err = %ld\n", + PTR_ERR(info->clk)); + return PTR_ERR(info->clk); + } + + platform_set_drvdata(pdev, indio_dev); + + indio_dev->name = dev_name(&pdev->dev); + indio_dev->dev.parent = &pdev->dev; + indio_dev->dev.of_node = pdev->dev.of_node; + indio_dev->info = &vf610_dac_iio_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = vf610_dac_iio_channels; + indio_dev->num_channels = ARRAY_SIZE(vf610_dac_iio_channels); + + ret = clk_prepare_enable(info->clk); + if (ret) { + dev_err(&pdev->dev, + "Could not prepare or enable the clock\n"); + return ret; + } + + vf610_dac_init(info); + + ret = iio_device_register(indio_dev); + if (ret) { + dev_err(&pdev->dev, "Couldn't register the device\n"); + goto error_iio_device_register; + } + + return 0; + +error_iio_device_register: + clk_disable_unprepare(info->clk); + + return ret; +} + +static int vf610_dac_remove(struct platform_device *pdev) +{ + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct vf610_dac *info = iio_priv(indio_dev); + + iio_device_unregister(indio_dev); + vf610_dac_exit(info); + clk_disable_unprepare(info->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int vf610_dac_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct vf610_dac *info = iio_priv(indio_dev); + + vf610_dac_exit(info); + clk_disable_unprepare(info->clk); + + return 0; +} + +static int vf610_dac_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct vf610_dac *info = iio_priv(indio_dev); + int ret; + + ret = clk_prepare_enable(info->clk); + if (ret) + return ret; + + vf610_dac_init(info); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(vf610_dac_pm_ops, vf610_dac_suspend, vf610_dac_resume); + +static struct platform_driver vf610_dac_driver = { + .probe = vf610_dac_probe, + .remove = vf610_dac_remove, + .driver = { + .name = "vf610-dac", + .of_match_table = vf610_dac_match, + .pm = &vf610_dac_pm_ops, + }, +}; +module_platform_driver(vf610_dac_driver); + +MODULE_AUTHOR("Sanchayan Maity <sanchayan.maity@toradex.com>"); +MODULE_DESCRIPTION("Freescale VF610 DAC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 02eddcebeea3..110f95b6e52f 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c @@ -185,6 +185,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { .drdy_irq = { .addr 
= ST_GYRO_1_DRDY_IRQ_ADDR, .mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK, + /* + * The sensor has IHL (active low) and open + * drain settings, but only for INT1 and not + * for the DRDY line on INT2. + */ }, .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT, .bootime = 2, @@ -248,6 +253,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { .drdy_irq = { .addr = ST_GYRO_2_DRDY_IRQ_ADDR, .mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK, + /* + * The sensor has IHL (active low) and open + * drain settings, but only for INT1 and not + * for the DRDY line on INT2. + */ }, .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT, .bootime = 2, @@ -307,6 +317,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = { .drdy_irq = { .addr = ST_GYRO_3_DRDY_IRQ_ADDR, .mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK, + /* + * The sensor has IHL (active low) and open + * drain settings, but only for INT1 and not + * for the DRDY line on INT2. + */ }, .multi_read_bit = ST_GYRO_3_MULTIREAD_BIT, .bootime = 2, diff --git a/drivers/iio/health/Kconfig b/drivers/iio/health/Kconfig index a647679da805..c5f004a8e447 100644 --- a/drivers/iio/health/Kconfig +++ b/drivers/iio/health/Kconfig @@ -3,7 +3,35 @@ # # When adding new entries keep the list in alphabetical order -menu "Health sensors" +menu "Health Sensors" + +menu "Heart Rate Monitors" + +config AFE4403 + tristate "TI AFE4403 Heart Rate Monitor" + depends on SPI_MASTER + select REGMAP_SPI + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + Say yes to choose the Texas Instruments AFE4403 + heart rate monitor and low-cost pulse oximeter. + + To compile this driver as a module, choose M here: the + module will be called afe4403. + +config AFE4404 + tristate "TI AFE4404 heart rate and pulse oximeter sensor" + depends on I2C + select REGMAP_I2C + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + help + Say yes to choose the Texas Instruments AFE4404 + heart rate monitor and low-cost pulse oximeter. + + To compile this driver as a module, choose M here: the + module will be called afe4404. config MAX30100 tristate "MAX30100 heart rate and pulse oximeter sensor" @@ -19,3 +47,5 @@ config MAX30100 module will be called max30100. endmenu + +endmenu diff --git a/drivers/iio/health/Makefile b/drivers/iio/health/Makefile index 7c475d7faad8..9955a2ae8df1 100644 --- a/drivers/iio/health/Makefile +++ b/drivers/iio/health/Makefile @@ -4,4 +4,6 @@ # When adding new entries keep the list in alphabetical order +obj-$(CONFIG_AFE4403) += afe4403.o +obj-$(CONFIG_AFE4404) += afe4404.o obj-$(CONFIG_MAX30100) += max30100.o diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c new file mode 100644 index 000000000000..88e43f87b926 --- /dev/null +++ b/drivers/iio/health/afe4403.c @@ -0,0 +1,708 @@ +/* + * AFE4403 Heart Rate Monitors and Low-Cost Pulse Oximeters + * + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis <afd@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/spi/spi.h> +#include <linux/sysfs.h> +#include <linux/regulator/consumer.h> + +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/iio/buffer.h> +#include <linux/iio/trigger.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/iio/trigger_consumer.h> + +#include "afe440x.h" + +#define AFE4403_DRIVER_NAME "afe4403" + +/* AFE4403 Registers */ +#define AFE4403_TIAGAIN 0x20 +#define AFE4403_TIA_AMB_GAIN 0x21 + +/* AFE4403 GAIN register fields */ +#define AFE4403_TIAGAIN_RES_MASK GENMASK(2, 0) +#define AFE4403_TIAGAIN_RES_SHIFT 0 +#define AFE4403_TIAGAIN_CAP_MASK GENMASK(7, 3) +#define AFE4403_TIAGAIN_CAP_SHIFT 3 + +/* AFE4403 LEDCNTRL register fields */ +#define AFE440X_LEDCNTRL_LED1_MASK GENMASK(15, 8) +#define AFE440X_LEDCNTRL_LED1_SHIFT 8 +#define AFE440X_LEDCNTRL_LED2_MASK GENMASK(7, 0) +#define AFE440X_LEDCNTRL_LED2_SHIFT 0 +#define AFE440X_LEDCNTRL_LED_RANGE_MASK GENMASK(17, 16) +#define AFE440X_LEDCNTRL_LED_RANGE_SHIFT 16 + +/* AFE4403 CONTROL2 register fields */ +#define AFE440X_CONTROL2_PWR_DWN_TX BIT(2) +#define AFE440X_CONTROL2_EN_SLOW_DIAG BIT(8) +#define AFE440X_CONTROL2_DIAG_OUT_TRI BIT(10) +#define AFE440X_CONTROL2_TX_BRDG_MOD BIT(11) +#define AFE440X_CONTROL2_TX_REF_MASK GENMASK(18, 17) +#define AFE440X_CONTROL2_TX_REF_SHIFT 17 + +/* AFE4404 NULL fields */ +#define NULL_MASK 0 +#define NULL_SHIFT 0 + +/* AFE4403 LEDCNTRL values */ +#define AFE440X_LEDCNTRL_RANGE_TX_HALF 0x1 +#define AFE440X_LEDCNTRL_RANGE_TX_FULL 0x2 +#define AFE440X_LEDCNTRL_RANGE_TX_OFF 0x3 + +/* AFE4403 CONTROL2 values */ +#define AFE440X_CONTROL2_TX_REF_025 0x0 +#define AFE440X_CONTROL2_TX_REF_050 0x1 +#define AFE440X_CONTROL2_TX_REF_100 0x2 +#define AFE440X_CONTROL2_TX_REF_075 0x3 + +/* AFE4403 CONTROL3 values */ +#define AFE440X_CONTROL3_CLK_DIV_2 0x0 +#define AFE440X_CONTROL3_CLK_DIV_4 0x2 +#define AFE440X_CONTROL3_CLK_DIV_6 0x3 +#define AFE440X_CONTROL3_CLK_DIV_8 0x4 +#define AFE440X_CONTROL3_CLK_DIV_12 0x5 +#define AFE440X_CONTROL3_CLK_DIV_1 0x7 + +/* AFE4403 TIAGAIN_CAP values */ +#define AFE4403_TIAGAIN_CAP_5_P 0x0 +#define AFE4403_TIAGAIN_CAP_10_P 0x1 +#define AFE4403_TIAGAIN_CAP_20_P 0x2 +#define AFE4403_TIAGAIN_CAP_30_P 0x3 +#define AFE4403_TIAGAIN_CAP_55_P 0x8 +#define AFE4403_TIAGAIN_CAP_155_P 0x10 + +/* AFE4403 TIAGAIN_RES values */ +#define AFE4403_TIAGAIN_RES_500_K 0x0 +#define AFE4403_TIAGAIN_RES_250_K 0x1 +#define AFE4403_TIAGAIN_RES_100_K 0x2 +#define AFE4403_TIAGAIN_RES_50_K 0x3 +#define AFE4403_TIAGAIN_RES_25_K 0x4 +#define AFE4403_TIAGAIN_RES_10_K 0x5 +#define AFE4403_TIAGAIN_RES_1_M 0x6 +#define AFE4403_TIAGAIN_RES_NONE 0x7 + +/** + * struct afe4403_data + * @dev - Device structure + * @spi - SPI device handle + * @regmap - Register map of the device + * @regulator - Pointer to the regulator for the IC + * @trig - IIO trigger for this device + * @irq - ADC_RDY line interrupt number + */ +struct afe4403_data { + struct device *dev; + struct spi_device *spi; + struct regmap *regmap; + struct regulator *regulator; + struct iio_trigger *trig; + int irq; +}; + +enum afe4403_chan_id { + LED1, + ALED1, + LED2, + ALED2, + LED1_ALED1, + LED2_ALED2, + ILED1, + ILED2, +}; + +static const struct afe440x_reg_info afe4403_reg_info[] = { + [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, 0, NULL), + [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, 0, NULL), + [LED2] = 
AFE440X_REG_INFO(AFE440X_LED2VAL, 0, NULL), + [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL), + [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL), + [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL), + [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED1), + [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED2), +}; + +static const struct iio_chan_spec afe4403_channels[] = { + /* ADC values */ + AFE440X_INTENSITY_CHAN(LED1, "led1", 0), + AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", 0), + AFE440X_INTENSITY_CHAN(LED2, "led2", 0), + AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", 0), + AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0), + AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0), + /* LED current */ + AFE440X_CURRENT_CHAN(ILED1, "led1"), + AFE440X_CURRENT_CHAN(ILED2, "led2"), +}; + +static const struct afe440x_val_table afe4403_res_table[] = { + { 500000 }, { 250000 }, { 100000 }, { 50000 }, + { 25000 }, { 10000 }, { 1000000 }, { 0 }, +}; +AFE440X_TABLE_ATTR(tia_resistance_available, afe4403_res_table); + +static const struct afe440x_val_table afe4403_cap_table[] = { + { 0, 5000 }, { 0, 10000 }, { 0, 20000 }, { 0, 25000 }, + { 0, 30000 }, { 0, 35000 }, { 0, 45000 }, { 0, 50000 }, + { 0, 55000 }, { 0, 60000 }, { 0, 70000 }, { 0, 75000 }, + { 0, 80000 }, { 0, 85000 }, { 0, 95000 }, { 0, 100000 }, + { 0, 155000 }, { 0, 160000 }, { 0, 170000 }, { 0, 175000 }, + { 0, 180000 }, { 0, 185000 }, { 0, 195000 }, { 0, 200000 }, + { 0, 205000 }, { 0, 210000 }, { 0, 220000 }, { 0, 225000 }, + { 0, 230000 }, { 0, 235000 }, { 0, 245000 }, { 0, 250000 }, +}; +AFE440X_TABLE_ATTR(tia_capacitance_available, afe4403_cap_table); + +static ssize_t afe440x_show_register(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct afe4403_data *afe = iio_priv(indio_dev); + struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr); + unsigned int reg_val, type; + int vals[2]; + int ret, val_len; + + ret = regmap_read(afe->regmap, afe440x_attr->reg, ®_val); + if (ret) + return ret; + + reg_val &= afe440x_attr->mask; + reg_val >>= afe440x_attr->shift; + + switch (afe440x_attr->type) { + case SIMPLE: + type = IIO_VAL_INT; + val_len = 1; + vals[0] = reg_val; + break; + case RESISTANCE: + case CAPACITANCE: + type = IIO_VAL_INT_PLUS_MICRO; + val_len = 2; + if (reg_val < afe440x_attr->table_size) { + vals[0] = afe440x_attr->val_table[reg_val].integer; + vals[1] = afe440x_attr->val_table[reg_val].fract; + break; + } + return -EINVAL; + default: + return -EINVAL; + } + + return iio_format_value(buf, type, val_len, vals); +} + +static ssize_t afe440x_store_register(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct afe4403_data *afe = iio_priv(indio_dev); + struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr); + int val, integer, fract, ret; + + ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract); + if (ret) + return ret; + + switch (afe440x_attr->type) { + case SIMPLE: + val = integer; + break; + case RESISTANCE: + case CAPACITANCE: + for (val = 0; val < afe440x_attr->table_size; val++) + if (afe440x_attr->val_table[val].integer == integer && + afe440x_attr->val_table[val].fract == fract) + break; + if (val == afe440x_attr->table_size) + return -EINVAL; + break; + default: + return -EINVAL; + } + + ret = regmap_update_bits(afe->regmap, afe440x_attr->reg, + 
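/*
 * Example of the table lookup above: writing "250000" to tia_resistance1
 * matches index 1 of afe4403_res_table, so the AFE4403_TIAGAIN_RES field is
 * programmed with 0x1 (AFE4403_TIAGAIN_RES_250_K, i.e. 250 kOhm); reading
 * the attribute back walks the same table in the opposite direction.
 */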
afe440x_attr->mask, + (val << afe440x_attr->shift)); + if (ret) + return ret; + + return count; +} + +static AFE440X_ATTR(tia_separate_en, AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0); + +static AFE440X_ATTR(tia_resistance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table)); +static AFE440X_ATTR(tia_capacitance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table)); + +static AFE440X_ATTR(tia_resistance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table)); +static AFE440X_ATTR(tia_capacitance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table)); + +static struct attribute *afe440x_attributes[] = { + &afe440x_attr_tia_separate_en.dev_attr.attr, + &afe440x_attr_tia_resistance1.dev_attr.attr, + &afe440x_attr_tia_capacitance1.dev_attr.attr, + &afe440x_attr_tia_resistance2.dev_attr.attr, + &afe440x_attr_tia_capacitance2.dev_attr.attr, + &dev_attr_tia_resistance_available.attr, + &dev_attr_tia_capacitance_available.attr, + NULL +}; + +static const struct attribute_group afe440x_attribute_group = { + .attrs = afe440x_attributes +}; + +static int afe4403_read(struct afe4403_data *afe, unsigned int reg, u32 *val) +{ + u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; + u8 rx[3]; + int ret; + + /* Enable reading from the device */ + ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0); + if (ret) + return ret; + + ret = spi_write_then_read(afe->spi, ®, 1, rx, 3); + if (ret) + return ret; + + *val = (rx[0] << 16) | + (rx[1] << 8) | + (rx[2]); + + /* Disable reading from the device */ + tx[3] = AFE440X_CONTROL0_WRITE; + ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0); + if (ret) + return ret; + + return 0; +} + +static int afe4403_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct afe4403_data *afe = iio_priv(indio_dev); + const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address]; + int ret; + + switch (chan->type) { + case IIO_INTENSITY: + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = afe4403_read(afe, reg_info.reg, val); + if (ret) + return ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_OFFSET: + ret = regmap_read(afe->regmap, reg_info.offreg, + val); + if (ret) + return ret; + *val &= reg_info.mask; + *val >>= reg_info.shift; + return IIO_VAL_INT; + } + break; + case IIO_CURRENT: + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = regmap_read(afe->regmap, reg_info.reg, val); + if (ret) + return ret; + *val &= reg_info.mask; + *val >>= reg_info.shift; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = 0; + *val2 = 800000; + return IIO_VAL_INT_PLUS_MICRO; + } + break; + default: + break; + } + + return -EINVAL; +} + +static int afe4403_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct afe4403_data *afe = iio_priv(indio_dev); + const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address]; + + switch (chan->type) { + case IIO_INTENSITY: + switch (mask) { + case IIO_CHAN_INFO_OFFSET: + return regmap_update_bits(afe->regmap, + reg_info.offreg, + reg_info.mask, + (val << reg_info.shift)); + } + break; + case IIO_CURRENT: + switch (mask) { + case IIO_CHAN_INFO_RAW: + return regmap_update_bits(afe->regmap, + reg_info.reg, + reg_info.mask, + (val << reg_info.shift)); + } + break; + default: + break; + 
} + + return -EINVAL; +} + +static const struct iio_info afe4403_iio_info = { + .attrs = &afe440x_attribute_group, + .read_raw = afe4403_read_raw, + .write_raw = afe4403_write_raw, + .driver_module = THIS_MODULE, +}; + +static irqreturn_t afe4403_trigger_handler(int irq, void *private) +{ + struct iio_poll_func *pf = private; + struct iio_dev *indio_dev = pf->indio_dev; + struct afe4403_data *afe = iio_priv(indio_dev); + int ret, bit, i = 0; + s32 buffer[8]; + u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; + u8 rx[3]; + + /* Enable reading from the device */ + ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0); + if (ret) + goto err; + + for_each_set_bit(bit, indio_dev->active_scan_mask, + indio_dev->masklength) { + ret = spi_write_then_read(afe->spi, + &afe4403_reg_info[bit].reg, 1, + rx, 3); + if (ret) + goto err; + + buffer[i++] = (rx[0] << 16) | + (rx[1] << 8) | + (rx[2]); + } + + /* Disable reading from the device */ + tx[3] = AFE440X_CONTROL0_WRITE; + ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0); + if (ret) + goto err; + + iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); +err: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static const struct iio_trigger_ops afe4403_trigger_ops = { + .owner = THIS_MODULE, +}; + +#define AFE4403_TIMING_PAIRS \ + { AFE440X_LED2STC, 0x000050 }, \ + { AFE440X_LED2ENDC, 0x0003e7 }, \ + { AFE440X_LED1LEDSTC, 0x0007d0 }, \ + { AFE440X_LED1LEDENDC, 0x000bb7 }, \ + { AFE440X_ALED2STC, 0x000438 }, \ + { AFE440X_ALED2ENDC, 0x0007cf }, \ + { AFE440X_LED1STC, 0x000820 }, \ + { AFE440X_LED1ENDC, 0x000bb7 }, \ + { AFE440X_LED2LEDSTC, 0x000000 }, \ + { AFE440X_LED2LEDENDC, 0x0003e7 }, \ + { AFE440X_ALED1STC, 0x000c08 }, \ + { AFE440X_ALED1ENDC, 0x000f9f }, \ + { AFE440X_LED2CONVST, 0x0003ef }, \ + { AFE440X_LED2CONVEND, 0x0007cf }, \ + { AFE440X_ALED2CONVST, 0x0007d7 }, \ + { AFE440X_ALED2CONVEND, 0x000bb7 }, \ + { AFE440X_LED1CONVST, 0x000bbf }, \ + { AFE440X_LED1CONVEND, 0x009c3f }, \ + { AFE440X_ALED1CONVST, 0x000fa7 }, \ + { AFE440X_ALED1CONVEND, 0x001387 }, \ + { AFE440X_ADCRSTSTCT0, 0x0003e8 }, \ + { AFE440X_ADCRSTENDCT0, 0x0003eb }, \ + { AFE440X_ADCRSTSTCT1, 0x0007d0 }, \ + { AFE440X_ADCRSTENDCT1, 0x0007d3 }, \ + { AFE440X_ADCRSTSTCT2, 0x000bb8 }, \ + { AFE440X_ADCRSTENDCT2, 0x000bbb }, \ + { AFE440X_ADCRSTSTCT3, 0x000fa0 }, \ + { AFE440X_ADCRSTENDCT3, 0x000fa3 }, \ + { AFE440X_PRPCOUNT, 0x009c3f }, \ + { AFE440X_PDNCYCLESTC, 0x001518 }, \ + { AFE440X_PDNCYCLEENDC, 0x00991f } + +static const struct reg_sequence afe4403_reg_sequences[] = { + AFE4403_TIMING_PAIRS, + { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN | 0x000007}, + { AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES_1_M }, + { AFE440X_LEDCNTRL, (0x14 << AFE440X_LEDCNTRL_LED1_SHIFT) | + (0x14 << AFE440X_LEDCNTRL_LED2_SHIFT) }, + { AFE440X_CONTROL2, AFE440X_CONTROL2_TX_REF_050 << + AFE440X_CONTROL2_TX_REF_SHIFT }, +}; + +static const struct regmap_range afe4403_yes_ranges[] = { + regmap_reg_range(AFE440X_LED2VAL, AFE440X_LED1_ALED1VAL), +}; + +static const struct regmap_access_table afe4403_volatile_table = { + .yes_ranges = afe4403_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(afe4403_yes_ranges), +}; + +static const struct regmap_config afe4403_regmap_config = { + .reg_bits = 8, + .val_bits = 24, + + .max_register = AFE440X_PDNCYCLEENDC, + .cache_type = REGCACHE_RBTREE, + .volatile_table = &afe4403_volatile_table, +}; + +#ifdef CONFIG_OF +static const struct of_device_id afe4403_of_match[] = { + { .compatible = "ti,afe4403", }, + { /* 
sentinel */ } +}; +MODULE_DEVICE_TABLE(of, afe4403_of_match); +#endif + +static int __maybe_unused afe4403_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct afe4403_data *afe = iio_priv(indio_dev); + int ret; + + ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2, + AFE440X_CONTROL2_PDN_AFE, + AFE440X_CONTROL2_PDN_AFE); + if (ret) + return ret; + + ret = regulator_disable(afe->regulator); + if (ret) { + dev_err(dev, "Unable to disable regulator\n"); + return ret; + } + + return 0; +} + +static int __maybe_unused afe4403_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct afe4403_data *afe = iio_priv(indio_dev); + int ret; + + ret = regulator_enable(afe->regulator); + if (ret) { + dev_err(dev, "Unable to enable regulator\n"); + return ret; + } + + ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2, + AFE440X_CONTROL2_PDN_AFE, 0); + if (ret) + return ret; + + return 0; +} + +static SIMPLE_DEV_PM_OPS(afe4403_pm_ops, afe4403_suspend, afe4403_resume); + +static int afe4403_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct afe4403_data *afe; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe)); + if (!indio_dev) + return -ENOMEM; + + afe = iio_priv(indio_dev); + spi_set_drvdata(spi, indio_dev); + + afe->dev = &spi->dev; + afe->spi = spi; + afe->irq = spi->irq; + + afe->regmap = devm_regmap_init_spi(spi, &afe4403_regmap_config); + if (IS_ERR(afe->regmap)) { + dev_err(afe->dev, "Unable to allocate register map\n"); + return PTR_ERR(afe->regmap); + } + + afe->regulator = devm_regulator_get(afe->dev, "tx_sup"); + if (IS_ERR(afe->regulator)) { + dev_err(afe->dev, "Unable to get regulator\n"); + return PTR_ERR(afe->regulator); + } + ret = regulator_enable(afe->regulator); + if (ret) { + dev_err(afe->dev, "Unable to enable regulator\n"); + return ret; + } + + ret = regmap_write(afe->regmap, AFE440X_CONTROL0, + AFE440X_CONTROL0_SW_RESET); + if (ret) { + dev_err(afe->dev, "Unable to reset device\n"); + goto err_disable_reg; + } + + ret = regmap_multi_reg_write(afe->regmap, afe4403_reg_sequences, + ARRAY_SIZE(afe4403_reg_sequences)); + if (ret) { + dev_err(afe->dev, "Unable to set register defaults\n"); + goto err_disable_reg; + } + + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->dev.parent = afe->dev; + indio_dev->channels = afe4403_channels; + indio_dev->num_channels = ARRAY_SIZE(afe4403_channels); + indio_dev->name = AFE4403_DRIVER_NAME; + indio_dev->info = &afe4403_iio_info; + + if (afe->irq > 0) { + afe->trig = devm_iio_trigger_alloc(afe->dev, + "%s-dev%d", + indio_dev->name, + indio_dev->id); + if (!afe->trig) { + dev_err(afe->dev, "Unable to allocate IIO trigger\n"); + ret = -ENOMEM; + goto err_disable_reg; + } + + iio_trigger_set_drvdata(afe->trig, indio_dev); + + afe->trig->ops = &afe4403_trigger_ops; + afe->trig->dev.parent = afe->dev; + + ret = iio_trigger_register(afe->trig); + if (ret) { + dev_err(afe->dev, "Unable to register IIO trigger\n"); + goto err_disable_reg; + } + + ret = devm_request_threaded_irq(afe->dev, afe->irq, + iio_trigger_generic_data_rdy_poll, + NULL, IRQF_ONESHOT, + AFE4403_DRIVER_NAME, + afe->trig); + if (ret) { + dev_err(afe->dev, "Unable to request IRQ\n"); + goto err_trig; + } + } + + ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + afe4403_trigger_handler, NULL); + if (ret) { + dev_err(afe->dev, "Unable to setup buffer\n"); + goto err_trig; + } + + ret = iio_device_register(indio_dev); + if (ret) { + 
dev_err(afe->dev, "Unable to register IIO device\n"); + goto err_buff; + } + + return 0; + +err_buff: + iio_triggered_buffer_cleanup(indio_dev); +err_trig: + if (afe->irq > 0) + iio_trigger_unregister(afe->trig); +err_disable_reg: + regulator_disable(afe->regulator); + + return ret; +} + +static int afe4403_remove(struct spi_device *spi) +{ + struct iio_dev *indio_dev = spi_get_drvdata(spi); + struct afe4403_data *afe = iio_priv(indio_dev); + int ret; + + iio_device_unregister(indio_dev); + + iio_triggered_buffer_cleanup(indio_dev); + + if (afe->irq > 0) + iio_trigger_unregister(afe->trig); + + ret = regulator_disable(afe->regulator); + if (ret) { + dev_err(afe->dev, "Unable to disable regulator\n"); + return ret; + } + + return 0; +} + +static const struct spi_device_id afe4403_ids[] = { + { "afe4403", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, afe4403_ids); + +static struct spi_driver afe4403_spi_driver = { + .driver = { + .name = AFE4403_DRIVER_NAME, + .of_match_table = of_match_ptr(afe4403_of_match), + .pm = &afe4403_pm_ops, + }, + .probe = afe4403_probe, + .remove = afe4403_remove, + .id_table = afe4403_ids, +}; +module_spi_driver(afe4403_spi_driver); + +MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>"); +MODULE_DESCRIPTION("TI AFE4403 Heart Rate and Pulse Oximeter"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c new file mode 100644 index 000000000000..5096a4643784 --- /dev/null +++ b/drivers/iio/health/afe4404.c @@ -0,0 +1,679 @@ +/* + * AFE4404 Heart Rate Monitors and Low-Cost Pulse Oximeters + * + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis <afd@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/sysfs.h> +#include <linux/regulator/consumer.h> + +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/iio/buffer.h> +#include <linux/iio/trigger.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/iio/trigger_consumer.h> + +#include "afe440x.h" + +#define AFE4404_DRIVER_NAME "afe4404" + +/* AFE4404 registers */ +#define AFE4404_TIA_GAIN_SEP 0x20 +#define AFE4404_TIA_GAIN 0x21 +#define AFE4404_PROG_TG_STC 0x34 +#define AFE4404_PROG_TG_ENDC 0x35 +#define AFE4404_LED3LEDSTC 0x36 +#define AFE4404_LED3LEDENDC 0x37 +#define AFE4404_CLKDIV_PRF 0x39 +#define AFE4404_OFFDAC 0x3a +#define AFE4404_DEC 0x3d +#define AFE4404_AVG_LED2_ALED2VAL 0x3f +#define AFE4404_AVG_LED1_ALED1VAL 0x40 + +/* AFE4404 GAIN register fields */ +#define AFE4404_TIA_GAIN_RES_MASK GENMASK(2, 0) +#define AFE4404_TIA_GAIN_RES_SHIFT 0 +#define AFE4404_TIA_GAIN_CAP_MASK GENMASK(5, 3) +#define AFE4404_TIA_GAIN_CAP_SHIFT 3 + +/* AFE4404 LEDCNTRL register fields */ +#define AFE4404_LEDCNTRL_ILED1_MASK GENMASK(5, 0) +#define AFE4404_LEDCNTRL_ILED1_SHIFT 0 +#define AFE4404_LEDCNTRL_ILED2_MASK GENMASK(11, 6) +#define AFE4404_LEDCNTRL_ILED2_SHIFT 6 +#define AFE4404_LEDCNTRL_ILED3_MASK GENMASK(17, 12) +#define AFE4404_LEDCNTRL_ILED3_SHIFT 12 + +/* AFE4404 CONTROL2 register fields */ +#define AFE440X_CONTROL2_ILED_2X_MASK BIT(17) +#define AFE440X_CONTROL2_ILED_2X_SHIFT 17 + +/* AFE4404 CONTROL3 register fields */ +#define AFE440X_CONTROL3_OSC_ENABLE BIT(9) + +/* AFE4404 OFFDAC register current fields */ +#define AFE4404_OFFDAC_CURR_LED1_MASK GENMASK(9, 5) +#define AFE4404_OFFDAC_CURR_LED1_SHIFT 5 +#define AFE4404_OFFDAC_CURR_LED2_MASK GENMASK(19, 15) +#define AFE4404_OFFDAC_CURR_LED2_SHIFT 15 +#define AFE4404_OFFDAC_CURR_LED3_MASK GENMASK(4, 0) +#define AFE4404_OFFDAC_CURR_LED3_SHIFT 0 +#define AFE4404_OFFDAC_CURR_ALED1_MASK GENMASK(14, 10) +#define AFE4404_OFFDAC_CURR_ALED1_SHIFT 10 +#define AFE4404_OFFDAC_CURR_ALED2_MASK GENMASK(4, 0) +#define AFE4404_OFFDAC_CURR_ALED2_SHIFT 0 + +/* AFE4404 NULL fields */ +#define NULL_MASK 0 +#define NULL_SHIFT 0 + +/* AFE4404 TIA_GAIN_CAP values */ +#define AFE4404_TIA_GAIN_CAP_5_P 0x0 +#define AFE4404_TIA_GAIN_CAP_2_5_P 0x1 +#define AFE4404_TIA_GAIN_CAP_10_P 0x2 +#define AFE4404_TIA_GAIN_CAP_7_5_P 0x3 +#define AFE4404_TIA_GAIN_CAP_20_P 0x4 +#define AFE4404_TIA_GAIN_CAP_17_5_P 0x5 +#define AFE4404_TIA_GAIN_CAP_25_P 0x6 +#define AFE4404_TIA_GAIN_CAP_22_5_P 0x7 + +/* AFE4404 TIA_GAIN_RES values */ +#define AFE4404_TIA_GAIN_RES_500_K 0x0 +#define AFE4404_TIA_GAIN_RES_250_K 0x1 +#define AFE4404_TIA_GAIN_RES_100_K 0x2 +#define AFE4404_TIA_GAIN_RES_50_K 0x3 +#define AFE4404_TIA_GAIN_RES_25_K 0x4 +#define AFE4404_TIA_GAIN_RES_10_K 0x5 +#define AFE4404_TIA_GAIN_RES_1_M 0x6 +#define AFE4404_TIA_GAIN_RES_2_M 0x7 + +/** + * struct afe4404_data + * @dev - Device structure + * @regmap - Register map of the device + * @regulator - Pointer to the regulator for the IC + * @trig - IIO trigger for this device + * @irq - ADC_RDY line interrupt number + */ +struct afe4404_data { + struct device *dev; + struct regmap *regmap; + struct regulator *regulator; + struct iio_trigger *trig; + int irq; +}; + +enum afe4404_chan_id { + LED1, + ALED1, + LED2, + ALED2, + LED3, + LED1_ALED1, + LED2_ALED2, + ILED1, + ILED2, + ILED3, +}; + +static const struct afe440x_reg_info afe4404_reg_info[] = 
{ + [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1), + [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED1), + [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED2), + [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED2), + [LED3] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL), + [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL), + [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL), + [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED1), + [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED2), + [ILED3] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED3), +}; + +static const struct iio_chan_spec afe4404_channels[] = { + /* ADC values */ + AFE440X_INTENSITY_CHAN(LED1, "led1", BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(LED2, "led2", BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(LED3, "led3", BIT(IIO_CHAN_INFO_OFFSET)), + AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0), + AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0), + /* LED current */ + AFE440X_CURRENT_CHAN(ILED1, "led1"), + AFE440X_CURRENT_CHAN(ILED2, "led2"), + AFE440X_CURRENT_CHAN(ILED3, "led3"), +}; + +static const struct afe440x_val_table afe4404_res_table[] = { + { .integer = 500000, .fract = 0 }, + { .integer = 250000, .fract = 0 }, + { .integer = 100000, .fract = 0 }, + { .integer = 50000, .fract = 0 }, + { .integer = 25000, .fract = 0 }, + { .integer = 10000, .fract = 0 }, + { .integer = 1000000, .fract = 0 }, + { .integer = 2000000, .fract = 0 }, +}; +AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table); + +static const struct afe440x_val_table afe4404_cap_table[] = { + { .integer = 0, .fract = 5000 }, + { .integer = 0, .fract = 2500 }, + { .integer = 0, .fract = 10000 }, + { .integer = 0, .fract = 7500 }, + { .integer = 0, .fract = 20000 }, + { .integer = 0, .fract = 17500 }, + { .integer = 0, .fract = 25000 }, + { .integer = 0, .fract = 22500 }, +}; +AFE440X_TABLE_ATTR(tia_capacitance_available, afe4404_cap_table); + +static ssize_t afe440x_show_register(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct afe4404_data *afe = iio_priv(indio_dev); + struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr); + unsigned int reg_val, type; + int vals[2]; + int ret, val_len; + + ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val); + if (ret) + return ret; + + reg_val &= afe440x_attr->mask; + reg_val >>= afe440x_attr->shift; + + switch (afe440x_attr->type) { + case SIMPLE: + type = IIO_VAL_INT; + val_len = 1; + vals[0] = reg_val; + break; + case RESISTANCE: + case CAPACITANCE: + type = IIO_VAL_INT_PLUS_MICRO; + val_len = 2; + if (reg_val < afe440x_attr->table_size) { + vals[0] = afe440x_attr->val_table[reg_val].integer; + vals[1] = afe440x_attr->val_table[reg_val].fract; + break; + } + return -EINVAL; + default: + return -EINVAL; + } + + return iio_format_value(buf, type, val_len, vals); +} + +static ssize_t afe440x_store_register(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct afe4404_data *afe = iio_priv(indio_dev); + struct afe440x_attr 
*afe440x_attr = to_afe440x_attr(attr); + int val, integer, fract, ret; + + ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract); + if (ret) + return ret; + + switch (afe440x_attr->type) { + case SIMPLE: + val = integer; + break; + case RESISTANCE: + case CAPACITANCE: + for (val = 0; val < afe440x_attr->table_size; val++) + if (afe440x_attr->val_table[val].integer == integer && + afe440x_attr->val_table[val].fract == fract) + break; + if (val == afe440x_attr->table_size) + return -EINVAL; + break; + default: + return -EINVAL; + } + + ret = regmap_update_bits(afe->regmap, afe440x_attr->reg, + afe440x_attr->mask, + (val << afe440x_attr->shift)); + if (ret) + return ret; + + return count; +} + +static AFE440X_ATTR(tia_separate_en, AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0); + +static AFE440X_ATTR(tia_resistance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table)); +static AFE440X_ATTR(tia_capacitance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table)); + +static AFE440X_ATTR(tia_resistance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table)); +static AFE440X_ATTR(tia_capacitance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table)); + +static struct attribute *afe440x_attributes[] = { + &afe440x_attr_tia_separate_en.dev_attr.attr, + &afe440x_attr_tia_resistance1.dev_attr.attr, + &afe440x_attr_tia_capacitance1.dev_attr.attr, + &afe440x_attr_tia_resistance2.dev_attr.attr, + &afe440x_attr_tia_capacitance2.dev_attr.attr, + &dev_attr_tia_resistance_available.attr, + &dev_attr_tia_capacitance_available.attr, + NULL +}; + +static const struct attribute_group afe440x_attribute_group = { + .attrs = afe440x_attributes +}; + +static int afe4404_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct afe4404_data *afe = iio_priv(indio_dev); + const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address]; + int ret; + + switch (chan->type) { + case IIO_INTENSITY: + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = regmap_read(afe->regmap, reg_info.reg, val); + if (ret) + return ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_OFFSET: + ret = regmap_read(afe->regmap, reg_info.offreg, + val); + if (ret) + return ret; + *val &= reg_info.mask; + *val >>= reg_info.shift; + return IIO_VAL_INT; + } + break; + case IIO_CURRENT: + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = regmap_read(afe->regmap, reg_info.reg, val); + if (ret) + return ret; + *val &= reg_info.mask; + *val >>= reg_info.shift; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = 0; + *val2 = 800000; + return IIO_VAL_INT_PLUS_MICRO; + } + break; + default: + break; + } + + return -EINVAL; +} + +static int afe4404_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct afe4404_data *afe = iio_priv(indio_dev); + const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address]; + + switch (chan->type) { + case IIO_INTENSITY: + switch (mask) { + case IIO_CHAN_INFO_OFFSET: + return regmap_update_bits(afe->regmap, + reg_info.offreg, + reg_info.mask, + (val << reg_info.shift)); + } + break; + case IIO_CURRENT: + switch (mask) { + case IIO_CHAN_INFO_RAW: + return regmap_update_bits(afe->regmap, + reg_info.reg, + reg_info.mask, + (val << reg_info.shift)); + } + break; + 
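/*
 * For illustration only (not part of the driver code): the SCALE value of
 * 0.800000 reported for the LED current channels means one LSB of an ILEDx
 * field corresponds to roughly 0.8 mA in the usual IIO milliamp unit for
 * current channels, so writing a raw value of 15 selects about 12 mA of
 * LED drive.
 */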
default: + break; + } + + return -EINVAL; +} + +static const struct iio_info afe4404_iio_info = { + .attrs = &afe440x_attribute_group, + .read_raw = afe4404_read_raw, + .write_raw = afe4404_write_raw, + .driver_module = THIS_MODULE, +}; + +static irqreturn_t afe4404_trigger_handler(int irq, void *private) +{ + struct iio_poll_func *pf = private; + struct iio_dev *indio_dev = pf->indio_dev; + struct afe4404_data *afe = iio_priv(indio_dev); + int ret, bit, i = 0; + s32 buffer[10]; + + for_each_set_bit(bit, indio_dev->active_scan_mask, + indio_dev->masklength) { + ret = regmap_read(afe->regmap, afe4404_reg_info[bit].reg, + &buffer[i++]); + if (ret) + goto err; + } + + iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); +err: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static const struct iio_trigger_ops afe4404_trigger_ops = { + .owner = THIS_MODULE, +}; + +/* Default timings from data-sheet */ +#define AFE4404_TIMING_PAIRS \ + { AFE440X_PRPCOUNT, 39999 }, \ + { AFE440X_LED2LEDSTC, 0 }, \ + { AFE440X_LED2LEDENDC, 398 }, \ + { AFE440X_LED2STC, 80 }, \ + { AFE440X_LED2ENDC, 398 }, \ + { AFE440X_ADCRSTSTCT0, 5600 }, \ + { AFE440X_ADCRSTENDCT0, 5606 }, \ + { AFE440X_LED2CONVST, 5607 }, \ + { AFE440X_LED2CONVEND, 6066 }, \ + { AFE4404_LED3LEDSTC, 400 }, \ + { AFE4404_LED3LEDENDC, 798 }, \ + { AFE440X_ALED2STC, 480 }, \ + { AFE440X_ALED2ENDC, 798 }, \ + { AFE440X_ADCRSTSTCT1, 6068 }, \ + { AFE440X_ADCRSTENDCT1, 6074 }, \ + { AFE440X_ALED2CONVST, 6075 }, \ + { AFE440X_ALED2CONVEND, 6534 }, \ + { AFE440X_LED1LEDSTC, 800 }, \ + { AFE440X_LED1LEDENDC, 1198 }, \ + { AFE440X_LED1STC, 880 }, \ + { AFE440X_LED1ENDC, 1198 }, \ + { AFE440X_ADCRSTSTCT2, 6536 }, \ + { AFE440X_ADCRSTENDCT2, 6542 }, \ + { AFE440X_LED1CONVST, 6543 }, \ + { AFE440X_LED1CONVEND, 7003 }, \ + { AFE440X_ALED1STC, 1280 }, \ + { AFE440X_ALED1ENDC, 1598 }, \ + { AFE440X_ADCRSTSTCT3, 7005 }, \ + { AFE440X_ADCRSTENDCT3, 7011 }, \ + { AFE440X_ALED1CONVST, 7012 }, \ + { AFE440X_ALED1CONVEND, 7471 }, \ + { AFE440X_PDNCYCLESTC, 7671 }, \ + { AFE440X_PDNCYCLEENDC, 39199 } + +static const struct reg_sequence afe4404_reg_sequences[] = { + AFE4404_TIMING_PAIRS, + { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN }, + { AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES_50_K }, + { AFE440X_LEDCNTRL, (0xf << AFE4404_LEDCNTRL_ILED1_SHIFT) | + (0x3 << AFE4404_LEDCNTRL_ILED2_SHIFT) | + (0x3 << AFE4404_LEDCNTRL_ILED3_SHIFT) }, + { AFE440X_CONTROL2, AFE440X_CONTROL3_OSC_ENABLE }, +}; + +static const struct regmap_range afe4404_yes_ranges[] = { + regmap_reg_range(AFE440X_LED2VAL, AFE440X_LED1_ALED1VAL), + regmap_reg_range(AFE4404_AVG_LED2_ALED2VAL, AFE4404_AVG_LED1_ALED1VAL), +}; + +static const struct regmap_access_table afe4404_volatile_table = { + .yes_ranges = afe4404_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(afe4404_yes_ranges), +}; + +static const struct regmap_config afe4404_regmap_config = { + .reg_bits = 8, + .val_bits = 24, + + .max_register = AFE4404_AVG_LED1_ALED1VAL, + .cache_type = REGCACHE_RBTREE, + .volatile_table = &afe4404_volatile_table, +}; + +#ifdef CONFIG_OF +static const struct of_device_id afe4404_of_match[] = { + { .compatible = "ti,afe4404", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, afe4404_of_match); +#endif + +static int __maybe_unused afe4404_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct afe4404_data *afe = iio_priv(indio_dev); + int ret; + + ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2, + AFE440X_CONTROL2_PDN_AFE, + 
AFE440X_CONTROL2_PDN_AFE); + if (ret) + return ret; + + ret = regulator_disable(afe->regulator); + if (ret) { + dev_err(dev, "Unable to disable regulator\n"); + return ret; + } + + return 0; +} + +static int __maybe_unused afe4404_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct afe4404_data *afe = iio_priv(indio_dev); + int ret; + + ret = regulator_enable(afe->regulator); + if (ret) { + dev_err(dev, "Unable to enable regulator\n"); + return ret; + } + + ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2, + AFE440X_CONTROL2_PDN_AFE, 0); + if (ret) + return ret; + + return 0; +} + +static SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend, afe4404_resume); + +static int afe4404_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct iio_dev *indio_dev; + struct afe4404_data *afe; + int ret; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe)); + if (!indio_dev) + return -ENOMEM; + + afe = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); + + afe->dev = &client->dev; + afe->irq = client->irq; + + afe->regmap = devm_regmap_init_i2c(client, &afe4404_regmap_config); + if (IS_ERR(afe->regmap)) { + dev_err(afe->dev, "Unable to allocate register map\n"); + return PTR_ERR(afe->regmap); + } + + afe->regulator = devm_regulator_get(afe->dev, "tx_sup"); + if (IS_ERR(afe->regulator)) { + dev_err(afe->dev, "Unable to get regulator\n"); + return PTR_ERR(afe->regulator); + } + ret = regulator_enable(afe->regulator); + if (ret) { + dev_err(afe->dev, "Unable to enable regulator\n"); + return ret; + } + + ret = regmap_write(afe->regmap, AFE440X_CONTROL0, + AFE440X_CONTROL0_SW_RESET); + if (ret) { + dev_err(afe->dev, "Unable to reset device\n"); + goto disable_reg; + } + + ret = regmap_multi_reg_write(afe->regmap, afe4404_reg_sequences, + ARRAY_SIZE(afe4404_reg_sequences)); + if (ret) { + dev_err(afe->dev, "Unable to set register defaults\n"); + goto disable_reg; + } + + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->dev.parent = afe->dev; + indio_dev->channels = afe4404_channels; + indio_dev->num_channels = ARRAY_SIZE(afe4404_channels); + indio_dev->name = AFE4404_DRIVER_NAME; + indio_dev->info = &afe4404_iio_info; + + if (afe->irq > 0) { + afe->trig = devm_iio_trigger_alloc(afe->dev, + "%s-dev%d", + indio_dev->name, + indio_dev->id); + if (!afe->trig) { + dev_err(afe->dev, "Unable to allocate IIO trigger\n"); + ret = -ENOMEM; + goto disable_reg; + } + + iio_trigger_set_drvdata(afe->trig, indio_dev); + + afe->trig->ops = &afe4404_trigger_ops; + afe->trig->dev.parent = afe->dev; + + ret = iio_trigger_register(afe->trig); + if (ret) { + dev_err(afe->dev, "Unable to register IIO trigger\n"); + goto disable_reg; + } + + ret = devm_request_threaded_irq(afe->dev, afe->irq, + iio_trigger_generic_data_rdy_poll, + NULL, IRQF_ONESHOT, + AFE4404_DRIVER_NAME, + afe->trig); + if (ret) { + dev_err(afe->dev, "Unable to request IRQ\n"); + goto disable_reg; + } + } + + ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, + afe4404_trigger_handler, NULL); + if (ret) { + dev_err(afe->dev, "Unable to setup buffer\n"); + goto unregister_trigger; + } + + ret = iio_device_register(indio_dev); + if (ret) { + dev_err(afe->dev, "Unable to register IIO device\n"); + goto unregister_triggered_buffer; + } + + return 0; + +unregister_triggered_buffer: + iio_triggered_buffer_cleanup(indio_dev); +unregister_trigger: + if (afe->irq > 0) + iio_trigger_unregister(afe->trig); +disable_reg: + regulator_disable(afe->regulator); + + 
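/*
 * For illustration only (not part of the driver code): the error labels above
 * unwind in reverse order of setup - triggered buffer, then trigger, then the
 * "tx_sup" regulator - while the devm-managed pieces (IIO device, regmap,
 * trigger allocation and threaded IRQ) are released automatically once probe
 * returns an error.
 */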
return ret; +} + +static int afe4404_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct afe4404_data *afe = iio_priv(indio_dev); + int ret; + + iio_device_unregister(indio_dev); + + iio_triggered_buffer_cleanup(indio_dev); + + if (afe->irq > 0) + iio_trigger_unregister(afe->trig); + + ret = regulator_disable(afe->regulator); + if (ret) { + dev_err(afe->dev, "Unable to disable regulator\n"); + return ret; + } + + return 0; +} + +static const struct i2c_device_id afe4404_ids[] = { + { "afe4404", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, afe4404_ids); + +static struct i2c_driver afe4404_i2c_driver = { + .driver = { + .name = AFE4404_DRIVER_NAME, + .of_match_table = of_match_ptr(afe4404_of_match), + .pm = &afe4404_pm_ops, + }, + .probe = afe4404_probe, + .remove = afe4404_remove, + .id_table = afe4404_ids, +}; +module_i2c_driver(afe4404_i2c_driver); + +MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>"); +MODULE_DESCRIPTION("TI AFE4404 Heart Rate and Pulse Oximeter"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/health/afe440x.h b/drivers/iio/health/afe440x.h new file mode 100644 index 000000000000..c671ab78a23a --- /dev/null +++ b/drivers/iio/health/afe440x.h @@ -0,0 +1,191 @@ +/* + * AFE440X Heart Rate Monitors and Low-Cost Pulse Oximeters + * + * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Andrew F. Davis <afd@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef _AFE440X_H +#define _AFE440X_H + +/* AFE440X registers */ +#define AFE440X_CONTROL0 0x00 +#define AFE440X_LED2STC 0x01 +#define AFE440X_LED2ENDC 0x02 +#define AFE440X_LED1LEDSTC 0x03 +#define AFE440X_LED1LEDENDC 0x04 +#define AFE440X_ALED2STC 0x05 +#define AFE440X_ALED2ENDC 0x06 +#define AFE440X_LED1STC 0x07 +#define AFE440X_LED1ENDC 0x08 +#define AFE440X_LED2LEDSTC 0x09 +#define AFE440X_LED2LEDENDC 0x0a +#define AFE440X_ALED1STC 0x0b +#define AFE440X_ALED1ENDC 0x0c +#define AFE440X_LED2CONVST 0x0d +#define AFE440X_LED2CONVEND 0x0e +#define AFE440X_ALED2CONVST 0x0f +#define AFE440X_ALED2CONVEND 0x10 +#define AFE440X_LED1CONVST 0x11 +#define AFE440X_LED1CONVEND 0x12 +#define AFE440X_ALED1CONVST 0x13 +#define AFE440X_ALED1CONVEND 0x14 +#define AFE440X_ADCRSTSTCT0 0x15 +#define AFE440X_ADCRSTENDCT0 0x16 +#define AFE440X_ADCRSTSTCT1 0x17 +#define AFE440X_ADCRSTENDCT1 0x18 +#define AFE440X_ADCRSTSTCT2 0x19 +#define AFE440X_ADCRSTENDCT2 0x1a +#define AFE440X_ADCRSTSTCT3 0x1b +#define AFE440X_ADCRSTENDCT3 0x1c +#define AFE440X_PRPCOUNT 0x1d +#define AFE440X_CONTROL1 0x1e +#define AFE440X_LEDCNTRL 0x22 +#define AFE440X_CONTROL2 0x23 +#define AFE440X_ALARM 0x29 +#define AFE440X_LED2VAL 0x2a +#define AFE440X_ALED2VAL 0x2b +#define AFE440X_LED1VAL 0x2c +#define AFE440X_ALED1VAL 0x2d +#define AFE440X_LED2_ALED2VAL 0x2e +#define AFE440X_LED1_ALED1VAL 0x2f +#define AFE440X_CONTROL3 0x31 +#define AFE440X_PDNCYCLESTC 0x32 +#define AFE440X_PDNCYCLEENDC 0x33 + +/* CONTROL0 register fields */ +#define AFE440X_CONTROL0_REG_READ BIT(0) +#define AFE440X_CONTROL0_TM_COUNT_RST BIT(1) +#define AFE440X_CONTROL0_SW_RESET BIT(3) + +/* CONTROL1 register fields */ +#define AFE440X_CONTROL1_TIMEREN BIT(8) + +/* TIAGAIN register fields */ +#define AFE440X_TIAGAIN_ENSEPGAIN_MASK BIT(15) +#define AFE440X_TIAGAIN_ENSEPGAIN_SHIFT 15 + +/* CONTROL2 register fields */ +#define AFE440X_CONTROL2_PDN_AFE BIT(0) +#define AFE440X_CONTROL2_PDN_RX BIT(1) +#define AFE440X_CONTROL2_DYNAMIC4 BIT(3) +#define AFE440X_CONTROL2_DYNAMIC3 BIT(4) +#define AFE440X_CONTROL2_DYNAMIC2 BIT(14) +#define AFE440X_CONTROL2_DYNAMIC1 BIT(20) + +/* CONTROL3 register fields */ +#define AFE440X_CONTROL3_CLKDIV GENMASK(2, 0) + +/* CONTROL0 values */ +#define AFE440X_CONTROL0_WRITE 0x0 +#define AFE440X_CONTROL0_READ 0x1 + +struct afe440x_reg_info { + unsigned int reg; + unsigned int offreg; + unsigned int shift; + unsigned int mask; +}; + +#define AFE440X_REG_INFO(_reg, _offreg, _sm) \ + { \ + .reg = _reg, \ + .offreg = _offreg, \ + .shift = _sm ## _SHIFT, \ + .mask = _sm ## _MASK, \ + } + +#define AFE440X_INTENSITY_CHAN(_index, _name, _mask) \ + { \ + .type = IIO_INTENSITY, \ + .channel = _index, \ + .address = _index, \ + .scan_index = _index, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 24, \ + .storagebits = 32, \ + .endianness = IIO_CPU, \ + }, \ + .extend_name = _name, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + _mask, \ + } + +#define AFE440X_CURRENT_CHAN(_index, _name) \ + { \ + .type = IIO_CURRENT, \ + .channel = _index, \ + .address = _index, \ + .scan_index = _index, \ + .extend_name = _name, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE), \ + .output = true, \ + } + +enum afe440x_reg_type { + SIMPLE, + RESISTANCE, + CAPACITANCE, +}; + +struct afe440x_val_table { + int integer; + int fract; +}; + +#define AFE440X_TABLE_ATTR(_name, _table) \ +static ssize_t _name ## _show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + ssize_t len = 0; \ + int i; \ + \ + 
for (i = 0; i < ARRAY_SIZE(_table); i++) \ + len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ", \ + _table[i].integer, \ + _table[i].fract); \ + \ + buf[len - 1] = '\n'; \ + \ + return len; \ +} \ +static DEVICE_ATTR_RO(_name) + +struct afe440x_attr { + struct device_attribute dev_attr; + unsigned int reg; + unsigned int shift; + unsigned int mask; + enum afe440x_reg_type type; + const struct afe440x_val_table *val_table; + unsigned int table_size; +}; + +#define to_afe440x_attr(_dev_attr) \ + container_of(_dev_attr, struct afe440x_attr, dev_attr) + +#define AFE440X_ATTR(_name, _reg, _field, _type, _table, _size) \ + struct afe440x_attr afe440x_attr_##_name = { \ + .dev_attr = __ATTR(_name, (S_IRUGO | S_IWUSR), \ + afe440x_show_register, \ + afe440x_store_register), \ + .reg = _reg, \ + .shift = _field ## _SHIFT, \ + .mask = _field ## _MASK, \ + .type = _type, \ + .val_table = _table, \ + .table_size = _size, \ + } + +#endif /* _AFE440X_H */ diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c index 9d1c81f91dd7..09db89359544 100644 --- a/drivers/iio/health/max30100.c +++ b/drivers/iio/health/max30100.c @@ -13,7 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * TODO: allow LED current and pulse length controls via device tree properties + * TODO: enable pulse length controls via device tree properties */ #include <linux/module.h> @@ -24,6 +24,7 @@ #include <linux/irq.h> #include <linux/i2c.h> #include <linux/mutex.h> +#include <linux/of.h> #include <linux/regmap.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> @@ -65,6 +66,7 @@ #define MAX30100_REG_SPO2_CONFIG_1600US 0x3 #define MAX30100_REG_LED_CONFIG 0x09 +#define MAX30100_REG_LED_CONFIG_LED_MASK 0x0f #define MAX30100_REG_LED_CONFIG_RED_LED_SHIFT 4 #define MAX30100_REG_LED_CONFIG_24MA 0x07 @@ -111,6 +113,12 @@ static const struct regmap_config max30100_regmap_config = { .volatile_reg = max30100_is_volatile_reg, }; +static const unsigned int max30100_led_current_mapping[] = { + 4400, 7600, 11000, 14200, 17400, + 20800, 24000, 27100, 30600, 33800, + 37000, 40200, 43600, 46800, 50000 +}; + static const unsigned long max30100_scan_masks[] = {0x3, 0}; static const struct iio_chan_spec max30100_channels[] = { @@ -243,15 +251,76 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private) return IRQ_HANDLED; } +static int max30100_get_current_idx(unsigned int val, int *reg) +{ + int idx; + + /* LED turned off */ + if (val == 0) { + *reg = 0; + return 0; + } + + for (idx = 0; idx < ARRAY_SIZE(max30100_led_current_mapping); idx++) { + if (max30100_led_current_mapping[idx] == val) { + *reg = idx + 1; + return 0; + } + } + + return -EINVAL; +} + +static int max30100_led_init(struct max30100_data *data) +{ + struct device *dev = &data->client->dev; + struct device_node *np = dev->of_node; + unsigned int val[2]; + int reg, ret; + + ret = of_property_read_u32_array(np, "maxim,led-current-microamp", + (unsigned int *) &val, 2); + if (ret) { + /* Default to 24 mA RED LED, 50 mA IR LED */ + reg = (MAX30100_REG_LED_CONFIG_24MA << + MAX30100_REG_LED_CONFIG_RED_LED_SHIFT) | + MAX30100_REG_LED_CONFIG_50MA; + dev_warn(dev, "no led-current-microamp set"); + + return regmap_write(data->regmap, MAX30100_REG_LED_CONFIG, reg); + } + + /* RED LED current */ + ret = max30100_get_current_idx(val[0], &reg); + if (ret) { + dev_err(dev, "invalid RED current setting %d", val[0]); + return ret; + } + + ret = regmap_update_bits(data->regmap, 
MAX30100_REG_LED_CONFIG, + MAX30100_REG_LED_CONFIG_LED_MASK << + MAX30100_REG_LED_CONFIG_RED_LED_SHIFT, + reg << MAX30100_REG_LED_CONFIG_RED_LED_SHIFT); + if (ret) + return ret; + + /* IR LED current */ + ret = max30100_get_current_idx(val[1], &reg); + if (ret) { + dev_err(dev, "invalid IR current setting %d", val[1]); + return ret; + } + + return regmap_update_bits(data->regmap, MAX30100_REG_LED_CONFIG, + MAX30100_REG_LED_CONFIG_LED_MASK, reg); +} + static int max30100_chip_init(struct max30100_data *data) { int ret; - /* RED IR LED = 24mA, IR LED = 50mA */ - ret = regmap_write(data->regmap, MAX30100_REG_LED_CONFIG, - (MAX30100_REG_LED_CONFIG_24MA << - MAX30100_REG_LED_CONFIG_RED_LED_SHIFT) | - MAX30100_REG_LED_CONFIG_50MA); + /* setup LED current settings */ + ret = max30100_led_init(data); if (ret) return ret; diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig index 6a23698d347c..866dda133336 100644 --- a/drivers/iio/humidity/Kconfig +++ b/drivers/iio/humidity/Kconfig @@ -43,14 +43,16 @@ config SI7005 humidity and temperature sensor. To compile this driver as a module, choose M here: the module - will be called si7005. + will be called si7005. This driver also + supports Hoperf TH02 Humidity and Temperature Sensor. config SI7020 tristate "Si7013/20/21 Relative Humidity and Temperature Sensors" depends on I2C help Say yes here to build support for the Silicon Labs Si7013/20/21 - Relative Humidity and Temperature Sensors. + Relative Humidity and Temperature Sensors. This driver also + supports Hoperf TH06 Humidity and Temperature Sensor. To compile this driver as a module, choose M here: the module will be called si7020. diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c index cfc5a051ab9f..20b500da94db 100644 --- a/drivers/iio/humidity/dht11.c +++ b/drivers/iio/humidity/dht11.c @@ -50,12 +50,32 @@ #define DHT11_EDGES_PER_READ (2 * DHT11_BITS_PER_READ + \ DHT11_EDGES_PREAMBLE + 1) -/* Data transmission timing (nano seconds) */ +/* + * Data transmission timing: + * Data bits are encoded as pulse length (high time) on the data line. + * 0-bit: 22-30uS -- typically 26uS (AM2302) + * 1-bit: 68-75uS -- typically 70uS (AM2302) + * The actual timings also depend on the properties of the cable, with + * longer cables typically making pulses shorter. + * + * Our decoding depends on the time resolution of the system: + * timeres > 34uS ... don't know what a 1-tick pulse is + * 34uS > timeres > 30uS ... no problem (30kHz and 32kHz clocks) + * 30uS > timeres > 23uS ... don't know what a 2-tick pulse is + * timeres < 23uS ... no problem + * + * Luckily clocks in the 33-44kHz range are quite uncommon, so we can + * support most systems if the threshold for decoding a pulse as 1-bit + * is chosen carefully. If somebody really wants to support clocks around + * 40kHz, where this driver is most unreliable, there are two options. 
+ * a) select an implementation using busy loop polling on those systems + * b) use the checksum to do some probabilistic decoding + */ #define DHT11_START_TRANSMISSION 18 /* ms */ -#define DHT11_SENSOR_RESPONSE 80000 -#define DHT11_START_BIT 50000 -#define DHT11_DATA_BIT_LOW 27000 -#define DHT11_DATA_BIT_HIGH 70000 +#define DHT11_MIN_TIMERES 34000 /* ns */ +#define DHT11_THRESHOLD 49000 /* ns */ +#define DHT11_AMBIG_LOW 23000 /* ns */ +#define DHT11_AMBIG_HIGH 30000 /* ns */ struct dht11 { struct device *dev; @@ -76,43 +96,39 @@ struct dht11 { struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ]; }; -static unsigned char dht11_decode_byte(int *timing, int threshold) +static unsigned char dht11_decode_byte(char *bits) { unsigned char ret = 0; int i; for (i = 0; i < 8; ++i) { ret <<= 1; - if (timing[i] >= threshold) + if (bits[i]) ++ret; } return ret; } -static int dht11_decode(struct dht11 *dht11, int offset, int timeres) +static int dht11_decode(struct dht11 *dht11, int offset) { - int i, t, timing[DHT11_BITS_PER_READ], threshold; + int i, t; + char bits[DHT11_BITS_PER_READ]; unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum; - threshold = DHT11_DATA_BIT_HIGH / timeres; - if (DHT11_DATA_BIT_LOW / timeres + 1 >= threshold) - pr_err("dht11: WARNING: decoding ambiguous\n"); - - /* scale down with timeres and check validity */ for (i = 0; i < DHT11_BITS_PER_READ; ++i) { t = dht11->edges[offset + 2 * i + 2].ts - dht11->edges[offset + 2 * i + 1].ts; if (!dht11->edges[offset + 2 * i + 1].value) return -EIO; /* lost synchronisation */ - timing[i] = t / timeres; + bits[i] = t > DHT11_THRESHOLD; } - hum_int = dht11_decode_byte(timing, threshold); - hum_dec = dht11_decode_byte(&timing[8], threshold); - temp_int = dht11_decode_byte(&timing[16], threshold); - temp_dec = dht11_decode_byte(&timing[24], threshold); - checksum = dht11_decode_byte(&timing[32], threshold); + hum_int = dht11_decode_byte(bits); + hum_dec = dht11_decode_byte(&bits[8]); + temp_int = dht11_decode_byte(&bits[16]); + temp_dec = dht11_decode_byte(&bits[24]); + checksum = dht11_decode_byte(&bits[32]); if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum) return -EIO; @@ -161,12 +177,12 @@ static int dht11_read_raw(struct iio_dev *iio_dev, int *val, int *val2, long m) { struct dht11 *dht11 = iio_priv(iio_dev); - int ret, timeres; + int ret, timeres, offset; mutex_lock(&dht11->lock); if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) { timeres = ktime_get_resolution_ns(); - if (DHT11_DATA_BIT_HIGH < 2 * timeres) { + if (timeres > DHT11_MIN_TIMERES) { dev_err(dht11->dev, "timeresolution %dns too low\n", timeres); /* In theory a better clock could become available @@ -176,6 +192,10 @@ static int dht11_read_raw(struct iio_dev *iio_dev, ret = -EAGAIN; goto err; } + if (timeres > DHT11_AMBIG_LOW && timeres < DHT11_AMBIG_HIGH) + dev_warn(dht11->dev, + "timeresolution: %dns - decoding ambiguous\n", + timeres); reinit_completion(&dht11->completion); @@ -208,11 +228,14 @@ static int dht11_read_raw(struct iio_dev *iio_dev, if (ret < 0) goto err; - ret = dht11_decode(dht11, - dht11->num_edges == DHT11_EDGES_PER_READ ? 
- DHT11_EDGES_PREAMBLE : - DHT11_EDGES_PREAMBLE - 2, - timeres); + offset = DHT11_EDGES_PREAMBLE + + dht11->num_edges - DHT11_EDGES_PER_READ; + for (; offset >= 0; --offset) { + ret = dht11_decode(dht11, offset); + if (!ret) + break; + } + if (ret) goto err; } diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index a7f61e881a49..fa4767613173 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -274,7 +274,7 @@ static int hdc100x_probe(struct i2c_client *client, if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE)) - return -ENODEV; + return -EOPNOTSUPP; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c index d1636a74980e..11cbc38b450f 100644 --- a/drivers/iio/humidity/htu21.c +++ b/drivers/iio/humidity/htu21.c @@ -192,7 +192,7 @@ static int htu21_probe(struct i2c_client *client, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { dev_err(&client->dev, "Adapter does not support some i2c transaction\n"); - return -ENODEV; + return -EOPNOTSUPP; } indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data)); diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c index 91972ccd8aaf..6297766e93d0 100644 --- a/drivers/iio/humidity/si7005.c +++ b/drivers/iio/humidity/si7005.c @@ -135,7 +135,7 @@ static int si7005_probe(struct i2c_client *client, int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) - return -ENODEV; + return -EOPNOTSUPP; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) @@ -170,6 +170,7 @@ static int si7005_probe(struct i2c_client *client, static const struct i2c_device_id si7005_id[] = { { "si7005", 0 }, + { "th02", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, si7005_id); diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c index 71991b5c0658..ffc2ccf6374e 100644 --- a/drivers/iio/humidity/si7020.c +++ b/drivers/iio/humidity/si7020.c @@ -121,7 +121,7 @@ static int si7020_probe(struct i2c_client *client, if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE | I2C_FUNC_SMBUS_READ_WORD_DATA)) - return -ENODEV; + return -EOPNOTSUPP; /* Reset device, loads default settings. */ ret = i2c_smbus_write_byte(client, SI7020CMD_RESET); @@ -149,6 +149,7 @@ static int si7020_probe(struct i2c_client *client, static const struct i2c_device_id si7020_id[] = { { "si7020", 0 }, + { "th06", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, si7020_id); diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig index 8f8d1370ed8b..a7f557af4389 100644 --- a/drivers/iio/imu/inv_mpu6050/Kconfig +++ b/drivers/iio/imu/inv_mpu6050/Kconfig @@ -3,15 +3,31 @@ # config INV_MPU6050_IIO - tristate "Invensense MPU6050 devices" - depends on I2C && SYSFS - depends on I2C_MUX + tristate select IIO_BUFFER select IIO_TRIGGERED_BUFFER + +config INV_MPU6050_I2C + tristate "Invensense MPU6050 devices (I2C)" + depends on I2C + select INV_MPU6050_IIO + select I2C_MUX + select REGMAP_I2C help This driver supports the Invensense MPU6050 devices. This driver can also support MPU6500 in MPU6050 compatibility mode and also in MPU6500 mode with some limitations. It is a gyroscope/accelerometer combo device. This driver can be built as a module. The module will be called - inv-mpu6050. + inv-mpu6050-i2c. 
+ +config INV_MPU6050_SPI + tristate "Invensense MPU6050 devices (SPI)" + depends on SPI_MASTER + select INV_MPU6050_IIO + select REGMAP_SPI + help + This driver supports the Invensense MPU6050 devices. + It is a gyroscope/accelerometer combo device. + This driver can be built as a module. The module will be called + inv-mpu6050-spi. diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile index f566f6a7b3a9..734af5e6cef9 100644 --- a/drivers/iio/imu/inv_mpu6050/Makefile +++ b/drivers/iio/imu/inv_mpu6050/Makefile @@ -3,4 +3,10 @@ # obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o -inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o inv_mpu_acpi.o +inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o + +obj-$(CONFIG_INV_MPU6050_I2C) += inv-mpu6050-i2c.o +inv-mpu6050-i2c-objs := inv_mpu_i2c.o inv_mpu_acpi.o + +obj-$(CONFIG_INV_MPU6050_SPI) += inv-mpu6050-spi.o +inv-mpu6050-spi-objs := inv_mpu_spi.o diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c index 1c982a56acd5..2771106fd650 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c @@ -66,11 +66,11 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev, union acpi_object *elem; int j; - elem = &(cpm->package.elements[i]); + elem = &cpm->package.elements[i]; for (j = 0; j < elem->package.count; ++j) { union acpi_object *sub_elem; - sub_elem = &(elem->package.elements[j]); + sub_elem = &elem->package.elements[j]; if (sub_elem->type == ACPI_TYPE_STRING) strlcpy(info->type, sub_elem->string.pointer, sizeof(info->type)); @@ -139,22 +139,23 @@ static int inv_mpu_process_acpi_config(struct i2c_client *client, return 0; } -int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st) +int inv_mpu_acpi_create_mux_client(struct i2c_client *client) { + struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev)); st->mux_client = NULL; - if (ACPI_HANDLE(&st->client->dev)) { + if (ACPI_HANDLE(&client->dev)) { struct i2c_board_info info; struct acpi_device *adev; int ret = -1; - adev = ACPI_COMPANION(&st->client->dev); + adev = ACPI_COMPANION(&client->dev); memset(&info, 0, sizeof(info)); dmi_check_system(inv_mpu_dev_list); switch (matched_product_name) { case INV_MPU_ASUS_T100TA: - ret = asus_acpi_get_sensor_info(adev, st->client, + ret = asus_acpi_get_sensor_info(adev, client, &info); break; /* Add more matched product processing here */ @@ -166,7 +167,7 @@ int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st) /* No matching DMI, so create device on INV6XX type */ unsigned short primary, secondary; - ret = inv_mpu_process_acpi_config(st->client, &primary, + ret = inv_mpu_process_acpi_config(client, &primary, &secondary); if (!ret && secondary) { char *name; @@ -185,14 +186,15 @@ int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st) st->mux_client = i2c_new_device(st->mux_adapter, &info); if (!st->mux_client) return -ENODEV; - } return 0; } -void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st) +void inv_mpu_acpi_delete_mux_client(struct i2c_client *client) { + struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev)); + if (st->mux_client) i2c_unregister_device(st->mux_client); } @@ -200,12 +202,12 @@ void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st) #include "inv_mpu_iio.h" -int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st) +int inv_mpu_acpi_create_mux_client(struct i2c_client *client) { 
return 0; } -void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st) +void inv_mpu_acpi_delete_mux_client(struct i2c_client *client) { } #endif diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index f0e06093b5e8..d192953e9a38 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -39,6 +39,26 @@ static const int gyro_scale_6050[] = {133090, 266181, 532362, 1064724}; */ static const int accel_scale[] = {598, 1196, 2392, 4785}; +static const struct inv_mpu6050_reg_map reg_set_6500 = { + .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, + .lpf = INV_MPU6050_REG_CONFIG, + .user_ctrl = INV_MPU6050_REG_USER_CTRL, + .fifo_en = INV_MPU6050_REG_FIFO_EN, + .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, + .accl_config = INV_MPU6050_REG_ACCEL_CONFIG, + .fifo_count_h = INV_MPU6050_REG_FIFO_COUNT_H, + .fifo_r_w = INV_MPU6050_REG_FIFO_R_W, + .raw_gyro = INV_MPU6050_REG_RAW_GYRO, + .raw_accl = INV_MPU6050_REG_RAW_ACCEL, + .temperature = INV_MPU6050_REG_TEMPERATURE, + .int_enable = INV_MPU6050_REG_INT_ENABLE, + .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1, + .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2, + .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG, + .accl_offset = INV_MPU6500_REG_ACCEL_OFFSET, + .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET, +}; + static const struct inv_mpu6050_reg_map reg_set_6050 = { .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, .lpf = INV_MPU6050_REG_CONFIG, @@ -55,6 +75,8 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = { .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1, .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2, .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG, + .accl_offset = INV_MPU6050_REG_ACCEL_OFFSET, + .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET, }; static const struct inv_mpu6050_chip_config chip_config_6050 = { @@ -66,7 +88,13 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = { .accl_fs = INV_MPU6050_FS_02G, }; -static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = { +static const struct inv_mpu6050_hw hw_info[] = { + { + .num_reg = 117, + .name = "MPU6500", + .reg = ®_set_6500, + .config = &chip_config_6050, + }, { .num_reg = 117, .name = "MPU6050", @@ -75,134 +103,53 @@ static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = { }, }; -int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d) -{ - return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &d); -} - -/* - * The i2c read/write needs to happen in unlocked mode. As the parent - * adapter is common. If we use locked versions, it will fail as - * the mux adapter will lock the parent i2c adapter, while calling - * select/deselect functions. 
- */ -static int inv_mpu6050_write_reg_unlocked(struct inv_mpu6050_state *st, - u8 reg, u8 d) -{ - int ret; - u8 buf[2]; - struct i2c_msg msg[1] = { - { - .addr = st->client->addr, - .flags = 0, - .len = sizeof(buf), - .buf = buf, - } - }; - - buf[0] = reg; - buf[1] = d; - ret = __i2c_transfer(st->client->adapter, msg, 1); - if (ret != 1) - return ret; - - return 0; -} - -static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv, - u32 chan_id) -{ - struct iio_dev *indio_dev = mux_priv; - struct inv_mpu6050_state *st = iio_priv(indio_dev); - int ret = 0; - - /* Use the same mutex which was used everywhere to protect power-op */ - mutex_lock(&indio_dev->mlock); - if (!st->powerup_count) { - ret = inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1, - 0); - if (ret) - goto write_error; - - msleep(INV_MPU6050_REG_UP_TIME); - } - if (!ret) { - st->powerup_count++; - ret = inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg, - st->client->irq | - INV_MPU6050_BIT_BYPASS_EN); - } -write_error: - mutex_unlock(&indio_dev->mlock); - - return ret; -} - -static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap, - void *mux_priv, u32 chan_id) -{ - struct iio_dev *indio_dev = mux_priv; - struct inv_mpu6050_state *st = iio_priv(indio_dev); - - mutex_lock(&indio_dev->mlock); - /* It doesn't really mattter, if any of the calls fails */ - inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg, - st->client->irq); - st->powerup_count--; - if (!st->powerup_count) - inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1, - INV_MPU6050_BIT_SLEEP); - mutex_unlock(&indio_dev->mlock); - - return 0; -} - int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask) { - u8 d, mgmt_1; + unsigned int d, mgmt_1; int result; - - /* switch clock needs to be careful. Only when gyro is on, can - clock source be switched to gyro. Otherwise, it must be set to - internal clock */ - if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) { - result = i2c_smbus_read_i2c_block_data(st->client, - st->reg->pwr_mgmt_1, 1, &mgmt_1); - if (result != 1) + /* + * switch clock needs to be careful. Only when gyro is on, can + * clock source be switched to gyro. Otherwise, it must be set to + * internal clock + */ + if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) { + result = regmap_read(st->map, st->reg->pwr_mgmt_1, &mgmt_1); + if (result) return result; mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK; } - if ((INV_MPU6050_BIT_PWR_GYRO_STBY == mask) && (!en)) { - /* turning off gyro requires switch to internal clock first. - Then turn off gyro engine */ + if ((mask == INV_MPU6050_BIT_PWR_GYRO_STBY) && (!en)) { + /* + * turning off gyro requires switch to internal clock first. 
+ * Then turn off gyro engine + */ mgmt_1 |= INV_CLK_INTERNAL; - result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, mgmt_1); + result = regmap_write(st->map, st->reg->pwr_mgmt_1, mgmt_1); if (result) return result; } - result = i2c_smbus_read_i2c_block_data(st->client, - st->reg->pwr_mgmt_2, 1, &d); - if (result != 1) + result = regmap_read(st->map, st->reg->pwr_mgmt_2, &d); + if (result) return result; if (en) d &= ~mask; else d |= mask; - result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_2, d); + result = regmap_write(st->map, st->reg->pwr_mgmt_2, d); if (result) return result; if (en) { /* Wait for output stabilize */ msleep(INV_MPU6050_TEMP_UP_TIME); - if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) { + if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) { /* switch internal clock to PLL */ mgmt_1 |= INV_CLK_PLL; - result = inv_mpu6050_write_reg(st, - st->reg->pwr_mgmt_1, mgmt_1); + result = regmap_write(st->map, + st->reg->pwr_mgmt_1, mgmt_1); if (result) return result; } @@ -218,25 +165,26 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on) if (power_on) { /* Already under indio-dev->mlock mutex */ if (!st->powerup_count) - result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, - 0); + result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0); if (!result) st->powerup_count++; } else { st->powerup_count--; if (!st->powerup_count) - result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, - INV_MPU6050_BIT_SLEEP); + result = regmap_write(st->map, st->reg->pwr_mgmt_1, + INV_MPU6050_BIT_SLEEP); } if (result) return result; if (power_on) - msleep(INV_MPU6050_REG_UP_TIME); + usleep_range(INV_MPU6050_REG_UP_TIME_MIN, + INV_MPU6050_REG_UP_TIME_MAX); return 0; } +EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); /** * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. 
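The hunk above swaps the driver's private i2c_smbus helpers and the hand-rolled bypass handling for generic regmap accessors, which is what lets the same core sit behind either an I2C or an SPI front end. As a minimal sketch of the same read-modify-write done purely through regmap, assuming only the st->map and st->reg fields introduced in this series (the helper below is illustrative and not part of the patch, which keeps the explicit read/write sequence because the pwr_mgmt_1 clock selection has to be interleaved with it):

static int inv_mpu6050_stby_update(struct inv_mpu6050_state *st,
				   unsigned int mask, bool en)
{
	/* Clear the standby bits to enable an engine, set them to disable it;
	 * regmap_update_bits() performs the read-modify-write and skips the
	 * register write when nothing changes.
	 */
	return regmap_update_bits(st->map, st->reg->pwr_mgmt_2,
				  mask, en ? 0 : mask);
}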
@@ -257,59 +205,73 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev) if (result) return result; d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); - result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d); + result = regmap_write(st->map, st->reg->gyro_config, d); if (result) return result; d = INV_MPU6050_FILTER_20HZ; - result = inv_mpu6050_write_reg(st, st->reg->lpf, d); + result = regmap_write(st->map, st->reg->lpf, d); if (result) return result; d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1; - result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d); + result = regmap_write(st->map, st->reg->sample_rate_div, d); if (result) return result; d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); - result = inv_mpu6050_write_reg(st, st->reg->accl_config, d); + result = regmap_write(st->map, st->reg->accl_config, d); if (result) return result; memcpy(&st->chip_config, hw_info[st->chip_type].config, - sizeof(struct inv_mpu6050_chip_config)); + sizeof(struct inv_mpu6050_chip_config)); result = inv_mpu6050_set_power_itg(st, false); return result; } +static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg, + int axis, int val) +{ + int ind, result; + __be16 d = cpu_to_be16(val); + + ind = (axis - IIO_MOD_X) * 2; + result = regmap_bulk_write(st->map, reg + ind, (u8 *)&d, 2); + if (result) + return -EINVAL; + + return 0; +} + static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg, - int axis, int *val) + int axis, int *val) { int ind, result; __be16 d; ind = (axis - IIO_MOD_X) * 2; - result = i2c_smbus_read_i2c_block_data(st->client, reg + ind, 2, - (u8 *)&d); - if (result != 2) + result = regmap_bulk_read(st->map, reg + ind, (u8 *)&d, 2); + if (result) return -EINVAL; *val = (short)be16_to_cpup(&d); return IIO_VAL_INT; } -static int inv_mpu6050_read_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int *val, - int *val2, - long mask) { +static int +inv_mpu6050_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ struct inv_mpu6050_state *st = iio_priv(indio_dev); + int ret = 0; switch (mask) { case IIO_CHAN_INFO_RAW: { - int ret, result; + int result; ret = IIO_VAL_INT; result = 0; @@ -323,16 +285,16 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev, switch (chan->type) { case IIO_ANGL_VEL: if (!st->chip_config.gyro_fifo_enable || - !st->chip_config.enable) { + !st->chip_config.enable) { result = inv_mpu6050_switch_engine(st, true, INV_MPU6050_BIT_PWR_GYRO_STBY); if (result) goto error_read_raw; } - ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro, - chan->channel2, val); + ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro, + chan->channel2, val); if (!st->chip_config.gyro_fifo_enable || - !st->chip_config.enable) { + !st->chip_config.enable) { result = inv_mpu6050_switch_engine(st, false, INV_MPU6050_BIT_PWR_GYRO_STBY); if (result) @@ -341,16 +303,16 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev, break; case IIO_ACCEL: if (!st->chip_config.accl_fifo_enable || - !st->chip_config.enable) { + !st->chip_config.enable) { result = inv_mpu6050_switch_engine(st, true, INV_MPU6050_BIT_PWR_ACCL_STBY); if (result) goto error_read_raw; } ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl, - chan->channel2, val); + chan->channel2, val); if (!st->chip_config.accl_fifo_enable || - !st->chip_config.enable) { + !st->chip_config.enable) { result = inv_mpu6050_switch_engine(st, false, 
INV_MPU6050_BIT_PWR_ACCL_STBY); if (result) @@ -360,8 +322,8 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev, case IIO_TEMP: /* wait for stablization */ msleep(INV_MPU6050_SENSOR_UP_TIME); - inv_mpu6050_sensor_show(st, st->reg->temperature, - IIO_MOD_X, val); + ret = inv_mpu6050_sensor_show(st, st->reg->temperature, + IIO_MOD_X, val); break; default: ret = -EINVAL; @@ -405,6 +367,20 @@ error_read_raw: default: return -EINVAL; } + case IIO_CHAN_INFO_CALIBBIAS: + switch (chan->type) { + case IIO_ANGL_VEL: + ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset, + chan->channel2, val); + return IIO_VAL_INT; + case IIO_ACCEL: + ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset, + chan->channel2, val); + return IIO_VAL_INT; + + default: + return -EINVAL; + } default: return -EINVAL; } @@ -418,8 +394,7 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val) for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) { if (gyro_scale_6050[i] == val) { d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT); - result = inv_mpu6050_write_reg(st, - st->reg->gyro_config, d); + result = regmap_write(st->map, st->reg->gyro_config, d); if (result) return result; @@ -448,6 +423,7 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev, return -EINVAL; } + static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) { int result, i; @@ -456,8 +432,7 @@ static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) { if (accel_scale[i] == val) { d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT); - result = inv_mpu6050_write_reg(st, - st->reg->accl_config, d); + result = regmap_write(st->map, st->reg->accl_config, d); if (result) return result; @@ -470,16 +445,17 @@ static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) } static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int val, - int val2, - long mask) { + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ struct inv_mpu6050_state *st = iio_priv(indio_dev); int result; mutex_lock(&indio_dev->mlock); - /* we should only update scale when the chip is disabled, i.e., - not running */ + /* + * we should only update scale when the chip is disabled, i.e. + * not running + */ if (st->chip_config.enable) { result = -EBUSY; goto error_write_raw; @@ -502,6 +478,21 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, break; } break; + case IIO_CHAN_INFO_CALIBBIAS: + switch (chan->type) { + case IIO_ANGL_VEL: + result = inv_mpu6050_sensor_set(st, + st->reg->gyro_offset, + chan->channel2, val); + break; + case IIO_ACCEL: + result = inv_mpu6050_sensor_set(st, + st->reg->accl_offset, + chan->channel2, val); + break; + default: + result = -EINVAL; + } default: result = -EINVAL; break; @@ -537,7 +528,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) i++; data = d[i]; - result = inv_mpu6050_write_reg(st, st->reg->lpf, data); + result = regmap_write(st->map, st->reg->lpf, data); if (result) return result; st->chip_config.lpf = data; @@ -548,8 +539,9 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) /** * inv_mpu6050_fifo_rate_store() - Set fifo rate. 
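 *
 * Illustrative note, not taken from the file's comment: the rate is programmed
 * as a divider from the 1 kHz internal sample rate,
 * d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1, so a requested 50 Hz becomes
 * 1000 / 50 - 1 = 19 in the sample_rate_div register, and requests outside
 * INV_MPU6050_MIN_FIFO_RATE..INV_MPU6050_MAX_FIFO_RATE are rejected with
 * -EINVAL.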
*/ -static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) +static ssize_t +inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { s32 fifo_rate; u8 d; @@ -560,7 +552,7 @@ static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev, if (kstrtoint(buf, 10, &fifo_rate)) return -EINVAL; if (fifo_rate < INV_MPU6050_MIN_FIFO_RATE || - fifo_rate > INV_MPU6050_MAX_FIFO_RATE) + fifo_rate > INV_MPU6050_MAX_FIFO_RATE) return -EINVAL; if (fifo_rate == st->chip_config.fifo_rate) return count; @@ -575,7 +567,7 @@ static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev, goto fifo_rate_fail; d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1; - result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d); + result = regmap_write(st->map, st->reg->sample_rate_div, d); if (result) goto fifo_rate_fail; st->chip_config.fifo_rate = fifo_rate; @@ -596,8 +588,9 @@ fifo_rate_fail: /** * inv_fifo_rate_show() - Get the current sampling rate. */ -static ssize_t inv_fifo_rate_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t +inv_fifo_rate_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev)); @@ -608,16 +601,18 @@ static ssize_t inv_fifo_rate_show(struct device *dev, * inv_attr_show() - calling this function will show current * parameters. */ -static ssize_t inv_attr_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t inv_attr_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev)); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); s8 *m; switch (this_attr->address) { - /* In MPU6050, the two matrix are the same because gyro and accel - are integrated in one chip */ + /* + * In MPU6050, the two matrix are the same because gyro and accel + * are integrated in one chip + */ case ATTR_GYRO_MATRIX: case ATTR_ACCL_MATRIX: m = st->plat_data.orientation; @@ -654,14 +649,15 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev, .type = _type, \ .modified = 1, \ .channel2 = _channel2, \ - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_CALIBBIAS), \ .scan_index = _index, \ .scan_type = { \ .sign = 's', \ .realbits = 16, \ .storagebits = 16, \ - .shift = 0 , \ + .shift = 0, \ .endianness = IIO_BE, \ }, \ } @@ -674,7 +670,7 @@ static const struct iio_chan_spec inv_mpu_channels[] = { */ { .type = IIO_TEMP, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), .scan_index = -1, @@ -727,25 +723,25 @@ static const struct iio_info mpu_info = { /** * inv_check_and_setup_chip() - check and setup chip. 
*/ -static int inv_check_and_setup_chip(struct inv_mpu6050_state *st, - const struct i2c_device_id *id) +static int inv_check_and_setup_chip(struct inv_mpu6050_state *st) { int result; - st->chip_type = INV_MPU6050; st->hw = &hw_info[st->chip_type]; st->reg = hw_info[st->chip_type].reg; /* reset to make sure previous state are not there */ - result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, - INV_MPU6050_BIT_H_RESET); + result = regmap_write(st->map, st->reg->pwr_mgmt_1, + INV_MPU6050_BIT_H_RESET); if (result) return result; msleep(INV_MPU6050_POWER_UP_TIME); - /* toggle power state. After reset, the sleep bit could be on - or off depending on the OTP settings. Toggling power would - make it in a definite state as well as making the hardware - state align with the software state */ + /* + * toggle power state. After reset, the sleep bit could be on + * or off depending on the OTP settings. Toggling power would + * make it in a definite state as well as making the hardware + * state align with the software state + */ result = inv_mpu6050_set_power_itg(st, false); if (result) return result; @@ -754,65 +750,59 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st, return result; result = inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_ACCL_STBY); + INV_MPU6050_BIT_PWR_ACCL_STBY); if (result) return result; result = inv_mpu6050_switch_engine(st, false, - INV_MPU6050_BIT_PWR_GYRO_STBY); + INV_MPU6050_BIT_PWR_GYRO_STBY); if (result) return result; return 0; } -/** - * inv_mpu_probe() - probe function. - * @client: i2c client. - * @id: i2c device id. - * - * Returns 0 on success, a negative error code otherwise. - */ -static int inv_mpu_probe(struct i2c_client *client, - const struct i2c_device_id *id) +int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, + int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type) { struct inv_mpu6050_state *st; struct iio_dev *indio_dev; struct inv_mpu6050_platform_data *pdata; + struct device *dev = regmap_get_device(regmap); int result; - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_I2C_BLOCK)) - return -ENOSYS; - - indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st)); + indio_dev = devm_iio_device_alloc(dev, sizeof(*st)); if (!indio_dev) return -ENOMEM; st = iio_priv(indio_dev); - st->client = client; + st->chip_type = chip_type; st->powerup_count = 0; - pdata = dev_get_platdata(&client->dev); + st->irq = irq; + st->map = regmap; + pdata = dev_get_platdata(dev); if (pdata) st->plat_data = *pdata; /* power is turned on inside check chip type*/ - result = inv_check_and_setup_chip(st, id); + result = inv_check_and_setup_chip(st); if (result) return result; + if (inv_mpu_bus_setup) + inv_mpu_bus_setup(indio_dev); + result = inv_mpu6050_init_config(indio_dev); if (result) { - dev_err(&client->dev, - "Could not initialize device.\n"); + dev_err(dev, "Could not initialize device.\n"); return result; } - i2c_set_clientdata(client, indio_dev); - indio_dev->dev.parent = &client->dev; - /* id will be NULL when enumerated via ACPI */ - if (id) - indio_dev->name = (char *)id->name; + dev_set_drvdata(dev, indio_dev); + indio_dev->dev.parent = dev; + /* name will be NULL when enumerated via ACPI */ + if (name) + indio_dev->name = name; else - indio_dev->name = (char *)dev_name(&client->dev); + indio_dev->name = dev_name(dev); indio_dev->channels = inv_mpu_channels; indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels); @@ -824,13 +814,12 @@ static int inv_mpu_probe(struct i2c_client *client, 
inv_mpu6050_read_fifo, NULL); if (result) { - dev_err(&st->client->dev, "configure buffer fail %d\n", - result); + dev_err(dev, "configure buffer fail %d\n", result); return result; } result = inv_mpu6050_probe_trigger(indio_dev); if (result) { - dev_err(&st->client->dev, "trigger probe fail %d\n", result); + dev_err(dev, "trigger probe fail %d\n", result); goto out_unreg_ring; } @@ -838,102 +827,47 @@ static int inv_mpu_probe(struct i2c_client *client, spin_lock_init(&st->time_stamp_lock); result = iio_device_register(indio_dev); if (result) { - dev_err(&st->client->dev, "IIO register fail %d\n", result); + dev_err(dev, "IIO register fail %d\n", result); goto out_remove_trigger; } - st->mux_adapter = i2c_add_mux_adapter(client->adapter, - &client->dev, - indio_dev, - 0, 0, 0, - inv_mpu6050_select_bypass, - inv_mpu6050_deselect_bypass); - if (!st->mux_adapter) { - result = -ENODEV; - goto out_unreg_device; - } - - result = inv_mpu_acpi_create_mux_client(st); - if (result) - goto out_del_mux; - return 0; -out_del_mux: - i2c_del_mux_adapter(st->mux_adapter); -out_unreg_device: - iio_device_unregister(indio_dev); out_remove_trigger: inv_mpu6050_remove_trigger(st); out_unreg_ring: iio_triggered_buffer_cleanup(indio_dev); return result; } +EXPORT_SYMBOL_GPL(inv_mpu_core_probe); -static int inv_mpu_remove(struct i2c_client *client) +int inv_mpu_core_remove(struct device *dev) { - struct iio_dev *indio_dev = i2c_get_clientdata(client); - struct inv_mpu6050_state *st = iio_priv(indio_dev); + struct iio_dev *indio_dev = dev_get_drvdata(dev); - inv_mpu_acpi_delete_mux_client(st); - i2c_del_mux_adapter(st->mux_adapter); iio_device_unregister(indio_dev); - inv_mpu6050_remove_trigger(st); + inv_mpu6050_remove_trigger(iio_priv(indio_dev)); iio_triggered_buffer_cleanup(indio_dev); return 0; } +EXPORT_SYMBOL_GPL(inv_mpu_core_remove); + #ifdef CONFIG_PM_SLEEP static int inv_mpu_resume(struct device *dev) { - return inv_mpu6050_set_power_itg( - iio_priv(i2c_get_clientdata(to_i2c_client(dev))), true); + return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), true); } static int inv_mpu_suspend(struct device *dev) { - return inv_mpu6050_set_power_itg( - iio_priv(i2c_get_clientdata(to_i2c_client(dev))), false); + return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), false); } -static SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume); - -#define INV_MPU6050_PMOPS (&inv_mpu_pmops) -#else -#define INV_MPU6050_PMOPS NULL #endif /* CONFIG_PM_SLEEP */ -/* - * device id table is used to identify what device can be - * supported by this driver - */ -static const struct i2c_device_id inv_mpu_id[] = { - {"mpu6050", INV_MPU6050}, - {"mpu6500", INV_MPU6500}, - {} -}; - -MODULE_DEVICE_TABLE(i2c, inv_mpu_id); - -static const struct acpi_device_id inv_acpi_match[] = { - {"INVN6500", 0}, - { }, -}; - -MODULE_DEVICE_TABLE(acpi, inv_acpi_match); - -static struct i2c_driver inv_mpu_driver = { - .probe = inv_mpu_probe, - .remove = inv_mpu_remove, - .id_table = inv_mpu_id, - .driver = { - .name = "inv-mpu6050", - .pm = INV_MPU6050_PMOPS, - .acpi_match_table = ACPI_PTR(inv_acpi_match), - }, -}; - -module_i2c_driver(inv_mpu_driver); +SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume); +EXPORT_SYMBOL_GPL(inv_mpu_pmops); MODULE_AUTHOR("Invensense Corporation"); MODULE_DESCRIPTION("Invensense device MPU6050 driver"); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c new file mode 100644 index 000000000000..f581256d9d4c --- /dev/null +++ 
b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c @@ -0,0 +1,208 @@ +/* +* Copyright (C) 2012 Invensense, Inc. +* +* This software is licensed under the terms of the GNU General Public +* License version 2, as published by the Free Software Foundation, and +* may be copied, distributed, and modified under those terms. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +*/ + +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/i2c-mux.h> +#include <linux/iio/iio.h> +#include <linux/module.h> +#include "inv_mpu_iio.h" + +static const struct regmap_config inv_mpu_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +/* + * The i2c read/write needs to happen in unlocked mode. As the parent + * adapter is common. If we use locked versions, it will fail as + * the mux adapter will lock the parent i2c adapter, while calling + * select/deselect functions. + */ +static int inv_mpu6050_write_reg_unlocked(struct i2c_client *client, + u8 reg, u8 d) +{ + int ret; + u8 buf[2] = {reg, d}; + struct i2c_msg msg[1] = { + { + .addr = client->addr, + .flags = 0, + .len = sizeof(buf), + .buf = buf, + } + }; + + ret = __i2c_transfer(client->adapter, msg, 1); + if (ret != 1) + return ret; + + return 0; +} + +static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv, + u32 chan_id) +{ + struct i2c_client *client = mux_priv; + struct iio_dev *indio_dev = dev_get_drvdata(&client->dev); + struct inv_mpu6050_state *st = iio_priv(indio_dev); + int ret = 0; + + /* Use the same mutex which was used everywhere to protect power-op */ + mutex_lock(&indio_dev->mlock); + if (!st->powerup_count) { + ret = inv_mpu6050_write_reg_unlocked(client, + st->reg->pwr_mgmt_1, 0); + if (ret) + goto write_error; + + usleep_range(INV_MPU6050_REG_UP_TIME_MIN, + INV_MPU6050_REG_UP_TIME_MAX); + } + if (!ret) { + st->powerup_count++; + ret = inv_mpu6050_write_reg_unlocked(client, + st->reg->int_pin_cfg, + INV_MPU6050_INT_PIN_CFG | + INV_MPU6050_BIT_BYPASS_EN); + } +write_error: + mutex_unlock(&indio_dev->mlock); + + return ret; +} + +static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap, + void *mux_priv, u32 chan_id) +{ + struct i2c_client *client = mux_priv; + struct iio_dev *indio_dev = dev_get_drvdata(&client->dev); + struct inv_mpu6050_state *st = iio_priv(indio_dev); + + mutex_lock(&indio_dev->mlock); + /* It doesn't really mattter, if any of the calls fails */ + inv_mpu6050_write_reg_unlocked(client, st->reg->int_pin_cfg, + INV_MPU6050_INT_PIN_CFG); + st->powerup_count--; + if (!st->powerup_count) + inv_mpu6050_write_reg_unlocked(client, st->reg->pwr_mgmt_1, + INV_MPU6050_BIT_SLEEP); + mutex_unlock(&indio_dev->mlock); + + return 0; +} + +/** + * inv_mpu_probe() - probe function. + * @client: i2c client. + * @id: i2c device id. + * + * Returns 0 on success, a negative error code otherwise. + */ +static int inv_mpu_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct inv_mpu6050_state *st; + int result; + const char *name = id ? 
id->name : NULL; + struct regmap *regmap; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_I2C_BLOCK)) + return -EOPNOTSUPP; + + regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config); + if (IS_ERR(regmap)) { + dev_err(&client->dev, "Failed to register i2c regmap %d\n", + (int)PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + + result = inv_mpu_core_probe(regmap, client->irq, name, + NULL, id->driver_data); + if (result < 0) + return result; + + st = iio_priv(dev_get_drvdata(&client->dev)); + st->mux_adapter = i2c_add_mux_adapter(client->adapter, + &client->dev, + client, + 0, 0, 0, + inv_mpu6050_select_bypass, + inv_mpu6050_deselect_bypass); + if (!st->mux_adapter) { + result = -ENODEV; + goto out_unreg_device; + } + + result = inv_mpu_acpi_create_mux_client(client); + if (result) + goto out_del_mux; + + return 0; + +out_del_mux: + i2c_del_mux_adapter(st->mux_adapter); +out_unreg_device: + inv_mpu_core_remove(&client->dev); + return result; +} + +static int inv_mpu_remove(struct i2c_client *client) +{ + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct inv_mpu6050_state *st = iio_priv(indio_dev); + + inv_mpu_acpi_delete_mux_client(client); + i2c_del_mux_adapter(st->mux_adapter); + + return inv_mpu_core_remove(&client->dev); +} + +/* + * device id table is used to identify what device can be + * supported by this driver + */ +static const struct i2c_device_id inv_mpu_id[] = { + {"mpu6050", INV_MPU6050}, + {"mpu6500", INV_MPU6500}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, inv_mpu_id); + +static const struct acpi_device_id inv_acpi_match[] = { + {"INVN6500", 0}, + { }, +}; + +MODULE_DEVICE_TABLE(acpi, inv_acpi_match); + +static struct i2c_driver inv_mpu_driver = { + .probe = inv_mpu_probe, + .remove = inv_mpu_remove, + .id_table = inv_mpu_id, + .driver = { + .acpi_match_table = ACPI_PTR(inv_acpi_match), + .name = "inv-mpu6050-i2c", + .pm = &inv_mpu_pmops, + }, +}; + +module_i2c_driver(inv_mpu_driver); + +MODULE_AUTHOR("Invensense Corporation"); +MODULE_DESCRIPTION("Invensense device MPU6050 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index db0a4a2758ab..e302a49703bf 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -15,6 +15,7 @@ #include <linux/spinlock.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> +#include <linux/regmap.h> #include <linux/iio/sysfs.h> #include <linux/iio/kfifo_buf.h> #include <linux/iio/trigger.h> @@ -38,6 +39,9 @@ * @int_enable: Interrupt enable register. * @pwr_mgmt_1: Controls chip's power state and clock source. * @pwr_mgmt_2: Controls power state of individual sensors. + * @int_pin_cfg; Controls interrupt pin configuration. + * @accl_offset: Controls the accelerometer calibration offset. + * @gyro_offset: Controls the gyroscope calibration offset. */ struct inv_mpu6050_reg_map { u8 sample_rate_div; @@ -55,12 +59,15 @@ struct inv_mpu6050_reg_map { u8 pwr_mgmt_1; u8 pwr_mgmt_2; u8 int_pin_cfg; + u8 accl_offset; + u8 gyro_offset; }; /*device enum */ enum inv_devices { INV_MPU6050, INV_MPU6500, + INV_MPU6000, INV_NUM_PARTS }; @@ -107,9 +114,10 @@ struct inv_mpu6050_hw { * @hw: Other hardware-specific information. * @chip_type: chip type. * @time_stamp_lock: spin lock to time stamp. - * @client: i2c client handle. * @plat_data: platform data. * @timestamps: kfifo queue to store time stamp. + * @map regmap pointer. + * @irq interrupt number. 
*/ struct inv_mpu6050_state { #define TIMESTAMP_FIFO_SIZE 16 @@ -119,15 +127,19 @@ struct inv_mpu6050_state { const struct inv_mpu6050_hw *hw; enum inv_devices chip_type; spinlock_t time_stamp_lock; - struct i2c_client *client; struct i2c_adapter *mux_adapter; struct i2c_client *mux_client; unsigned int powerup_count; struct inv_mpu6050_platform_data plat_data; DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE); + struct regmap *map; + int irq; }; /*register and associated bit definition*/ +#define INV_MPU6050_REG_ACCEL_OFFSET 0x06 +#define INV_MPU6050_REG_GYRO_OFFSET 0x13 + #define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19 #define INV_MPU6050_REG_CONFIG 0x1A #define INV_MPU6050_REG_GYRO_CONFIG 0x1B @@ -151,6 +163,7 @@ struct inv_mpu6050_state { #define INV_MPU6050_BIT_I2C_MST_EN 0x20 #define INV_MPU6050_BIT_FIFO_EN 0x40 #define INV_MPU6050_BIT_DMP_EN 0x80 +#define INV_MPU6050_BIT_I2C_IF_DIS 0x10 #define INV_MPU6050_REG_PWR_MGMT_1 0x6B #define INV_MPU6050_BIT_H_RESET 0x80 @@ -167,10 +180,18 @@ struct inv_mpu6050_state { #define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6 #define INV_MPU6050_FIFO_COUNT_BYTE 2 #define INV_MPU6050_FIFO_THRESHOLD 500 + +/* mpu6500 registers */ +#define INV_MPU6500_REG_ACCEL_OFFSET 0x77 + +/* delay time in milliseconds */ #define INV_MPU6050_POWER_UP_TIME 100 #define INV_MPU6050_TEMP_UP_TIME 100 #define INV_MPU6050_SENSOR_UP_TIME 30 -#define INV_MPU6050_REG_UP_TIME 5 + +/* delay time in microseconds */ +#define INV_MPU6050_REG_UP_TIME_MIN 5000 +#define INV_MPU6050_REG_UP_TIME_MAX 10000 #define INV_MPU6050_TEMP_OFFSET 12421 #define INV_MPU6050_TEMP_SCALE 2941 @@ -185,6 +206,7 @@ struct inv_mpu6050_state { #define INV_MPU6050_REG_INT_PIN_CFG 0x37 #define INV_MPU6050_BIT_BYPASS_EN 0x2 +#define INV_MPU6050_INT_PIN_CFG 0 /* init parameters */ #define INV_MPU6050_INIT_FIFO_RATE 50 @@ -252,5 +274,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev); int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask); int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val); int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on); -int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st); -void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st); +int inv_mpu_acpi_create_mux_client(struct i2c_client *client); +void inv_mpu_acpi_delete_mux_client(struct i2c_client *client); +int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name, + int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type); +int inv_mpu_core_remove(struct device *dev); +int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on); +extern const struct dev_pm_ops inv_mpu_pmops; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index ba27e277511f..d0700628ee6d 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -13,7 +13,6 @@ #include <linux/module.h> #include <linux/slab.h> -#include <linux/i2c.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/sysfs.h> @@ -41,23 +40,24 @@ int inv_reset_fifo(struct iio_dev *indio_dev) struct inv_mpu6050_state *st = iio_priv(indio_dev); /* disable interrupt */ - result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0); + result = regmap_write(st->map, st->reg->int_enable, 0); if (result) { - dev_err(&st->client->dev, "int_enable failed %d\n", result); + dev_err(regmap_get_device(st->map), "int_enable failed %d\n", + result); return result; } /* disable the sensor 
output to FIFO */ - result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0); + result = regmap_write(st->map, st->reg->fifo_en, 0); if (result) goto reset_fifo_fail; /* disable fifo reading */ - result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0); + result = regmap_write(st->map, st->reg->user_ctrl, 0); if (result) goto reset_fifo_fail; /* reset FIFO*/ - result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, - INV_MPU6050_BIT_FIFO_RST); + result = regmap_write(st->map, st->reg->user_ctrl, + INV_MPU6050_BIT_FIFO_RST); if (result) goto reset_fifo_fail; @@ -67,14 +67,14 @@ int inv_reset_fifo(struct iio_dev *indio_dev) /* enable interrupt */ if (st->chip_config.accl_fifo_enable || st->chip_config.gyro_fifo_enable) { - result = inv_mpu6050_write_reg(st, st->reg->int_enable, - INV_MPU6050_BIT_DATA_RDY_EN); + result = regmap_write(st->map, st->reg->int_enable, + INV_MPU6050_BIT_DATA_RDY_EN); if (result) return result; } /* enable FIFO reading and I2C master interface*/ - result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, - INV_MPU6050_BIT_FIFO_EN); + result = regmap_write(st->map, st->reg->user_ctrl, + INV_MPU6050_BIT_FIFO_EN); if (result) goto reset_fifo_fail; /* enable sensor output to FIFO */ @@ -83,16 +83,16 @@ int inv_reset_fifo(struct iio_dev *indio_dev) d |= INV_MPU6050_BITS_GYRO_OUT; if (st->chip_config.accl_fifo_enable) d |= INV_MPU6050_BIT_ACCEL_OUT; - result = inv_mpu6050_write_reg(st, st->reg->fifo_en, d); + result = regmap_write(st->map, st->reg->fifo_en, d); if (result) goto reset_fifo_fail; return 0; reset_fifo_fail: - dev_err(&st->client->dev, "reset fifo failed %d\n", result); - result = inv_mpu6050_write_reg(st, st->reg->int_enable, - INV_MPU6050_BIT_DATA_RDY_EN); + dev_err(regmap_get_device(st->map), "reset fifo failed %d\n", result); + result = regmap_write(st->map, st->reg->int_enable, + INV_MPU6050_BIT_DATA_RDY_EN); return result; } @@ -109,7 +109,7 @@ irqreturn_t inv_mpu6050_irq_handler(int irq, void *p) timestamp = iio_get_time_ns(); kfifo_in_spinlocked(&st->timestamps, ×tamp, 1, - &st->time_stamp_lock); + &st->time_stamp_lock); return IRQ_WAKE_THREAD; } @@ -143,10 +143,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) * read fifo_count register to know how many bytes inside FIFO * right now */ - result = i2c_smbus_read_i2c_block_data(st->client, - st->reg->fifo_count_h, - INV_MPU6050_FIFO_COUNT_BYTE, data); - if (result != INV_MPU6050_FIFO_COUNT_BYTE) + result = regmap_bulk_read(st->map, st->reg->fifo_count_h, data, + INV_MPU6050_FIFO_COUNT_BYTE); + if (result) goto end_session; fifo_count = be16_to_cpup((__be16 *)(&data[0])); if (fifo_count < bytes_per_datum) @@ -158,22 +157,21 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) goto flush_fifo; /* Timestamp mismatch. 
*/ if (kfifo_len(&st->timestamps) > - fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR) - goto flush_fifo; + fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR) + goto flush_fifo; while (fifo_count >= bytes_per_datum) { - result = i2c_smbus_read_i2c_block_data(st->client, - st->reg->fifo_r_w, - bytes_per_datum, data); - if (result != bytes_per_datum) + result = regmap_bulk_read(st->map, st->reg->fifo_r_w, + data, bytes_per_datum); + if (result) goto flush_fifo; result = kfifo_out(&st->timestamps, ×tamp, 1); /* when there is no timestamp, put timestamp as 0 */ - if (0 == result) + if (result == 0) timestamp = 0; result = iio_push_to_buffers_with_timestamp(indio_dev, data, - timestamp); + timestamp); if (result) goto flush_fifo; fifo_count -= bytes_per_datum; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c new file mode 100644 index 000000000000..dea6c4361de0 --- /dev/null +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c @@ -0,0 +1,98 @@ +/* +* Copyright (C) 2015 Intel Corporation Inc. +* +* This software is licensed under the terms of the GNU General Public +* License version 2, as published by the Free Software Foundation, and +* may be copied, distributed, and modified under those terms. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +*/ +#include <linux/module.h> +#include <linux/acpi.h> +#include <linux/spi/spi.h> +#include <linux/regmap.h> +#include <linux/iio/iio.h> +#include "inv_mpu_iio.h" + +static const struct regmap_config inv_mpu_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static int inv_mpu_i2c_disable(struct iio_dev *indio_dev) +{ + struct inv_mpu6050_state *st = iio_priv(indio_dev); + int ret = 0; + + ret = inv_mpu6050_set_power_itg(st, true); + if (ret) + return ret; + + ret = regmap_write(st->map, INV_MPU6050_REG_USER_CTRL, + INV_MPU6050_BIT_I2C_IF_DIS); + if (ret) { + inv_mpu6050_set_power_itg(st, false); + return ret; + } + + return inv_mpu6050_set_power_itg(st, false); +} + +static int inv_mpu_probe(struct spi_device *spi) +{ + struct regmap *regmap; + const struct spi_device_id *id = spi_get_device_id(spi); + const char *name = id ? 
id->name : NULL; + + regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config); + if (IS_ERR(regmap)) { + dev_err(&spi->dev, "Failed to register spi regmap %d\n", + (int)PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + + return inv_mpu_core_probe(regmap, spi->irq, name, + inv_mpu_i2c_disable, id->driver_data); +} + +static int inv_mpu_remove(struct spi_device *spi) +{ + return inv_mpu_core_remove(&spi->dev); +} + +/* + * device id table is used to identify what device can be + * supported by this driver + */ +static const struct spi_device_id inv_mpu_id[] = { + {"mpu6000", INV_MPU6000}, + {} +}; + +MODULE_DEVICE_TABLE(spi, inv_mpu_id); + +static const struct acpi_device_id inv_acpi_match[] = { + {"INVN6000", 0}, + { }, +}; +MODULE_DEVICE_TABLE(acpi, inv_acpi_match); + +static struct spi_driver inv_mpu_driver = { + .probe = inv_mpu_probe, + .remove = inv_mpu_remove, + .id_table = inv_mpu_id, + .driver = { + .acpi_match_table = ACPI_PTR(inv_acpi_match), + .name = "inv-mpu6000-spi", + .pm = &inv_mpu_pmops, + }, +}; + +module_spi_driver(inv_mpu_driver); + +MODULE_AUTHOR("Adriana Reus <adriana.reus@intel.com>"); +MODULE_DESCRIPTION("Invensense device MPU6000 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c index 844610c3a3a9..e8818d4dd4b8 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c @@ -19,19 +19,19 @@ static void inv_scan_query(struct iio_dev *indio_dev) st->chip_config.gyro_fifo_enable = test_bit(INV_MPU6050_SCAN_GYRO_X, - indio_dev->active_scan_mask) || - test_bit(INV_MPU6050_SCAN_GYRO_Y, - indio_dev->active_scan_mask) || - test_bit(INV_MPU6050_SCAN_GYRO_Z, - indio_dev->active_scan_mask); + indio_dev->active_scan_mask) || + test_bit(INV_MPU6050_SCAN_GYRO_Y, + indio_dev->active_scan_mask) || + test_bit(INV_MPU6050_SCAN_GYRO_Z, + indio_dev->active_scan_mask); st->chip_config.accl_fifo_enable = test_bit(INV_MPU6050_SCAN_ACCL_X, - indio_dev->active_scan_mask) || - test_bit(INV_MPU6050_SCAN_ACCL_Y, - indio_dev->active_scan_mask) || - test_bit(INV_MPU6050_SCAN_ACCL_Z, - indio_dev->active_scan_mask); + indio_dev->active_scan_mask) || + test_bit(INV_MPU6050_SCAN_ACCL_Y, + indio_dev->active_scan_mask) || + test_bit(INV_MPU6050_SCAN_ACCL_Z, + indio_dev->active_scan_mask); } /** @@ -65,15 +65,15 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable) if (result) return result; } else { - result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0); + result = regmap_write(st->map, st->reg->fifo_en, 0); if (result) return result; - result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0); + result = regmap_write(st->map, st->reg->int_enable, 0); if (result) return result; - result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0); + result = regmap_write(st->map, st->reg->user_ctrl, 0); if (result) return result; @@ -101,7 +101,7 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable) * @state: Desired trigger state */ static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig, - bool state) + bool state) { return inv_mpu6050_set_enable(iio_trigger_get_drvdata(trig), state); } @@ -123,7 +123,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev) if (!st->trig) return -ENOMEM; - ret = devm_request_irq(&indio_dev->dev, st->client->irq, + ret = devm_request_irq(&indio_dev->dev, st->irq, &iio_trigger_generic_data_rdy_poll, IRQF_TRIGGER_RISING, "inv_mpu", @@ -131,7 +131,7 @@ int 
inv_mpu6050_probe_trigger(struct iio_dev *indio_dev) if (ret) return ret; - st->trig->dev.parent = &st->client->dev; + st->trig->dev.parent = regmap_get_device(st->map); st->trig->ops = &inv_mpu_trigger_ops; iio_trigger_set_drvdata(st->trig, indio_dev); diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 139ae916225f..b976332d45d3 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -512,33 +512,41 @@ static ssize_t iio_buffer_show_enable(struct device *dev, return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer)); } +static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev, + unsigned int scan_index) +{ + const struct iio_chan_spec *ch; + unsigned int bytes; + + ch = iio_find_channel_from_si(indio_dev, scan_index); + bytes = ch->scan_type.storagebits / 8; + if (ch->scan_type.repeat > 1) + bytes *= ch->scan_type.repeat; + return bytes; +} + +static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev) +{ + return iio_storage_bytes_for_si(indio_dev, + indio_dev->scan_index_timestamp); +} + static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const unsigned long *mask, bool timestamp) { - const struct iio_chan_spec *ch; unsigned bytes = 0; int length, i; /* How much space will the demuxed element take? */ for_each_set_bit(i, mask, indio_dev->masklength) { - ch = iio_find_channel_from_si(indio_dev, i); - if (ch->scan_type.repeat > 1) - length = ch->scan_type.storagebits / 8 * - ch->scan_type.repeat; - else - length = ch->scan_type.storagebits / 8; + length = iio_storage_bytes_for_si(indio_dev, i); bytes = ALIGN(bytes, length); bytes += length; } + if (timestamp) { - ch = iio_find_channel_from_si(indio_dev, - indio_dev->scan_index_timestamp); - if (ch->scan_type.repeat > 1) - length = ch->scan_type.storagebits / 8 * - ch->scan_type.repeat; - else - length = ch->scan_type.storagebits / 8; + length = iio_storage_bytes_for_timestamp(indio_dev); bytes = ALIGN(bytes, length); bytes += length; } @@ -1288,7 +1296,6 @@ static int iio_buffer_add_demux(struct iio_buffer *buffer, static int iio_buffer_update_demux(struct iio_dev *indio_dev, struct iio_buffer *buffer) { - const struct iio_chan_spec *ch; int ret, in_ind = -1, out_ind, length; unsigned in_loc = 0, out_loc = 0; struct iio_demux_table *p = NULL; @@ -1315,21 +1322,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, in_ind = find_next_bit(indio_dev->active_scan_mask, indio_dev->masklength, in_ind + 1); - ch = iio_find_channel_from_si(indio_dev, in_ind); - if (ch->scan_type.repeat > 1) - length = ch->scan_type.storagebits / 8 * - ch->scan_type.repeat; - else - length = ch->scan_type.storagebits / 8; + length = iio_storage_bytes_for_si(indio_dev, in_ind); /* Make sure we are aligned */ in_loc = roundup(in_loc, length) + length; } - ch = iio_find_channel_from_si(indio_dev, in_ind); - if (ch->scan_type.repeat > 1) - length = ch->scan_type.storagebits / 8 * - ch->scan_type.repeat; - else - length = ch->scan_type.storagebits / 8; + length = iio_storage_bytes_for_si(indio_dev, in_ind); out_loc = roundup(out_loc, length); in_loc = roundup(in_loc, length); ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); @@ -1340,13 +1337,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, } /* Relies on scan_timestamp being last */ if (buffer->scan_timestamp) { - ch = iio_find_channel_from_si(indio_dev, - indio_dev->scan_index_timestamp); - if (ch->scan_type.repeat > 1) - length = 
ch->scan_type.storagebits / 8 * - ch->scan_type.repeat; - else - length = ch->scan_type.storagebits / 8; + length = iio_storage_bytes_for_timestamp(indio_dev); out_loc = roundup(out_loc, length); in_loc = roundup(in_loc, length); ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index af7cc1e65656..70cb7eb0a75c 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -77,6 +77,7 @@ static const char * const iio_chan_type_name_spec[] = { [IIO_VELOCITY] = "velocity", [IIO_CONCENTRATION] = "concentration", [IIO_RESISTANCE] = "resistance", + [IIO_PH] = "ph", }; static const char * const iio_modifier_names[] = { diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c index 8b4164343f20..b05946604f80 100644 --- a/drivers/iio/light/bh1750.c +++ b/drivers/iio/light/bh1750.c @@ -241,7 +241,7 @@ static int bh1750_probe(struct i2c_client *client, if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE)) - return -ENODEV; + return -EOPNOTSUPP; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c index c4e8c6b6c3c3..99a62816c3b4 100644 --- a/drivers/iio/light/jsa1212.c +++ b/drivers/iio/light/jsa1212.c @@ -326,7 +326,7 @@ static int jsa1212_probe(struct i2c_client *client, int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; + return -EOPNOTSUPP; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c index 01e111e72d4b..b776c8ed4387 100644 --- a/drivers/iio/light/opt3001.c +++ b/drivers/iio/light/opt3001.c @@ -65,19 +65,25 @@ #define OPT3001_REG_EXPONENT(n) ((n) >> 12) #define OPT3001_REG_MANTISSA(n) ((n) & 0xfff) +#define OPT3001_INT_TIME_LONG 800000 +#define OPT3001_INT_TIME_SHORT 100000 + /* * Time to wait for conversion result to be ready. The device datasheet - * worst-case max value is 880ms. Add some slack to be on the safe side. + * sect. 6.5 states results are ready after total integration time plus 3ms. + * This results in worst-case max values of 113ms or 883ms, respectively. + * Add some slack to be on the safe side. */ -#define OPT3001_RESULT_READY_TIMEOUT msecs_to_jiffies(1000) +#define OPT3001_RESULT_READY_SHORT 150 +#define OPT3001_RESULT_READY_LONG 1000 struct opt3001 { struct i2c_client *client; struct device *dev; struct mutex lock; - u16 ok_to_ignore_lock:1; - u16 result_ready:1; + bool ok_to_ignore_lock; + bool result_ready; wait_queue_head_t result_ready_queue; u16 result; @@ -89,6 +95,8 @@ struct opt3001 { u8 high_thresh_exp; u8 low_thresh_exp; + + bool use_irq; }; struct opt3001_scale { @@ -227,26 +235,30 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2) u16 reg; u8 exponent; u16 value; + long timeout; - /* - * Enable the end-of-conversion interrupt mechanism. Note that doing - * so will overwrite the low-level limit value however we will restore - * this value later on. - */ - ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT, - OPT3001_LOW_LIMIT_EOC_ENABLE); - if (ret < 0) { - dev_err(opt->dev, "failed to write register %02x\n", - OPT3001_LOW_LIMIT); - return ret; + if (opt->use_irq) { + /* + * Enable the end-of-conversion interrupt mechanism. 
Note that + * doing so will overwrite the low-level limit value however we + * will restore this value later on. + */ + ret = i2c_smbus_write_word_swapped(opt->client, + OPT3001_LOW_LIMIT, + OPT3001_LOW_LIMIT_EOC_ENABLE); + if (ret < 0) { + dev_err(opt->dev, "failed to write register %02x\n", + OPT3001_LOW_LIMIT); + return ret; + } + + /* Allow IRQ to access the device despite lock being set */ + opt->ok_to_ignore_lock = true; } - /* Reset data-ready indicator flag (will be set in the IRQ routine) */ + /* Reset data-ready indicator flag */ opt->result_ready = false; - /* Allow IRQ to access the device despite lock being set */ - opt->ok_to_ignore_lock = true; - /* Configure for single-conversion mode and start a new conversion */ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION); if (ret < 0) { @@ -266,32 +278,69 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2) goto err; } - /* Wait for the IRQ to indicate the conversion is complete */ - ret = wait_event_timeout(opt->result_ready_queue, opt->result_ready, - OPT3001_RESULT_READY_TIMEOUT); + if (opt->use_irq) { + /* Wait for the IRQ to indicate the conversion is complete */ + ret = wait_event_timeout(opt->result_ready_queue, + opt->result_ready, + msecs_to_jiffies(OPT3001_RESULT_READY_LONG)); + } else { + /* Sleep for result ready time */ + timeout = (opt->int_time == OPT3001_INT_TIME_SHORT) ? + OPT3001_RESULT_READY_SHORT : OPT3001_RESULT_READY_LONG; + msleep(timeout); + + /* Check result ready flag */ + ret = i2c_smbus_read_word_swapped(opt->client, + OPT3001_CONFIGURATION); + if (ret < 0) { + dev_err(opt->dev, "failed to read register %02x\n", + OPT3001_CONFIGURATION); + goto err; + } + + if (!(ret & OPT3001_CONFIGURATION_CRF)) { + ret = -ETIMEDOUT; + goto err; + } + + /* Obtain value */ + ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT); + if (ret < 0) { + dev_err(opt->dev, "failed to read register %02x\n", + OPT3001_RESULT); + goto err; + } + opt->result = ret; + opt->result_ready = true; + } err: - /* Disallow IRQ to access the device while lock is active */ - opt->ok_to_ignore_lock = false; + if (opt->use_irq) + /* Disallow IRQ to access the device while lock is active */ + opt->ok_to_ignore_lock = false; if (ret == 0) return -ETIMEDOUT; else if (ret < 0) return ret; - /* - * Disable the end-of-conversion interrupt mechanism by restoring the - * low-level limit value (clearing OPT3001_LOW_LIMIT_EOC_ENABLE). Note - * that selectively clearing those enable bits would affect the actual - * limit value due to bit-overlap and therefore can't be done. - */ - value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa; - ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT, - value); - if (ret < 0) { - dev_err(opt->dev, "failed to write register %02x\n", - OPT3001_LOW_LIMIT); - return ret; + if (opt->use_irq) { + /* + * Disable the end-of-conversion interrupt mechanism by + * restoring the low-level limit value (clearing + * OPT3001_LOW_LIMIT_EOC_ENABLE). Note that selectively clearing + * those enable bits would affect the actual limit value due to + * bit-overlap and therefore can't be done. 
+ */ + value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa; + ret = i2c_smbus_write_word_swapped(opt->client, + OPT3001_LOW_LIMIT, + value); + if (ret < 0) { + dev_err(opt->dev, "failed to write register %02x\n", + OPT3001_LOW_LIMIT); + return ret; + } } exponent = OPT3001_REG_EXPONENT(opt->result); @@ -325,13 +374,13 @@ static int opt3001_set_int_time(struct opt3001 *opt, int time) reg = ret; switch (time) { - case 100000: + case OPT3001_INT_TIME_SHORT: reg &= ~OPT3001_CONFIGURATION_CT; - opt->int_time = 100000; + opt->int_time = OPT3001_INT_TIME_SHORT; break; - case 800000: + case OPT3001_INT_TIME_LONG: reg |= OPT3001_CONFIGURATION_CT; - opt->int_time = 800000; + opt->int_time = OPT3001_INT_TIME_LONG; break; default: return -EINVAL; @@ -597,9 +646,9 @@ static int opt3001_configure(struct opt3001 *opt) /* Reflect status of the device's integration time setting */ if (reg & OPT3001_CONFIGURATION_CT) - opt->int_time = 800000; + opt->int_time = OPT3001_INT_TIME_LONG; else - opt->int_time = 100000; + opt->int_time = OPT3001_INT_TIME_SHORT; /* Ensure device is in shutdown initially */ opt3001_set_mode(opt, ®, OPT3001_CONFIGURATION_M_SHUTDOWN); @@ -733,12 +782,18 @@ static int opt3001_probe(struct i2c_client *client, return ret; } - ret = request_threaded_irq(irq, NULL, opt3001_irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "opt3001", iio); - if (ret) { - dev_err(dev, "failed to request IRQ #%d\n", irq); - return ret; + /* Make use of INT pin only if valid IRQ no. is given */ + if (irq > 0) { + ret = request_threaded_irq(irq, NULL, opt3001_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "opt3001", iio); + if (ret) { + dev_err(dev, "failed to request IRQ #%d\n", irq); + return ret; + } + opt->use_irq = true; + } else { + dev_dbg(opt->dev, "enabling interrupt-less operation\n"); } return 0; @@ -751,7 +806,8 @@ static int opt3001_remove(struct i2c_client *client) int ret; u16 reg; - free_irq(client->irq, iio); + if (opt->use_irq) + free_irq(client->irq, iio); ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION); if (ret < 0) { diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index 868abada3409..021dc5361f53 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig @@ -105,4 +105,37 @@ config IIO_ST_MAGN_SPI_3AXIS depends on IIO_ST_MAGN_3AXIS depends on IIO_ST_SENSORS_SPI +config SENSORS_HMC5843 + tristate + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + +config SENSORS_HMC5843_I2C + tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer (I2C)" + depends on I2C + select SENSORS_HMC5843 + select REGMAP_I2C + help + Say Y here to add support for the Honeywell HMC5843, HMC5883 and + HMC5883L 3-Axis Magnetometer (digital compass). + + This driver can also be compiled as a set of modules. + If so, these modules will be created: + - hmc5843_core (core functions) + - hmc5843_i2c (support for HMC5843, HMC5883, HMC5883L and HMC5983) + +config SENSORS_HMC5843_SPI + tristate "Honeywell HMC5983 3-Axis Magnetometer (SPI)" + depends on SPI_MASTER + select SENSORS_HMC5843 + select REGMAP_SPI + help + Say Y here to add support for the Honeywell HMC5983 3-Axis Magnetometer + (digital compass). + + This driver can also be compiled as a set of modules. 
+ If so, these modules will be created: + - hmc5843_core (core functions) + - hmc5843_spi (support for HMC5983) + endmenu diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile index 2c72df458ec2..dd03fe524481 100644 --- a/drivers/iio/magnetometer/Makefile +++ b/drivers/iio/magnetometer/Makefile @@ -15,3 +15,7 @@ st_magn-$(CONFIG_IIO_BUFFER) += st_magn_buffer.o obj-$(CONFIG_IIO_ST_MAGN_I2C_3AXIS) += st_magn_i2c.o obj-$(CONFIG_IIO_ST_MAGN_SPI_3AXIS) += st_magn_spi.o + +obj-$(CONFIG_SENSORS_HMC5843) += hmc5843_core.o +obj-$(CONFIG_SENSORS_HMC5843_I2C) += hmc5843_i2c.o +obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index b13936dacc78..9c5c9ef3f1da 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -252,7 +252,7 @@ struct ak_def { u8 data_regs[3]; }; -static struct ak_def ak_def_array[AK_MAX_TYPE] = { +static const struct ak_def ak_def_array[AK_MAX_TYPE] = { { .type = AK8975, .raw_to_gauss = ak8975_raw_to_gauss, @@ -360,7 +360,7 @@ static struct ak_def ak_def_array[AK_MAX_TYPE] = { */ struct ak8975_data { struct i2c_client *client; - struct ak_def *def; + const struct ak_def *def; struct attribute_group attrs; struct mutex lock; u8 asa[3]; diff --git a/drivers/staging/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h index 06f35d3828e4..76a5d7484d8d 100644 --- a/drivers/staging/iio/magnetometer/hmc5843.h +++ b/drivers/iio/magnetometer/hmc5843.h @@ -7,8 +7,7 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * */ + */ #ifndef HMC5843_CORE_H #define HMC5843_CORE_H @@ -38,7 +37,7 @@ enum hmc5843_ids { * @regmap: hardware access register maps * @variant: describe chip variants * @buffer: 3x 16-bit channels + padding + 64-bit timestamp - **/ + */ struct hmc5843_data { struct device *dev; struct mutex lock; diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c index 394bc141a1b0..77882b466e0f 100644 --- a/drivers/staging/iio/magnetometer/hmc5843_core.c +++ b/drivers/iio/magnetometer/hmc5843_core.c @@ -18,7 +18,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * */ #include <linux/module.h> @@ -66,6 +65,33 @@ #define HMC5843_MEAS_CONF_NEGATIVE_BIAS 0x02 #define HMC5843_MEAS_CONF_MASK 0x03 +/* + * API for setting the measurement configuration to + * Normal, Positive bias and Negative bias + * + * From the datasheet: + * 0 - Normal measurement configuration (default): In normal measurement + * configuration the device follows normal measurement flow. Pins BP + * and BN are left floating and high impedance. + * + * 1 - Positive bias configuration: In positive bias configuration, a + * positive current is forced across the resistive load on pins BP + * and BN. + * + * 2 - Negative bias configuration. In negative bias configuration, a + * negative current is forced across the resistive load on pins BP + * and BN. + * + * 3 - Only available on HMC5983. Magnetic sensor is disabled. + * Temperature sensor is enabled. 
+ */ + +static const char *const hmc5843_meas_conf_modes[] = {"normal", "positivebias", + "negativebias"}; + +static const char *const hmc5983_meas_conf_modes[] = {"normal", "positivebias", + "negativebias", + "disabled"}; /* Scaling factors: 10000000/Gain */ static const int hmc5843_regval_to_nanoscale[] = { 6173, 7692, 10309, 12821, 18868, 21739, 25641, 35714 @@ -174,24 +200,6 @@ static int hmc5843_read_measurement(struct hmc5843_data *data, return IIO_VAL_INT; } -/* - * API for setting the measurement configuration to - * Normal, Positive bias and Negative bias - * - * From the datasheet: - * 0 - Normal measurement configuration (default): In normal measurement - * configuration the device follows normal measurement flow. Pins BP - * and BN are left floating and high impedance. - * - * 1 - Positive bias configuration: In positive bias configuration, a - * positive current is forced across the resistive load on pins BP - * and BN. - * - * 2 - Negative bias configuration. In negative bias configuration, a - * negative current is forced across the resistive load on pins BP - * and BN. - * - */ static int hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf) { int ret; @@ -205,48 +213,55 @@ static int hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf) } static -ssize_t hmc5843_show_measurement_configuration(struct device *dev, - struct device_attribute *attr, - char *buf) +int hmc5843_show_measurement_configuration(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) { - struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev)); + struct hmc5843_data *data = iio_priv(indio_dev); unsigned int val; int ret; ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_A, &val); if (ret) return ret; - val &= HMC5843_MEAS_CONF_MASK; - return sprintf(buf, "%d\n", val); + return val & HMC5843_MEAS_CONF_MASK; } static -ssize_t hmc5843_set_measurement_configuration(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) +int hmc5843_set_measurement_configuration(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + unsigned int meas_conf) { - struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev)); - unsigned long meas_conf = 0; - int ret; + struct hmc5843_data *data = iio_priv(indio_dev); - ret = kstrtoul(buf, 10, &meas_conf); - if (ret) - return ret; - if (meas_conf >= HMC5843_MEAS_CONF_MASK) - return -EINVAL; + return hmc5843_set_meas_conf(data, meas_conf); +} - ret = hmc5843_set_meas_conf(data, meas_conf); +static const struct iio_enum hmc5843_meas_conf_enum = { + .items = hmc5843_meas_conf_modes, + .num_items = ARRAY_SIZE(hmc5843_meas_conf_modes), + .get = hmc5843_show_measurement_configuration, + .set = hmc5843_set_measurement_configuration, +}; - return (ret < 0) ? 
ret : count; -} +static const struct iio_chan_spec_ext_info hmc5843_ext_info[] = { + IIO_ENUM("meas_conf", true, &hmc5843_meas_conf_enum), + IIO_ENUM_AVAILABLE("meas_conf", &hmc5843_meas_conf_enum), + { }, +}; -static IIO_DEVICE_ATTR(meas_conf, - S_IWUSR | S_IRUGO, - hmc5843_show_measurement_configuration, - hmc5843_set_measurement_configuration, - 0); +static const struct iio_enum hmc5983_meas_conf_enum = { + .items = hmc5983_meas_conf_modes, + .num_items = ARRAY_SIZE(hmc5983_meas_conf_modes), + .get = hmc5843_show_measurement_configuration, + .set = hmc5843_set_measurement_configuration, +}; + +static const struct iio_chan_spec_ext_info hmc5983_ext_info[] = { + IIO_ENUM("meas_conf", true, &hmc5983_meas_conf_enum), + IIO_ENUM_AVAILABLE("meas_conf", &hmc5983_meas_conf_enum), + { }, +}; static ssize_t hmc5843_show_samp_freq_avail(struct device *dev, @@ -459,6 +474,25 @@ done: .storagebits = 16, \ .endianness = IIO_BE, \ }, \ + .ext_info = hmc5843_ext_info, \ + } + +#define HMC5983_CHANNEL(axis, idx) \ + { \ + .type = IIO_MAGN, \ + .modified = 1, \ + .channel2 = IIO_MOD_##axis, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .scan_index = idx, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_BE, \ + }, \ + .ext_info = hmc5983_ext_info, \ } static const struct iio_chan_spec hmc5843_channels[] = { @@ -476,8 +510,14 @@ static const struct iio_chan_spec hmc5883_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(3), }; +static const struct iio_chan_spec hmc5983_channels[] = { + HMC5983_CHANNEL(X, 0), + HMC5983_CHANNEL(Z, 1), + HMC5983_CHANNEL(Y, 2), + IIO_CHAN_SOFT_TIMESTAMP(3), +}; + static struct attribute *hmc5843_attributes[] = { - &iio_dev_attr_meas_conf.dev_attr.attr, &iio_dev_attr_scale_available.dev_attr.attr, &iio_dev_attr_sampling_frequency_available.dev_attr.attr, NULL @@ -516,7 +556,7 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = { ARRAY_SIZE(hmc5883l_regval_to_nanoscale), }, [HMC5983_ID] = { - .channels = hmc5883_channels, + .channels = hmc5983_channels, .regval_to_samp_freq = hmc5983_regval_to_samp_freq, .n_regval_to_samp_freq = ARRAY_SIZE(hmc5983_regval_to_samp_freq), @@ -565,14 +605,14 @@ static const unsigned long hmc5843_scan_masks[] = {0x7, 0}; int hmc5843_common_suspend(struct device *dev) { return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)), - HMC5843_MODE_CONVERSION_CONTINUOUS); + HMC5843_MODE_SLEEP); } EXPORT_SYMBOL(hmc5843_common_suspend); int hmc5843_common_resume(struct device *dev) { return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)), - HMC5843_MODE_SLEEP); + HMC5843_MODE_CONVERSION_CONTINUOUS); } EXPORT_SYMBOL(hmc5843_common_resume); diff --git a/drivers/staging/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c index 3e06ceb32059..3de7f4426ac4 100644 --- a/drivers/staging/iio/magnetometer/hmc5843_i2c.c +++ b/drivers/iio/magnetometer/hmc5843_i2c.c @@ -7,8 +7,7 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
- * - * */ + */ #include <linux/module.h> #include <linux/i2c.h> diff --git a/drivers/staging/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c index 8be198058ea2..535f03a70d63 100644 --- a/drivers/staging/iio/magnetometer/hmc5843_spi.c +++ b/drivers/iio/magnetometer/hmc5843_spi.c @@ -6,8 +6,7 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * */ + */ #include <linux/module.h> #include <linux/spi/spi.h> diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index b27f0146647b..501f858df413 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -175,6 +175,8 @@ #define ST_MAGN_3_BDU_MASK 0x10 #define ST_MAGN_3_DRDY_IRQ_ADDR 0x62 #define ST_MAGN_3_DRDY_INT_MASK 0x01 +#define ST_MAGN_3_IHL_IRQ_ADDR 0x63 +#define ST_MAGN_3_IHL_IRQ_MASK 0x04 #define ST_MAGN_3_FS_AVL_15000_GAIN 1500 #define ST_MAGN_3_MULTIREAD_BIT false #define ST_MAGN_3_OUT_X_L_ADDR 0x68 @@ -480,6 +482,8 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { .drdy_irq = { .addr = ST_MAGN_3_DRDY_IRQ_ADDR, .mask_int1 = ST_MAGN_3_DRDY_INT_MASK, + .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR, + .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK, }, .multi_read_bit = ST_MAGN_3_MULTIREAD_BIT, .bootime = 2, diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig index fd75db73e582..ffc735c168fb 100644 --- a/drivers/iio/potentiometer/Kconfig +++ b/drivers/iio/potentiometer/Kconfig @@ -17,4 +17,16 @@ config MCP4531 To compile this driver as a module, choose M here: the module will be called mcp4531. +config TPL0102 + tristate "Texas Instruments digital potentiometer driver" + depends on I2C + select REGMAP_I2C + help + Say yes here to build support for the Texas Instruments + TPL0102, TPL0402 + digital potentiometer chips. + + To compile this driver as a module, choose M here: the + module will be called tpl0102. + endmenu diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile index 8afe49227012..b563b492b486 100644 --- a/drivers/iio/potentiometer/Makefile +++ b/drivers/iio/potentiometer/Makefile @@ -4,3 +4,4 @@ # When adding new entries keep the list in alphabetical order obj-$(CONFIG_MCP4531) += mcp4531.o +obj-$(CONFIG_TPL0102) += tpl0102.o diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c index a3f66874ee2e..0db67fe14766 100644 --- a/drivers/iio/potentiometer/mcp4531.c +++ b/drivers/iio/potentiometer/mcp4531.c @@ -159,7 +159,7 @@ static int mcp4531_probe(struct i2c_client *client, if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_err(dev, "SMBUS Word Data not supported\n"); - return -EIO; + return -EOPNOTSUPP; } indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c new file mode 100644 index 000000000000..313124b6fd59 --- /dev/null +++ b/drivers/iio/potentiometer/tpl0102.c @@ -0,0 +1,166 @@ +/* + * tpl0102.c - Support for Texas Instruments digital potentiometers + * + * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * TODO: enable/disable hi-z output control + */ + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include <linux/iio/iio.h> + +struct tpl0102_cfg { + int wipers; + int max_pos; + int kohms; +}; + +enum tpl0102_type { + CAT5140_503, + CAT5140_104, + TPL0102_104, + TPL0401_103, +}; + +static const struct tpl0102_cfg tpl0102_cfg[] = { + /* on-semiconductor parts */ + [CAT5140_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, }, + [CAT5140_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, }, + /* ti parts */ + [TPL0102_104] = { .wipers = 2, .max_pos = 256, .kohms = 100 }, + [TPL0401_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, }, +}; + +struct tpl0102_data { + struct regmap *regmap; + unsigned long devid; +}; + +static const struct regmap_config tpl0102_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +#define TPL0102_CHANNEL(ch) { \ + .type = IIO_RESISTANCE, \ + .indexed = 1, \ + .output = 1, \ + .channel = (ch), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ +} + +static const struct iio_chan_spec tpl0102_channels[] = { + TPL0102_CHANNEL(0), + TPL0102_CHANNEL(1), +}; + +static int tpl0102_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct tpl0102_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: { + int ret = regmap_read(data->regmap, chan->channel, val); + + return ret ? ret : IIO_VAL_INT; + } + case IIO_CHAN_INFO_SCALE: + *val = 1000 * tpl0102_cfg[data->devid].kohms; + *val2 = tpl0102_cfg[data->devid].max_pos; + return IIO_VAL_FRACTIONAL; + } + + return -EINVAL; +} + +static int tpl0102_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct tpl0102_data *data = iio_priv(indio_dev); + + if (mask != IIO_CHAN_INFO_RAW) + return -EINVAL; + + if (val >= tpl0102_cfg[data->devid].max_pos || val < 0) + return -EINVAL; + + return regmap_write(data->regmap, chan->channel, val); +} + +static const struct iio_info tpl0102_info = { + .read_raw = tpl0102_read_raw, + .write_raw = tpl0102_write_raw, + .driver_module = THIS_MODULE, +}; + +static int tpl0102_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct tpl0102_data *data; + struct iio_dev *indio_dev; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WORD_DATA)) + return -ENOTSUPP; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + data = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); + + data->devid = id->driver_data; + data->regmap = devm_regmap_init_i2c(client, &tpl0102_regmap_config); + if (IS_ERR(data->regmap)) { + dev_err(dev, "regmap initialization failed\n"); + return PTR_ERR(data->regmap); + } + + indio_dev->dev.parent = dev; + indio_dev->info = &tpl0102_info; + indio_dev->channels = tpl0102_channels; + indio_dev->num_channels = tpl0102_cfg[data->devid].wipers; + indio_dev->name = client->name; + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct i2c_device_id tpl0102_id[] = { + { "cat5140-503", CAT5140_503 }, + { "cat5140-104", CAT5140_104 }, + { 
"tpl0102-104", TPL0102_104 }, + { "tpl0401-103", TPL0401_103 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, tpl0102_id); + +static struct i2c_driver tpl0102_driver = { + .driver = { + .name = "tpl0102", + }, + .probe = tpl0102_probe, + .id_table = tpl0102_id, +}; + +module_i2c_driver(tpl0102_driver); + +MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>"); +MODULE_DESCRIPTION("TPL0102 digital potentiometer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig index 6f2e7c9ac23e..31c0e1fd2202 100644 --- a/drivers/iio/pressure/Kconfig +++ b/drivers/iio/pressure/Kconfig @@ -10,11 +10,11 @@ config BMP280 depends on I2C select REGMAP_I2C help - Say yes here to build support for Bosch Sensortec BMP280 - pressure and temperature sensor. + Say yes here to build support for Bosch Sensortec BMP280 + pressure and temperature sensor. - To compile this driver as a module, choose M here: the module - will be called bmp280. + To compile this driver as a module, choose M here: the module + will be called bmp280. config HID_SENSOR_PRESS depends on HID_SENSOR_HUB @@ -27,18 +27,33 @@ config HID_SENSOR_PRESS Say yes here to build support for the HID SENSOR Pressure driver - To compile this driver as a module, choose M here: the module - will be called hid-sensor-press. + To compile this driver as a module, choose M here: the module + will be called hid-sensor-press. config MPL115 + tristate + +config MPL115_I2C tristate "Freescale MPL115A2 pressure sensor driver" depends on I2C + select MPL115 help Say yes here to build support for the Freescale MPL115A2 pressure sensor connected via I2C. - To compile this driver as a module, choose M here: the module - will be called mpl115. + To compile this driver as a module, choose M here: the module + will be called mpl115_i2c. + +config MPL115_SPI + tristate "Freescale MPL115A1 pressure sensor driver" + depends on SPI_MASTER + select MPL115 + help + Say yes here to build support for the Freescale MPL115A1 + pressure sensor connected via SPI. + + To compile this driver as a module, choose M here: the module + will be called mpl115_spi. config MPL3115 tristate "Freescale MPL3115A2 pressure sensor driver" @@ -49,11 +64,13 @@ config MPL3115 Say yes here to build support for the Freescale MPL3115A2 pressure sensor / altimeter. - To compile this driver as a module, choose M here: the module - will be called mpl3115. + To compile this driver as a module, choose M here: the module + will be called mpl3115. config MS5611 tristate "Measurement Specialties MS5611 pressure sensor driver" + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER help Say Y here to build support for the Measurement Specialties MS5611, MS5607 pressure and temperature sensors. @@ -82,7 +99,7 @@ config MS5611_SPI config MS5637 tristate "Measurement Specialties MS5637 pressure & temperature sensor" depends on I2C - select IIO_MS_SENSORS_I2C + select IIO_MS_SENSORS_I2C help If you say yes here you get support for the Measurement Specialties MS5637 pressure and temperature sensor. @@ -128,7 +145,7 @@ config T5403 Say yes here to build support for the EPCOS T5403 pressure sensor connected via I2C. - To compile this driver as a module, choose M here: the module - will be called t5403. + To compile this driver as a module, choose M here: the module + will be called t5403. 
endmenu diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile index 46571c96823f..d336af14f3fe 100644 --- a/drivers/iio/pressure/Makefile +++ b/drivers/iio/pressure/Makefile @@ -6,6 +6,8 @@ obj-$(CONFIG_BMP280) += bmp280.o obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o obj-$(CONFIG_MPL115) += mpl115.o +obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o +obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o obj-$(CONFIG_MPL3115) += mpl3115.o obj-$(CONFIG_MS5611) += ms5611_core.o obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c index a0d7deeac62f..73f2f0c46e62 100644 --- a/drivers/iio/pressure/mpl115.c +++ b/drivers/iio/pressure/mpl115.c @@ -1,5 +1,5 @@ /* - * mpl115.c - Support for Freescale MPL115A2 pressure/temperature sensor + * mpl115.c - Support for Freescale MPL115A pressure/temperature sensor * * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net> * @@ -7,17 +7,16 @@ * the GNU General Public License. See the file COPYING in the main * directory of this archive for more details. * - * (7-bit I2C slave address 0x60) - * * TODO: shutdown pin * */ #include <linux/module.h> -#include <linux/i2c.h> #include <linux/iio/iio.h> #include <linux/delay.h> +#include "mpl115.h" + #define MPL115_PADC 0x00 /* pressure ADC output value, MSB first, 10 bit */ #define MPL115_TADC 0x02 /* temperature ADC output value, MSB first, 10 bit */ #define MPL115_A0 0x04 /* 12 bit integer, 3 bit fraction */ @@ -27,16 +26,18 @@ #define MPL115_CONVERT 0x12 /* convert temperature and pressure */ struct mpl115_data { - struct i2c_client *client; + struct device *dev; struct mutex lock; s16 a0; s16 b1, b2; s16 c12; + const struct mpl115_ops *ops; }; static int mpl115_request(struct mpl115_data *data) { - int ret = i2c_smbus_write_byte_data(data->client, MPL115_CONVERT, 0); + int ret = data->ops->write(data->dev, MPL115_CONVERT, 0); + if (ret < 0) return ret; @@ -57,12 +58,12 @@ static int mpl115_comp_pressure(struct mpl115_data *data, int *val, int *val2) if (ret < 0) goto done; - ret = i2c_smbus_read_word_swapped(data->client, MPL115_PADC); + ret = data->ops->read(data->dev, MPL115_PADC); if (ret < 0) goto done; padc = ret >> 6; - ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC); + ret = data->ops->read(data->dev, MPL115_TADC); if (ret < 0) goto done; tadc = ret >> 6; @@ -90,7 +91,7 @@ static int mpl115_read_temp(struct mpl115_data *data) ret = mpl115_request(data); if (ret < 0) goto done; - ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC); + ret = data->ops->read(data->dev, MPL115_TADC); done: mutex_unlock(&data->lock); return ret; @@ -145,66 +146,53 @@ static const struct iio_info mpl115_info = { .driver_module = THIS_MODULE, }; -static int mpl115_probe(struct i2c_client *client, - const struct i2c_device_id *id) +int mpl115_probe(struct device *dev, const char *name, + const struct mpl115_ops *ops) { struct mpl115_data *data; struct iio_dev *indio_dev; int ret; - if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) - return -ENODEV; - - indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; data = iio_priv(indio_dev); - data->client = client; + data->dev = dev; + data->ops = ops; mutex_init(&data->lock); - i2c_set_clientdata(client, indio_dev); indio_dev->info = &mpl115_info; - indio_dev->name = id->name; - indio_dev->dev.parent = &client->dev; + indio_dev->name = name; + indio_dev->dev.parent = dev; 
indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = mpl115_channels; indio_dev->num_channels = ARRAY_SIZE(mpl115_channels); - ret = i2c_smbus_read_word_swapped(data->client, MPL115_A0); + ret = data->ops->init(data->dev); + if (ret) + return ret; + + ret = data->ops->read(data->dev, MPL115_A0); if (ret < 0) return ret; data->a0 = ret; - ret = i2c_smbus_read_word_swapped(data->client, MPL115_B1); + ret = data->ops->read(data->dev, MPL115_B1); if (ret < 0) return ret; data->b1 = ret; - ret = i2c_smbus_read_word_swapped(data->client, MPL115_B2); + ret = data->ops->read(data->dev, MPL115_B2); if (ret < 0) return ret; data->b2 = ret; - ret = i2c_smbus_read_word_swapped(data->client, MPL115_C12); + ret = data->ops->read(data->dev, MPL115_C12); if (ret < 0) return ret; data->c12 = ret; - return devm_iio_device_register(&client->dev, indio_dev); + return devm_iio_device_register(dev, indio_dev); } - -static const struct i2c_device_id mpl115_id[] = { - { "mpl115", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, mpl115_id); - -static struct i2c_driver mpl115_driver = { - .driver = { - .name = "mpl115", - }, - .probe = mpl115_probe, - .id_table = mpl115_id, -}; -module_i2c_driver(mpl115_driver); +EXPORT_SYMBOL_GPL(mpl115_probe); MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>"); MODULE_DESCRIPTION("Freescale MPL115 pressure/temperature driver"); diff --git a/drivers/iio/pressure/mpl115.h b/drivers/iio/pressure/mpl115.h new file mode 100644 index 000000000000..01b652774dc3 --- /dev/null +++ b/drivers/iio/pressure/mpl115.h @@ -0,0 +1,24 @@ +/* + * Freescale MPL115A pressure/temperature sensor + * + * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net> + * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com> + * + * This file is subject to the terms and conditions of version 2 of + * the GNU General Public License. See the file COPYING in the main + * directory of this archive for more details. + */ + +#ifndef _MPL115_H_ +#define _MPL115_H_ + +struct mpl115_ops { + int (*init)(struct device *); + int (*read)(struct device *, u8); + int (*write)(struct device *, u8, u8); +}; + +int mpl115_probe(struct device *dev, const char *name, + const struct mpl115_ops *ops); + +#endif diff --git a/drivers/iio/pressure/mpl115_i2c.c b/drivers/iio/pressure/mpl115_i2c.c new file mode 100644 index 000000000000..1a29be462f6e --- /dev/null +++ b/drivers/iio/pressure/mpl115_i2c.c @@ -0,0 +1,67 @@ +/* + * Freescale MPL115A2 pressure/temperature sensor + * + * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net> + * + * This file is subject to the terms and conditions of version 2 of + * the GNU General Public License. See the file COPYING in the main + * directory of this archive for more details. 
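The new mpl115.h above is the heart of this refactor: the core driver now only sees an init/read/write ops table, and the I2C and SPI glue that follows fills it in. As a hedged, purely illustrative userspace analogue of that split (none of these names are the kernel API):

/* ops_demo.c - userspace analogue of the mpl115_ops indirection */
#include <stdio.h>

struct demo_ops {
        int (*init)(void *ctx);
        int (*read)(void *ctx, unsigned char reg);      /* returns a 16-bit word or < 0 */
        int (*write)(void *ctx, unsigned char reg, unsigned char val);
};

/* stand-in for the bus glue that would normally do SMBus or SPI transfers */
static int fake_init(void *ctx) { (void)ctx; return 0; }
static int fake_read(void *ctx, unsigned char reg) { (void)ctx; return 0x1200 | reg; }
static int fake_write(void *ctx, unsigned char reg, unsigned char val)
{
        (void)ctx;
        printf("write reg 0x%02x <- 0x%02x\n", reg, val);
        return 0;
}

static const struct demo_ops fake_ops = { fake_init, fake_read, fake_write };

/* "core" side: talks only to the ops table, as mpl115_probe() now does */
static int core_read_calibration(const struct demo_ops *ops, void *ctx)
{
        if (ops->init(ctx) < 0)
                return -1;
        printf("A0 word: 0x%04x\n", ops->read(ctx, 0x04));      /* MPL115_A0 */
        return ops->write(ctx, 0x12, 0);                        /* MPL115_CONVERT */
}

int main(void)
{
        return core_read_calibration(&fake_ops, NULL);
}

The design choice mirrors ms5611: one core module holding the compensation logic, plus thin bus-specific front ends that can be selected independently in Kconfig.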
+ * + * (7-bit I2C slave address 0x60) + * + * Datasheet: http://www.nxp.com/files/sensors/doc/data_sheet/MPL115A2.pdf + */ + +#include <linux/module.h> +#include <linux/i2c.h> + +#include "mpl115.h" + +static int mpl115_i2c_init(struct device *dev) +{ + return 0; +} + +static int mpl115_i2c_read(struct device *dev, u8 address) +{ + return i2c_smbus_read_word_swapped(to_i2c_client(dev), address); +} + +static int mpl115_i2c_write(struct device *dev, u8 address, u8 value) +{ + return i2c_smbus_write_byte_data(to_i2c_client(dev), address, value); +} + +static const struct mpl115_ops mpl115_i2c_ops = { + .init = mpl115_i2c_init, + .read = mpl115_i2c_read, + .write = mpl115_i2c_write, +}; + +static int mpl115_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) + return -EOPNOTSUPP; + + return mpl115_probe(&client->dev, id->name, &mpl115_i2c_ops); +} + +static const struct i2c_device_id mpl115_i2c_id[] = { + { "mpl115", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, mpl115_i2c_id); + +static struct i2c_driver mpl115_i2c_driver = { + .driver = { + .name = "mpl115", + }, + .probe = mpl115_i2c_probe, + .id_table = mpl115_i2c_id, +}; +module_i2c_driver(mpl115_i2c_driver); + +MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>"); +MODULE_DESCRIPTION("Freescale MPL115A2 pressure/temperature driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/pressure/mpl115_spi.c b/drivers/iio/pressure/mpl115_spi.c new file mode 100644 index 000000000000..9ebf55f5b3aa --- /dev/null +++ b/drivers/iio/pressure/mpl115_spi.c @@ -0,0 +1,106 @@ +/* + * Freescale MPL115A1 pressure/temperature sensor + * + * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com> + * + * This file is subject to the terms and conditions of version 2 of + * the GNU General Public License. See the file COPYING in the main + * directory of this archive for more details. 
+ * + * Datasheet: http://www.nxp.com/files/sensors/doc/data_sheet/MPL115A1.pdf + */ + +#include <linux/module.h> +#include <linux/spi/spi.h> + +#include "mpl115.h" + +#define MPL115_SPI_WRITE(address) ((address) << 1) +#define MPL115_SPI_READ(address) (0x80 | (address) << 1) + +struct mpl115_spi_buf { + u8 tx[4]; + u8 rx[4]; +}; + +static int mpl115_spi_init(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + struct mpl115_spi_buf *buf; + + buf = devm_kzalloc(dev, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spi_set_drvdata(spi, buf); + + return 0; +} + +static int mpl115_spi_read(struct device *dev, u8 address) +{ + struct spi_device *spi = to_spi_device(dev); + struct mpl115_spi_buf *buf = spi_get_drvdata(spi); + struct spi_transfer xfer = { + .tx_buf = buf->tx, + .rx_buf = buf->rx, + .len = 4, + }; + int ret; + + buf->tx[0] = MPL115_SPI_READ(address); + buf->tx[2] = MPL115_SPI_READ(address + 1); + + ret = spi_sync_transfer(spi, &xfer, 1); + if (ret) + return ret; + + return (buf->rx[1] << 8) | buf->rx[3]; +} + +static int mpl115_spi_write(struct device *dev, u8 address, u8 value) +{ + struct spi_device *spi = to_spi_device(dev); + struct mpl115_spi_buf *buf = spi_get_drvdata(spi); + struct spi_transfer xfer = { + .tx_buf = buf->tx, + .len = 2, + }; + + buf->tx[0] = MPL115_SPI_WRITE(address); + buf->tx[1] = value; + + return spi_sync_transfer(spi, &xfer, 1); +} + +static const struct mpl115_ops mpl115_spi_ops = { + .init = mpl115_spi_init, + .read = mpl115_spi_read, + .write = mpl115_spi_write, +}; + +static int mpl115_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = spi_get_device_id(spi); + + return mpl115_probe(&spi->dev, id->name, &mpl115_spi_ops); +} + +static const struct spi_device_id mpl115_spi_ids[] = { + { "mpl115", 0 }, + {} +}; +MODULE_DEVICE_TABLE(spi, mpl115_spi_ids); + +static struct spi_driver mpl115_spi_driver = { + .driver = { + .name = "mpl115", + }, + .probe = mpl115_spi_probe, + .id_table = mpl115_spi_ids, +}; +module_spi_driver(mpl115_spi_driver); + +MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); +MODULE_DESCRIPTION("Freescale MPL115A1 pressure/temperature driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h index 23b93c797dba..8b08e4b7e3a9 100644 --- a/drivers/iio/pressure/ms5611.h +++ b/drivers/iio/pressure/ms5611.h @@ -51,6 +51,8 @@ struct ms5611_state { struct ms5611_chip_info *chip_info; }; -int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type); +int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, + const char* name, int type); +int ms5611_remove(struct iio_dev *indio_dev); #endif /* _MS5611_H */ diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 2f3d9b4aca4e..992ad8d3b67a 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -16,7 +16,11 @@ #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/delay.h> +#include <linux/regulator/consumer.h> +#include <linux/iio/buffer.h> +#include <linux/iio/triggered_buffer.h> +#include <linux/iio/trigger_consumer.h> #include "ms5611.h" static bool ms5611_prom_is_valid(u16 *prom, size_t len) @@ -133,17 +137,17 @@ static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf t = 2000 + ((chip_info->prom[6] * dt) >> 23); if (t < 2000) { - s64 off2, sens2, t2; + s64 off2, sens2, t2, tmp; t2 = (dt * dt) >> 31; - off2 = (61 * (t - 2000) * (t - 2000)) >> 4; - sens2 = 
off2 << 1; + tmp = (t - 2000) * (t - 2000); + off2 = (61 * tmp) >> 4; + sens2 = tmp << 1; if (t < -1500) { - s64 tmp = (t + 1500) * (t + 1500); - + tmp = (t + 1500) * (t + 1500); off2 += 15 * tmp; - sens2 += (8 * tmp); + sens2 += 8 * tmp; } t -= t2; @@ -173,6 +177,28 @@ static int ms5611_reset(struct iio_dev *indio_dev) return 0; } +static irqreturn_t ms5611_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct ms5611_state *st = iio_priv(indio_dev); + s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */ + int ret; + + mutex_lock(&st->lock); + ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]); + mutex_unlock(&st->lock); + if (ret < 0) + goto err; + + iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns()); + +err: + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + static int ms5611_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -201,11 +227,25 @@ static int ms5611_read_raw(struct iio_dev *indio_dev, default: return -EINVAL; } + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_TEMP: + *val = 10; + return IIO_VAL_INT; + case IIO_PRESSURE: + *val = 0; + *val2 = 1000; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } } return -EINVAL; } +static const unsigned long ms5611_scan_masks[] = {0x3, 0}; + static struct ms5611_chip_info chip_info_tbl[] = { [MS5611] = { .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate, @@ -218,12 +258,29 @@ static struct ms5611_chip_info chip_info_tbl[] = { static const struct iio_chan_spec ms5611_channels[] = { { .type = IIO_PRESSURE, - .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 0, + .scan_type = { + .sign = 's', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_CPU, + }, }, { .type = IIO_TEMP, - .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), - } + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_SCALE), + .scan_index = 1, + .scan_type = { + .sign = 's', + .realbits = 32, + .storagebits = 32, + .endianness = IIO_CPU, + }, + }, + IIO_CHAN_SOFT_TIMESTAMP(2), }; static const struct iio_info ms5611_info = { @@ -234,6 +291,18 @@ static const struct iio_info ms5611_info = { static int ms5611_init(struct iio_dev *indio_dev) { int ret; + struct regulator *vdd = devm_regulator_get(indio_dev->dev.parent, + "vdd"); + + /* Enable attached regulator if any. 
*/ + if (!IS_ERR(vdd)) { + ret = regulator_enable(vdd); + if (ret) { + dev_err(indio_dev->dev.parent, + "failed to enable Vdd supply: %d\n", ret); + return ret; + } + } ret = ms5611_reset(indio_dev); if (ret < 0) @@ -242,7 +311,8 @@ static int ms5611_init(struct iio_dev *indio_dev) return ms5611_read_prom(indio_dev); } -int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type) +int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, + const char *name, int type) { int ret; struct ms5611_state *st = iio_priv(indio_dev); @@ -250,20 +320,48 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type) mutex_init(&st->lock); st->chip_info = &chip_info_tbl[type]; indio_dev->dev.parent = dev; - indio_dev->name = dev->driver->name; + indio_dev->name = name; indio_dev->info = &ms5611_info; indio_dev->channels = ms5611_channels; indio_dev->num_channels = ARRAY_SIZE(ms5611_channels); indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->available_scan_masks = ms5611_scan_masks; ret = ms5611_init(indio_dev); if (ret < 0) return ret; - return devm_iio_device_register(dev, indio_dev); + ret = iio_triggered_buffer_setup(indio_dev, NULL, + ms5611_trigger_handler, NULL); + if (ret < 0) { + dev_err(dev, "iio triggered buffer setup failed\n"); + return ret; + } + + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(dev, "unable to register iio device\n"); + goto err_buffer_cleanup; + } + + return 0; + +err_buffer_cleanup: + iio_triggered_buffer_cleanup(indio_dev); + + return ret; } EXPORT_SYMBOL(ms5611_probe); +int ms5611_remove(struct iio_dev *indio_dev) +{ + iio_device_unregister(indio_dev); + iio_triggered_buffer_cleanup(indio_dev); + + return 0; +} +EXPORT_SYMBOL(ms5611_remove); + MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>"); MODULE_DESCRIPTION("MS5611 core driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c index 245797d1ecf0..7f6fc8eee922 100644 --- a/drivers/iio/pressure/ms5611_i2c.c +++ b/drivers/iio/pressure/ms5611_i2c.c @@ -92,19 +92,25 @@ static int ms5611_i2c_probe(struct i2c_client *client, I2C_FUNC_SMBUS_WRITE_BYTE | I2C_FUNC_SMBUS_READ_WORD_DATA | I2C_FUNC_SMBUS_READ_I2C_BLOCK)) - return -ENODEV; + return -EOPNOTSUPP; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st)); if (!indio_dev) return -ENOMEM; st = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); st->reset = ms5611_i2c_reset; st->read_prom_word = ms5611_i2c_read_prom_word; st->read_adc_temp_and_pressure = ms5611_i2c_read_adc_temp_and_pressure; st->client = client; - return ms5611_probe(indio_dev, &client->dev, id->driver_data); + return ms5611_probe(indio_dev, &client->dev, id->name, id->driver_data); +} + +static int ms5611_i2c_remove(struct i2c_client *client) +{ + return ms5611_remove(i2c_get_clientdata(client)); } static const struct i2c_device_id ms5611_id[] = { @@ -120,6 +126,7 @@ static struct i2c_driver ms5611_driver = { }, .id_table = ms5611_id, .probe = ms5611_i2c_probe, + .remove = ms5611_i2c_remove, }; module_i2c_driver(ms5611_driver); diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c index aaa0c4ba91a7..5cc009e85f0e 100644 --- a/drivers/iio/pressure/ms5611_spi.c +++ b/drivers/iio/pressure/ms5611_spi.c @@ -90,6 +90,8 @@ static int ms5611_spi_probe(struct spi_device *spi) if (!indio_dev) return -ENOMEM; + spi_set_drvdata(spi, indio_dev); + spi->mode = SPI_MODE_0; spi->max_speed_hz = 20000000; spi->bits_per_word = 8; @@ -103,8 +105,13 @@ static 
int ms5611_spi_probe(struct spi_device *spi) st->read_adc_temp_and_pressure = ms5611_spi_read_adc_temp_and_pressure; st->client = spi; - return ms5611_probe(indio_dev, &spi->dev, - spi_get_device_id(spi)->driver_data); + return ms5611_probe(indio_dev, &spi->dev, spi_get_device_id(spi)->name, + spi_get_device_id(spi)->driver_data); +} + +static int ms5611_spi_remove(struct spi_device *spi) +{ + return ms5611_remove(spi_get_drvdata(spi)); } static const struct spi_device_id ms5611_id[] = { @@ -120,6 +127,7 @@ static struct spi_driver ms5611_driver = { }, .id_table = ms5611_id, .probe = ms5611_spi_probe, + .remove = ms5611_spi_remove, }; module_spi_driver(ms5611_driver); diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c index e8d0e0da938d..e68052c118e6 100644 --- a/drivers/iio/pressure/ms5637.c +++ b/drivers/iio/pressure/ms5637.c @@ -136,7 +136,7 @@ static int ms5637_probe(struct i2c_client *client, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { dev_err(&client->dev, "Adapter does not support some i2c transaction\n"); - return -ENODEV; + return -EOPNOTSUPP; } indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data)); diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index b39a2fb0671c..172393ad34af 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -62,6 +62,8 @@ #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 #define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04 #define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20 +#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22 +#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80 #define ST_PRESS_LPS331AP_MULTIREAD_BIT true #define ST_PRESS_LPS331AP_TEMP_OFFSET 42500 @@ -100,6 +102,8 @@ #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 #define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01 #define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10 +#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22 +#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80 #define ST_PRESS_LPS25H_MULTIREAD_BIT true #define ST_PRESS_LPS25H_TEMP_OFFSET 42500 #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 @@ -220,6 +224,8 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR, .mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK, .mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK, + .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR, + .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK, }, .multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT, .bootime = 2, @@ -304,6 +310,8 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR, .mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK, .mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK, + .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR, + .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK, }, .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT, .bootime = 2, diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c index e11cd3938d67..2667e71721f5 100644 --- a/drivers/iio/pressure/t5403.c +++ b/drivers/iio/pressure/t5403.c @@ -221,7 +221,7 @@ static int t5403_probe(struct i2c_client *client, if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) - return -ENODEV; + return -EOPNOTSUPP; ret = i2c_smbus_read_byte_data(client, T5403_SLAVE_ADDR); if (ret < 0) diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c index db35e04a0637..4f502386aa86 100644 --- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c +++ 
b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c @@ -278,7 +278,7 @@ static int lidar_probe(struct i2c_client *client, I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE)) data->xfer = lidar_smbus_xfer; else - return -ENOTSUPP; + return -EOPNOTSUPP; indio_dev->info = &lidar_info; indio_dev->name = LIDAR_DRV_NAME; diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c index a570c2e2aac3..4b645fc672aa 100644 --- a/drivers/iio/temperature/mlx90614.c +++ b/drivers/iio/temperature/mlx90614.c @@ -516,7 +516,7 @@ static int mlx90614_probe(struct i2c_client *client, int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) - return -ENODEV; + return -EOPNOTSUPP; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c index e78c1069a6a9..18c9b43c02cb 100644 --- a/drivers/iio/temperature/tmp006.c +++ b/drivers/iio/temperature/tmp006.c @@ -205,7 +205,7 @@ static int tmp006_probe(struct i2c_client *client, int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) - return -ENODEV; + return -EOPNOTSUPP; if (!tmp006_check_identification(client)) { dev_err(&client->dev, "no TMP006 sensor\n"); diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c index 05c12060ce8d..3e60c6189d98 100644 --- a/drivers/iio/temperature/tsys01.c +++ b/drivers/iio/temperature/tsys01.c @@ -190,7 +190,7 @@ static int tsys01_i2c_probe(struct i2c_client *client, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { dev_err(&client->dev, "Adapter does not support some i2c transaction\n"); - return -ENODEV; + return -EOPNOTSUPP; } indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data)); diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c index 4c1fbd52ea08..ab6fe8f6f2d1 100644 --- a/drivers/iio/temperature/tsys02d.c +++ b/drivers/iio/temperature/tsys02d.c @@ -137,7 +137,7 @@ static int tsys02d_probe(struct i2c_client *client, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) { dev_err(&client->dev, "Adapter does not support some i2c transaction\n"); - return -ENODEV; + return -EOPNOTSUPP; } indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data)); diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile index 91c81965e7ca..c32e45826c2c 100644 --- a/drivers/isdn/Makefile +++ b/drivers/isdn/Makefile @@ -8,9 +8,6 @@ obj-$(CONFIG_MISDN) += mISDN/ obj-$(CONFIG_ISDN) += hardware/ obj-$(CONFIG_ISDN_DIVERSION) += divert/ obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/ -obj-$(CONFIG_ISDN_DRV_ICN) += icn/ -obj-$(CONFIG_ISDN_DRV_PCBIT) += pcbit/ obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/ -obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/ obj-$(CONFIG_HYSDN) += hysdn/ obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/ diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig index f5b714cd7618..68e54d9f2f53 100644 --- a/drivers/isdn/i4l/Kconfig +++ b/drivers/isdn/i4l/Kconfig @@ -123,16 +123,6 @@ comment "ISDN4Linux hardware drivers" source "drivers/isdn/hisax/Kconfig" - -menu "Active cards" - -source "drivers/isdn/icn/Kconfig" - -source "drivers/isdn/pcbit/Kconfig" - -source "drivers/isdn/act2000/Kconfig" - -endmenu # end ISDN_I4L endif diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 15579514d120..a216b4667742 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -525,6 +525,284 @@ config VEXPRESS_SYSCFG ARM Ltd. Versatile Express uses specialised platform configuration bus. 
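One recurring detail in the probe hunks above (mcp4531, mpl115_i2c, ms5611_i2c, ms5637, t5403, the pulsedlight lidar, mlx90614, tmp006, tsys01 and tsys02d) is the switch to returning -EOPNOTSUPP when the I2C adapter lacks the needed SMBus functionality. A plausible reason is that ENOTSUPP is a kernel-internal code that userspace cannot decode; the small program below, which copies ENOTSUPP's value from include/linux/errno.h, shows the difference:

/* errno_demo.c - how the two codes look once they reach userspace */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define ENOTSUPP 524    /* kernel-internal value, from include/linux/errno.h */

int main(void)
{
        printf("EOPNOTSUPP (%3d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
        printf("ENOTSUPP   (%3d): %s\n", ENOTSUPP, strerror(ENOTSUPP));
        return 0;
}

On a typical glibc system the first line reads "Operation not supported" while the second is only "Unknown error 524".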
System Configuration interface is one of the possible means of generating transactions on this bus. +config PANEL + tristate "Parallel port LCD/Keypad Panel support" + depends on PARPORT + ---help--- + Say Y here if you have an HD44780 or KS-0074 LCD connected to your + parallel port. This driver also features 4 and 6-key keypads. The LCD + is accessible through the /dev/lcd char device (10, 156), and the + keypad through /dev/keypad (10, 185). Both require misc device to be + enabled. This code can either be compiled as a module, or linked into + the kernel and started at boot. If you don't understand what all this + is about, say N. + +config PANEL_PARPORT + int "Default parallel port number (0=LPT1)" + depends on PANEL + range 0 255 + default "0" + ---help--- + This is the index of the parallel port the panel is connected to. One + driver instance only supports one parallel port, so if your keypad + and LCD are connected to two separate ports, you have to start two + modules with different arguments. Numbering starts with '0' for LPT1, + and so on. + +config PANEL_PROFILE + int "Default panel profile (0-5, 0=custom)" + depends on PANEL + range 0 5 + default "5" + ---help--- + To ease configuration, the driver supports different configuration + profiles for past and recent wirings. These profiles can also be + used to define an approximative configuration, completed by a few + other options. Here are the profiles : + + 0 = custom (see further) + 1 = 2x16 parallel LCD, old keypad + 2 = 2x16 serial LCD (KS-0074), new keypad + 3 = 2x16 parallel LCD (Hantronix), no keypad + 4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad + 5 = 2x40 parallel LCD (old one), with old keypad + + Custom configurations allow you to define how your display is + wired to the parallel port, and how it works. This is only intended + for experts. + +config PANEL_KEYPAD + depends on PANEL && PANEL_PROFILE="0" + int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)" + range 0 3 + default 0 + ---help--- + This enables and configures a keypad connected to the parallel port. + The keys will be read from character device 10,185. Valid values are : + + 0 : do not enable this driver + 1 : old 6 keys keypad + 2 : new 6 keys keypad, as used on the server at www.ant-computing.com + 3 : Nexcom NSA1045's 4 keys keypad + + New profiles can be described in the driver source. The driver also + supports simultaneous keys pressed when the keypad supports them. + +config PANEL_LCD + depends on PANEL && PANEL_PROFILE="0" + int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)" + range 0 5 + default 0 + ---help--- + This enables and configures an LCD connected to the parallel port. + The driver includes an interpreter for escape codes starting with + '\e[L' which are specific to the LCD, and a few ANSI codes. The + driver will be registered as character device 10,156, usually + under the name '/dev/lcd'. There are a total of 6 supported types : + + 0 : do not enable the driver + 1 : custom configuration and wiring (see further) + 2 : 2x16 & 2x40 parallel LCD (old wiring) + 3 : 2x16 serial LCD (KS-0074 based) + 4 : 2x16 parallel LCD (Hantronix wiring) + 5 : 2x16 parallel LCD (Nexcom wiring) + + When type '1' is specified, other options will appear to configure + more precise aspects (wiring, dimensions, protocol, ...). Please note + that those values changed from the 2.4 driver for better consistency. 
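Since the PANEL_LCD help above describes the /dev/lcd character device (10,156) and its escape-code interpreter, a minimal userspace sketch of using it may help; it assumes the driver is loaded, that the node exists, and that a leading form feed clears the display (an assumption about the driver's control-character handling):

/* lcd_hello.c - write a message to the panel driver's /dev/lcd node */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        const char msg[] = "\fHello, panel!\n";  /* "\f" is assumed to clear the display first */
        int fd = open("/dev/lcd", O_WRONLY);

        if (fd < 0) {
                perror("open /dev/lcd");
                return 1;
        }
        if (write(fd, msg, sizeof(msg) - 1) < 0)
                perror("write");
        close(fd);
        return 0;
}

Anything fancier (cursor control, backlight, and so on) goes through the '\e[L' escape sequences mentioned in the help text.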
+ +config PANEL_LCD_HEIGHT + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" + int "Number of lines on the LCD (1-2)" + range 1 2 + default 2 + ---help--- + This is the number of visible character lines on the LCD in custom profile. + It can either be 1 or 2. + +config PANEL_LCD_WIDTH + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" + int "Number of characters per line on the LCD (1-40)" + range 1 40 + default 40 + ---help--- + This is the number of characters per line on the LCD in custom profile. + Common values are 16,20,24,40. + +config PANEL_LCD_BWIDTH + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" + int "Internal LCD line width (1-40, 40 by default)" + range 1 40 + default 40 + ---help--- + Most LCDs use a standard controller which supports hardware lines of 40 + characters, although sometimes only 16, 20 or 24 of them are really wired + to the terminal. This results in some non-visible but addressable characters, + and is the case for most parallel LCDs. Other LCDs, and some serial ones, + however, use the same line width internally as what is visible. The KS0074 + for example, uses 16 characters per line for 16 visible characters per line. + + This option lets you configure the value used by your LCD in 'custom' profile. + If you don't know, put '40' here. + +config PANEL_LCD_HWIDTH + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" + int "Hardware LCD line width (1-64, 64 by default)" + range 1 64 + default 64 + ---help--- + Most LCDs use a single address bit to differentiate line 0 and line 1. Since + some of them need to be able to address 40 chars with the lower bits, they + often use the immediately superior power of 2, which is 64, to address the + next line. + + If you don't know what your LCD uses, in doubt let 16 here for a 2x16, and + 64 here for a 2x40. + +config PANEL_LCD_CHARSET + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" + int "LCD character set (0=normal, 1=KS0074)" + range 0 1 + default 0 + ---help--- + Some controllers such as the KS0074 use a somewhat strange character set + where many symbols are at unusual places. The driver knows how to map + 'standard' ASCII characters to the character sets used by these controllers. + Valid values are : + + 0 : normal (untranslated) character set + 1 : KS0074 character set + + If you don't know, use the normal one (0). + +config PANEL_LCD_PROTO + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" + int "LCD communication mode (0=parallel 8 bits, 1=serial)" + range 0 1 + default 0 + ---help--- + This driver now supports any serial or parallel LCD wired to a parallel + port. But before assigning signals, the driver needs to know if it will + be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires + (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals + (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for a 8 bits + parallel LCD, and 1 for a serial LCD. + +config PANEL_LCD_PIN_E + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" + int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) " + range -17 17 + default 14 + ---help--- + This describes the number of the parallel port pin to which the LCD 'E' + signal has been connected. It can be : + + 0 : no connection (eg: connected to ground) + 1..17 : directly connected to any of these pins on the DB25 plug + -1..-17 : connected to the same pin through an inverter (eg: transistor). 
+ + Default for the 'E' pin in custom profile is '14' (AUTOFEED). + +config PANEL_LCD_PIN_RS + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" + int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) " + range -17 17 + default 17 + ---help--- + This describes the number of the parallel port pin to which the LCD 'RS' + signal has been connected. It can be : + + 0 : no connection (eg: connected to ground) + 1..17 : directly connected to any of these pins on the DB25 plug + -1..-17 : connected to the same pin through an inverter (eg: transistor). + + Default for the 'RS' pin in custom profile is '17' (SELECT IN). + +config PANEL_LCD_PIN_RW + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" + int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) " + range -17 17 + default 16 + ---help--- + This describes the number of the parallel port pin to which the LCD 'RW' + signal has been connected. It can be : + + 0 : no connection (eg: connected to ground) + 1..17 : directly connected to any of these pins on the DB25 plug + -1..-17 : connected to the same pin through an inverter (eg: transistor). + + Default for the 'RW' pin in custom profile is '16' (INIT). + +config PANEL_LCD_PIN_SCL + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" + int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) " + range -17 17 + default 1 + ---help--- + This describes the number of the parallel port pin to which the serial + LCD 'SCL' signal has been connected. It can be : + + 0 : no connection (eg: connected to ground) + 1..17 : directly connected to any of these pins on the DB25 plug + -1..-17 : connected to the same pin through an inverter (eg: transistor). + + Default for the 'SCL' pin in custom profile is '1' (STROBE). + +config PANEL_LCD_PIN_SDA + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" + int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) " + range -17 17 + default 2 + ---help--- + This describes the number of the parallel port pin to which the serial + LCD 'SDA' signal has been connected. It can be : + + 0 : no connection (eg: connected to ground) + 1..17 : directly connected to any of these pins on the DB25 plug + -1..-17 : connected to the same pin through an inverter (eg: transistor). + + Default for the 'SDA' pin in custom profile is '2' (D0). + +config PANEL_LCD_PIN_BL + depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" + int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) " + range -17 17 + default 0 + ---help--- + This describes the number of the parallel port pin to which the LCD 'BL' signal + has been connected. It can be : + + 0 : no connection (eg: connected to ground) + 1..17 : directly connected to any of these pins on the DB25 plug + -1..-17 : connected to the same pin through an inverter (eg: transistor). + + Default for the 'BL' pin in custom profile is '0' (uncontrolled). + +config PANEL_CHANGE_MESSAGE + depends on PANEL + bool "Change LCD initialization message ?" + default "n" + ---help--- + This allows you to replace the boot message indicating the kernel version + and the driver version with a custom message. This is useful on appliances + where a simple 'Starting system' message can be enough to stop a customer + from worrying. + + If you say 'Y' here, you'll be able to choose a message yourself. 
Otherwise, + say 'N' and keep the default message with the version. + +config PANEL_BOOT_MESSAGE + depends on PANEL && PANEL_CHANGE_MESSAGE="y" + string "New initialization message" + default "" + ---help--- + This allows you to replace the boot message indicating the kernel version + and the driver version with a custom message. This is useful on appliances + where a simple 'Starting system' message can be enough to stop a customer + from worrying. + + An empty message will only clear the display at driver init time. Any other + printf()-formatted message is valid with newline and escape codes. source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 537d7f3b78da..b2fb6dbffcef 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -56,3 +56,4 @@ obj-$(CONFIG_GENWQE) += genwqe/ obj-$(CONFIG_ECHO) += echo/ obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o obj-$(CONFIG_CXL_BASE) += cxl/ +obj-$(CONFIG_PANEL) += panel.o diff --git a/drivers/staging/panel/panel.c b/drivers/misc/panel.c index 70b8f4fabfad..6030ac5b8c63 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/misc/panel.c @@ -172,8 +172,6 @@ static __u8 scan_mask_o; /* logical or of the input bits involved in the scan matrix */ static __u8 scan_mask_i; -typedef __u64 pmask_t; - enum input_type { INPUT_TYPE_STD, INPUT_TYPE_KBD, @@ -188,8 +186,8 @@ enum input_state { struct logical_input { struct list_head list; - pmask_t mask; - pmask_t value; + __u64 mask; + __u64 value; enum input_type type; enum input_state state; __u8 rise_time, fall_time; @@ -219,19 +217,19 @@ static LIST_HEAD(logical_inputs); /* list of all defined logical inputs */ * corresponds to the ground. * Within each group, bits are stored in the same order as read on the port : * BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0). - * So, each __u64 (or pmask_t) is represented like this : + * So, each __u64 is represented like this : * 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE * <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00> */ /* what has just been read from the I/O ports */ -static pmask_t phys_read; +static __u64 phys_read; /* previous phys_read */ -static pmask_t phys_read_prev; +static __u64 phys_read_prev; /* stabilized phys_read (phys_read|phys_read_prev) */ -static pmask_t phys_curr; +static __u64 phys_curr; /* previous phys_curr */ -static pmask_t phys_prev; +static __u64 phys_prev; /* 0 means that at least one logical signal needs be computed */ static char inputs_stable; @@ -650,34 +648,28 @@ static const char nexcom_keypad_profile[][4][9] = { static const char (*keypad_profile)[4][9] = old_keypad_profile; -/* FIXME: this should be converted to a bit array containing signals states */ -static struct { - unsigned char e; /* parallel LCD E (data latch on falling edge) */ - unsigned char rs; /* parallel LCD RS (0 = cmd, 1 = data) */ - unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */ - unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */ - unsigned char cl; /* serial LCD clock (latch on rising edge) */ - unsigned char da; /* serial LCD data */ -} bits; +static DECLARE_BITMAP(bits, LCD_BITS); + +static void lcd_get_bits(unsigned int port, int *val) +{ + unsigned int bit, state; + + for (bit = 0; bit < LCD_BITS; bit++) { + state = test_bit(bit, bits) ? 
BIT_SET : BIT_CLR; + *val &= lcd_bits[port][bit][BIT_MSK]; + *val |= lcd_bits[port][bit][state]; + } +} static void init_scan_timer(void); /* sets data port bits according to current signals values */ static int set_data_bits(void) { - int val, bit; + int val; val = r_dtr(pprt); - for (bit = 0; bit < LCD_BITS; bit++) - val &= lcd_bits[LCD_PORT_D][bit][BIT_MSK]; - - val |= lcd_bits[LCD_PORT_D][LCD_BIT_E][bits.e] - | lcd_bits[LCD_PORT_D][LCD_BIT_RS][bits.rs] - | lcd_bits[LCD_PORT_D][LCD_BIT_RW][bits.rw] - | lcd_bits[LCD_PORT_D][LCD_BIT_BL][bits.bl] - | lcd_bits[LCD_PORT_D][LCD_BIT_CL][bits.cl] - | lcd_bits[LCD_PORT_D][LCD_BIT_DA][bits.da]; - + lcd_get_bits(LCD_PORT_D, &val); w_dtr(pprt, val); return val; } @@ -685,19 +677,10 @@ static int set_data_bits(void) /* sets ctrl port bits according to current signals values */ static int set_ctrl_bits(void) { - int val, bit; + int val; val = r_ctr(pprt); - for (bit = 0; bit < LCD_BITS; bit++) - val &= lcd_bits[LCD_PORT_C][bit][BIT_MSK]; - - val |= lcd_bits[LCD_PORT_C][LCD_BIT_E][bits.e] - | lcd_bits[LCD_PORT_C][LCD_BIT_RS][bits.rs] - | lcd_bits[LCD_PORT_C][LCD_BIT_RW][bits.rw] - | lcd_bits[LCD_PORT_C][LCD_BIT_BL][bits.bl] - | lcd_bits[LCD_PORT_C][LCD_BIT_CL][bits.cl] - | lcd_bits[LCD_PORT_C][LCD_BIT_DA][bits.da]; - + lcd_get_bits(LCD_PORT_C, &val); w_ctr(pprt, val); return val; } @@ -793,12 +776,17 @@ static void lcd_send_serial(int byte) * LCD reads D0 on STROBE's rising edge. */ for (bit = 0; bit < 8; bit++) { - bits.cl = BIT_CLR; /* CLK low */ + clear_bit(LCD_BIT_CL, bits); /* CLK low */ panel_set_bits(); - bits.da = byte & 1; + if (byte & 1) { + set_bit(LCD_BIT_DA, bits); + } else { + clear_bit(LCD_BIT_DA, bits); + } + panel_set_bits(); udelay(2); /* maintain the data during 2 us before CLK up */ - bits.cl = BIT_SET; /* CLK high */ + set_bit(LCD_BIT_CL, bits); /* CLK high */ panel_set_bits(); udelay(1); /* maintain the strobe during 1 us */ byte >>= 1; @@ -813,7 +801,10 @@ static void lcd_backlight(int on) /* The backlight is activated by setting the AUTOFEED line to +5V */ spin_lock_irq(&pprt_lock); - bits.bl = on; + if (on) + set_bit(LCD_BIT_BL, bits); + else + clear_bit(LCD_BIT_BL, bits); panel_set_bits(); spin_unlock_irq(&pprt_lock); } @@ -848,14 +839,14 @@ static void lcd_write_cmd_p8(int cmd) w_dtr(pprt, cmd); udelay(20); /* maintain the data during 20 us before the strobe */ - bits.e = BIT_SET; - bits.rs = BIT_CLR; - bits.rw = BIT_CLR; + set_bit(LCD_BIT_E, bits); + clear_bit(LCD_BIT_RS, bits); + clear_bit(LCD_BIT_RW, bits); set_ctrl_bits(); udelay(40); /* maintain the strobe during 40 us */ - bits.e = BIT_CLR; + clear_bit(LCD_BIT_E, bits); set_ctrl_bits(); udelay(120); /* the shortest command takes at least 120 us */ @@ -870,14 +861,14 @@ static void lcd_write_data_p8(int data) w_dtr(pprt, data); udelay(20); /* maintain the data during 20 us before the strobe */ - bits.e = BIT_SET; - bits.rs = BIT_SET; - bits.rw = BIT_CLR; + set_bit(LCD_BIT_E, bits); + set_bit(LCD_BIT_RS, bits); + clear_bit(LCD_BIT_RW, bits); set_ctrl_bits(); udelay(40); /* maintain the strobe during 40 us */ - bits.e = BIT_CLR; + clear_bit(LCD_BIT_E, bits); set_ctrl_bits(); udelay(45); /* the shortest data takes at least 45 us */ @@ -943,7 +934,8 @@ static void lcd_clear_fast_s(void) lcd_send_serial(0x5F); /* R/W=W, RS=1 */ lcd_send_serial(' ' & 0x0F); lcd_send_serial((' ' >> 4) & 0x0F); - udelay(40); /* the shortest data takes at least 40 us */ + /* the shortest data takes at least 40 us */ + udelay(40); } spin_unlock_irq(&pprt_lock); @@ -969,15 +961,15 @@ static void 
lcd_clear_fast_p8(void) /* maintain the data during 20 us before the strobe */ udelay(20); - bits.e = BIT_SET; - bits.rs = BIT_SET; - bits.rw = BIT_CLR; + set_bit(LCD_BIT_E, bits); + set_bit(LCD_BIT_RS, bits); + clear_bit(LCD_BIT_RW, bits); set_ctrl_bits(); /* maintain the strobe during 40 us */ udelay(40); - bits.e = BIT_CLR; + clear_bit(LCD_BIT_E, bits); set_ctrl_bits(); /* the shortest data takes at least 45 us */ @@ -1784,7 +1776,7 @@ static void phys_scan_contacts(void) gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* grounded inputs are signals 40-44 */ - phys_read |= (pmask_t) gndmask << 40; + phys_read |= (__u64)gndmask << 40; if (bitmask != gndmask) { /* @@ -1800,7 +1792,7 @@ static void phys_scan_contacts(void) w_dtr(pprt, oldval & ~bitval); /* enable this output */ bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask; - phys_read |= (pmask_t) bitmask << (5 * bit); + phys_read |= (__u64)bitmask << (5 * bit); } w_dtr(pprt, oldval); /* disable all outputs */ } @@ -2037,32 +2029,32 @@ static void init_scan_timer(void) * corresponding to out and in bits respectively. * returns 1 if ok, 0 if error (in which case, nothing is written). */ -static int input_name2mask(const char *name, pmask_t *mask, pmask_t *value, - char *imask, char *omask) +static u8 input_name2mask(const char *name, __u64 *mask, __u64 *value, + u8 *imask, u8 *omask) { - static char sigtab[10] = "EeSsPpAaBb"; - char im, om; - pmask_t m, v; + const char sigtab[] = "EeSsPpAaBb"; + u8 im, om; + __u64 m, v; - om = 0ULL; - im = 0ULL; + om = 0; + im = 0; m = 0ULL; v = 0ULL; while (*name) { int in, out, bit, neg; + const char *idx; - for (in = 0; (in < sizeof(sigtab)) && (sigtab[in] != *name); - in++) - ; - - if (in >= sizeof(sigtab)) + idx = strchr(sigtab, *name); + if (!idx) return 0; /* input name not found */ + + in = idx - sigtab; neg = (in & 1); /* odd (lower) names are negated */ in >>= 1; im |= BIT(in); name++; - if (isdigit(*name)) { + if (*name >= '0' && *name <= '7') { out = *name - '0'; om |= BIT(out); } else if (*name == '-') { diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 839df4aace76..9973cebb4d6f 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -58,6 +58,7 @@ #include <linux/slab.h> #include <linux/io.h> #include <linux/goldfish.h> +#include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/acpi.h> @@ -216,17 +217,16 @@ static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev, static int setup_access_params_addr(struct platform_device *pdev, struct goldfish_pipe_dev *dev) { - u64 paddr; + dma_addr_t dma_handle; struct access_params *aps; - aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL); + aps = dmam_alloc_coherent(&pdev->dev, sizeof(struct access_params), + &dma_handle, GFP_KERNEL); if (!aps) - return -1; + return -ENOMEM; - /* FIXME */ - paddr = __pa(aps); - writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH); - writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW); + writel(upper_32_bits(dma_handle), dev->base + PIPE_REG_PARAMS_ADDR_HIGH); + writel(lower_32_bits(dma_handle), dev->base + PIPE_REG_PARAMS_ADDR_LOW); if (valid_batchbuffer_addr(dev, aps)) { dev->aps = aps; diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 5d3b86a33857..f0ca4a18b799 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -30,10 +30,6 @@ source "drivers/staging/wlan-ng/Kconfig" source "drivers/staging/comedi/Kconfig" -source 
"drivers/staging/olpc_dcon/Kconfig" - -source "drivers/staging/panel/Kconfig" - source "drivers/staging/rtl8192u/Kconfig" source "drivers/staging/rtl8192e/Kconfig" @@ -76,8 +72,6 @@ source "drivers/staging/android/Kconfig" source "drivers/staging/board/Kconfig" -source "drivers/staging/gdm72xx/Kconfig" - source "drivers/staging/gdm724x/Kconfig" source "drivers/staging/fwserial/Kconfig" @@ -92,8 +86,6 @@ source "drivers/staging/lustre/Kconfig" source "drivers/staging/dgnc/Kconfig" -source "drivers/staging/dgap/Kconfig" - source "drivers/staging/gs_fpgaboot/Kconfig" source "drivers/staging/skein/Kconfig" @@ -110,4 +102,6 @@ source "drivers/staging/wilc1000/Kconfig" source "drivers/staging/most/Kconfig" +source "drivers/staging/i4l/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 30918edef5e3..22464a09cb27 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -1,14 +1,9 @@ # Makefile for staging directory -# fix for build system bug... -obj-$(CONFIG_STAGING) += staging.o - obj-y += media/ obj-$(CONFIG_SLICOSS) += slicoss/ obj-$(CONFIG_PRISM2_USB) += wlan-ng/ obj-$(CONFIG_COMEDI) += comedi/ -obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/ -obj-$(CONFIG_PANEL) += panel/ obj-$(CONFIG_RTL8192U) += rtl8192u/ obj-$(CONFIG_RTL8192E) += rtl8192e/ obj-$(CONFIG_R8712U) += rtl8712/ @@ -31,13 +26,11 @@ obj-$(CONFIG_MFD_NVEC) += nvec/ obj-$(CONFIG_STAGING_RDMA) += rdma/ obj-$(CONFIG_ANDROID) += android/ obj-$(CONFIG_STAGING_BOARD) += board/ -obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/ obj-$(CONFIG_LTE_GDM724X) += gdm724x/ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/ obj-$(CONFIG_GOLDFISH) += goldfish/ -obj-$(CONFIG_LUSTRE_FS) += lustre/ +obj-$(CONFIG_LNET) += lustre/ obj-$(CONFIG_DGNC) += dgnc/ -obj-$(CONFIG_DGAP) += dgap/ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/ obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ obj-$(CONFIG_CRYPTO_SKEIN) += skein/ @@ -47,3 +40,4 @@ obj-$(CONFIG_FB_TFT) += fbtft/ obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ obj-$(CONFIG_WILC1000) += wilc1000/ obj-$(CONFIG_MOST) += most/ +obj-$(CONFIG_ISDN_I4L) += i4l/ diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 42b15126aa06..bd90d2002afb 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -57,15 +57,6 @@ config SW_SYNC synchronization. Useful when there is no hardware primitive backing the synchronization. -config SW_SYNC_USER - bool "Userspace API for SW_SYNC" - default n - depends on SW_SYNC - ---help--- - Provides a user space API to the sw sync object. - *WARNING* improper use of this can result in deadlocking kernel - drivers from userspace. 
- source "drivers/staging/android/ion/Kconfig" endif # if ANDROID diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 5bb1283d19cd..8a8078f954d5 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -106,21 +106,34 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly; #define range_on_lru(range) \ ((range)->purged == ASHMEM_NOT_PURGED) -#define page_range_subsumes_range(range, start, end) \ - (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) +static inline int page_range_subsumes_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return (((range)->pgstart >= (start)) && ((range)->pgend <= (end))); +} -#define page_range_subsumed_by_range(range, start, end) \ - (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) +static inline int page_range_subsumed_by_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return (((range)->pgstart <= (start)) && ((range)->pgend >= (end))); +} -#define page_in_range(range, page) \ - (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) +static inline int page_in_range(struct ashmem_range *range, size_t page) +{ + return (((range)->pgstart <= (page)) && ((range)->pgend >= (page))); +} -#define page_range_in_range(range, start, end) \ - (page_in_range(range, start) || page_in_range(range, end) || \ - page_range_subsumes_range(range, start, end)) +static inline int page_range_in_range(struct ashmem_range *range, + size_t start, size_t end) +{ + return (page_in_range(range, start) || page_in_range(range, end) || + page_range_subsumes_range(range, start, end)); +} -#define range_before_page(range, page) \ - ((range)->pgend < (page)) +static inline int range_before_page(struct ashmem_range *range, size_t page) +{ + return ((range)->pgend < (page)); +} #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) @@ -441,7 +454,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) if (!(sc->gfp_mask & __GFP_FS)) return SHRINK_STOP; - mutex_lock(&ashmem_mutex); + if (!mutex_trylock(&ashmem_mutex)) + return -1; + list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { loff_t start = range->pgstart * PAGE_SIZE; loff_t end = (range->pgend + 1) * PAGE_SIZE; @@ -661,8 +676,8 @@ restart: if (page_range_subsumed_by_range(range, pgstart, pgend)) return 0; if (page_range_in_range(range, pgstart, pgend)) { - pgstart = min_t(size_t, range->pgstart, pgstart); - pgend = max_t(size_t, range->pgend, pgend); + pgstart = min(range->pgstart, pgstart); + pgend = max(range->pgend, pgend); purged |= range->purged; range_del(range); goto restart; diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c index e3c07b2ba00e..fe9f0fd210cd 100644 --- a/drivers/staging/android/ion/hisilicon/hi6220_ion.c +++ b/drivers/staging/android/ion/hisilicon/hi6220_ion.c @@ -214,10 +214,7 @@ static struct platform_driver hi6220_ion_driver = { static int __init hi6220_ion_init(void) { - int ret; - - ret = platform_driver_register(&hi6220_ion_driver); - return ret; + return platform_driver_register(&hi6220_ion_driver); } subsys_initcall(hi6220_ion_init); diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index e237e9f3312d..1c872bdfddf6 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, * memory coming from the heaps is ready for dma, ie if 
it has a * cached mapping that mapping has been invalidated */ - for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) + for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) { sg_dma_address(sg) = sg_phys(sg); + sg_dma_len(sg) = sg->length; + } mutex_lock(&dev->buffer_lock); ion_buffer_add(dev, buffer); mutex_unlock(&dev->buffer_lock); @@ -385,13 +387,22 @@ static void ion_handle_get(struct ion_handle *handle) kref_get(&handle->ref); } -static int ion_handle_put(struct ion_handle *handle) +static int ion_handle_put_nolock(struct ion_handle *handle) +{ + int ret; + + ret = kref_put(&handle->ref, ion_handle_destroy); + + return ret; +} + +int ion_handle_put(struct ion_handle *handle) { struct ion_client *client = handle->client; int ret; mutex_lock(&client->lock); - ret = kref_put(&handle->ref, ion_handle_destroy); + ret = ion_handle_put_nolock(handle); mutex_unlock(&client->lock); return ret; @@ -415,20 +426,30 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client, return ERR_PTR(-EINVAL); } -static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, int id) { struct ion_handle *handle; - mutex_lock(&client->lock); handle = idr_find(&client->idr, id); if (handle) ion_handle_get(handle); - mutex_unlock(&client->lock); return handle ? handle : ERR_PTR(-EINVAL); } +struct ion_handle *ion_handle_get_by_id(struct ion_client *client, + int id) +{ + struct ion_handle *handle; + + mutex_lock(&client->lock); + handle = ion_handle_get_by_id_nolock(client, id); + mutex_unlock(&client->lock); + + return handle; +} + static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) { @@ -530,22 +551,28 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len, } EXPORT_SYMBOL(ion_alloc); -void ion_free(struct ion_client *client, struct ion_handle *handle) +static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle) { bool valid_handle; BUG_ON(client != handle->client); - mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); if (!valid_handle) { WARN(1, "%s: invalid handle passed to free.\n", __func__); - mutex_unlock(&client->lock); return; } + ion_handle_put_nolock(handle); +} + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ + BUG_ON(client != handle->client); + + mutex_lock(&client->lock); + ion_free_nolock(client, handle); mutex_unlock(&client->lock); - ion_handle_put(handle); } EXPORT_SYMBOL(ion_free); @@ -675,6 +702,34 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) } EXPORT_SYMBOL(ion_unmap_kernel); +static struct mutex debugfs_mutex; +static struct rb_root *ion_root_client; +static int is_client_alive(struct ion_client *client) +{ + struct rb_node *node; + struct ion_client *tmp; + struct ion_device *dev; + + node = ion_root_client->rb_node; + dev = container_of(ion_root_client, struct ion_device, clients); + + down_read(&dev->lock); + while (node) { + tmp = rb_entry(node, struct ion_client, node); + if (client < tmp) { + node = node->rb_left; + } else if (client > tmp) { + node = node->rb_right; + } else { + up_read(&dev->lock); + return 1; + } + } + + up_read(&dev->lock); + return 0; +} + static int ion_debug_client_show(struct seq_file *s, void *unused) { struct ion_client *client = s->private; @@ -683,6 +738,14 @@ static int ion_debug_client_show(struct seq_file *s, void *unused) const char *names[ION_NUM_HEAP_IDS] = 
{NULL}; int i; + mutex_lock(&debugfs_mutex); + if (!is_client_alive(client)) { + seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n", + client); + mutex_unlock(&debugfs_mutex); + return 0; + } + mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, @@ -694,6 +757,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused) sizes[id] += handle->buffer->size; } mutex_unlock(&client->lock); + mutex_unlock(&debugfs_mutex); seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); for (i = 0; i < ION_NUM_HEAP_IDS; i++) { @@ -830,6 +894,7 @@ void ion_client_destroy(struct ion_client *client) struct rb_node *n; pr_debug("%s: %d\n", __func__, __LINE__); + mutex_lock(&debugfs_mutex); while ((n = rb_first(&client->handles))) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); @@ -848,6 +913,7 @@ void ion_client_destroy(struct ion_client *client) kfree(client->display_name); kfree(client->name); kfree(client); + mutex_unlock(&debugfs_mutex); } EXPORT_SYMBOL(ion_client_destroy); @@ -1151,22 +1217,18 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) } EXPORT_SYMBOL(ion_share_dma_buf_fd); -struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) +struct ion_handle *ion_import_dma_buf(struct ion_client *client, + struct dma_buf *dmabuf) { - struct dma_buf *dmabuf; struct ion_buffer *buffer; struct ion_handle *handle; int ret; - dmabuf = dma_buf_get(fd); - if (IS_ERR(dmabuf)) - return ERR_CAST(dmabuf); /* if this memory came from ion */ if (dmabuf->ops != &dma_buf_ops) { pr_err("%s: can not import dmabuf from another exporter\n", __func__); - dma_buf_put(dmabuf); return ERR_PTR(-EINVAL); } buffer = dmabuf->priv; @@ -1194,11 +1256,25 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) } end: - dma_buf_put(dmabuf); return handle; } EXPORT_SYMBOL(ion_import_dma_buf); +struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_handle *handle; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return ERR_CAST(dmabuf); + + handle = ion_import_dma_buf(client, dmabuf); + dma_buf_put(dmabuf); + return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf_fd); + static int ion_sync_for_device(struct ion_client *client, int fd) { struct dma_buf *dmabuf; @@ -1281,11 +1357,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_handle *handle; - handle = ion_handle_get_by_id(client, data.handle.handle); - if (IS_ERR(handle)) + mutex_lock(&client->lock); + handle = ion_handle_get_by_id_nolock(client, data.handle.handle); + if (IS_ERR(handle)) { + mutex_unlock(&client->lock); return PTR_ERR(handle); - ion_free(client, handle); - ion_handle_put(handle); + } + ion_free_nolock(client, handle); + ion_handle_put_nolock(handle); + mutex_unlock(&client->lock); break; } case ION_IOC_SHARE: @@ -1306,7 +1386,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_handle *handle; - handle = ion_import_dma_buf(client, data.fd.fd); + handle = ion_import_dma_buf_fd(client, data.fd.fd); if (IS_ERR(handle)) ret = PTR_ERR(handle); else @@ -1403,6 +1483,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); seq_puts(s, "----------------------------------------------------\n"); + mutex_lock(&debugfs_mutex); for (n = 
rb_first(&dev->clients); n; n = rb_next(n)) { struct ion_client *client = rb_entry(n, struct ion_client, node); @@ -1421,6 +1502,8 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) client->pid, size); } } + mutex_unlock(&debugfs_mutex); + seq_puts(s, "----------------------------------------------------\n"); seq_puts(s, "orphaned allocations (info is from last known client):\n"); mutex_lock(&dev->buffer_lock); @@ -1472,7 +1555,7 @@ static int debug_shrink_set(void *data, u64 val) struct shrink_control sc; int objs; - sc.gfp_mask = -1; + sc.gfp_mask = GFP_HIGHUSER; sc.nr_to_scan = val; if (!val) { @@ -1490,7 +1573,7 @@ static int debug_shrink_get(void *data, u64 *val) struct shrink_control sc; int objs; - sc.gfp_mask = -1; + sc.gfp_mask = GFP_HIGHUSER; sc.nr_to_scan = 0; objs = heap->shrinker.count_objects(&heap->shrinker, &sc); @@ -1605,6 +1688,8 @@ debugfs_done: init_rwsem(&idev->lock); plist_head_init(&idev->heaps); idev->clients = RB_ROOT; + ion_root_client = &idev->clients; + mutex_init(&debugfs_mutex); return idev; } EXPORT_SYMBOL(ion_device_create); diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index b860c5f579f5..a1331fc169a1 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -192,14 +192,26 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client, int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); /** - * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle + * ion_import_dma_buf() - get ion_handle from dma-buf + * @client: the client + * @dmabuf: the dma-buf + * + * Get the ion_buffer associated with the dma-buf and return the ion_handle. + * If no ion_handle exists for this buffer, return newly created ion_handle. + * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL) + */ +struct ion_handle *ion_import_dma_buf(struct ion_client *client, + struct dma_buf *dmabuf); + +/** + * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle * @client: the client * @fd: the dma-buf fd * - * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf, - * import that fd and return a handle representing it. If a dma-buf from + * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf_fd, + * import that fd and return a handle representing it. 
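The kernel-doc above describes the new split: ion_import_dma_buf() now takes a struct dma_buf directly, while ion_import_dma_buf_fd() is a thin wrapper that resolves the file descriptor, delegates to the object-based function, and drops its temporary dma_buf reference. A minimal userspace sketch of that "resolve fd, delegate, release" pattern, with hypothetical buffer_ref/handle/import_buffer* names standing in for the ION API:

/*
 * Illustrative sketch only: buffer_ref, handle, import_buffer() and
 * import_buffer_fd() are hypothetical stand-ins, not the ION API.
 * The _fd variant resolves the descriptor, delegates to the
 * object-based function, then drops its temporary reference, so
 * callers that already hold the object can skip the fd round trip.
 */
#include <stdio.h>
#include <stdlib.h>

struct buffer_ref { int id; };          /* stands in for struct dma_buf    */
struct handle     { int buffer_id; };   /* stands in for struct ion_handle */

/* Object-based import: works on the buffer directly. */
static struct handle *import_buffer(const struct buffer_ref *buf)
{
        struct handle *h = malloc(sizeof(*h));

        if (h)
                h->buffer_id = buf->id;
        return h;
}

/* fd-based wrapper: resolve, delegate, release the temporary ref. */
static struct handle *import_buffer_fd(int fd)
{
        struct buffer_ref *buf;
        struct handle *h;

        if (fd < 0)
                return NULL;
        buf = malloc(sizeof(*buf));     /* stands in for dma_buf_get(fd) */
        if (!buf)
                return NULL;
        buf->id = fd;

        h = import_buffer(buf);         /* shared, object-based path */

        free(buf);                      /* stands in for dma_buf_put() */
        return h;
}

int main(void)
{
        struct handle *h = import_buffer_fd(3);

        printf("import %s\n", h ? "succeeded" : "failed");
        free(h);
        return 0;
}
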
If a dma-buf from * another exporter is passed in this function will return ERR_PTR(-EINVAL) */ -struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd); +struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd); #endif /* _LINUX_ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c index 9156d8238c97..1fb0d81556da 100644 --- a/drivers/staging/android/ion/ion_carveout_heap.c +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -81,7 +81,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, if (align > PAGE_SIZE) return -EINVAL; - table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; ret = sg_alloc_table(table, 1, GFP_KERNEL); @@ -117,7 +117,7 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer) if (ion_buffer_cached(buffer)) dma_sync_sg_for_device(NULL, table->sgl, table->nents, - DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); ion_carveout_free(heap, paddr, buffer->size); sg_free_table(table); @@ -163,11 +163,11 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) if (ret) return ERR_PTR(ret); - carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); + carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL); if (!carveout_heap) return ERR_PTR(-ENOMEM); - carveout_heap->pool = gen_pool_create(12, -1); + carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1); if (!carveout_heap->pool) { kfree(carveout_heap); return ERR_PTR(-ENOMEM); diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index fd7e23e0c06e..1fe80165a462 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -149,8 +149,8 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) { - struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), - GFP_KERNEL); + struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) return NULL; pool->high_count = 0; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index d4c3e5512dd5..b69dfc706440 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -27,7 +27,7 @@ #include "ion_priv.h" static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | - __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM; + __GFP_NORETRY) & ~__GFP_RECLAIM; static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); static const unsigned int orders[] = {8, 4, 0}; static const int num_orders = ARRAY_SIZE(orders); diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c index 4d3c516cc15e..49e55e5acead 100644 --- a/drivers/staging/android/ion/tegra/tegra_ion.c +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -33,12 +33,11 @@ static int tegra_ion_probe(struct platform_device *pdev) num_heaps = pdata->nr; - heaps = devm_kzalloc(&pdev->dev, - sizeof(struct ion_heap *) * pdata->nr, - GFP_KERNEL); + heaps = devm_kcalloc(&pdev->dev, pdata->nr, + sizeof(struct ion_heap *), GFP_KERNEL); idev = ion_device_create(NULL); - if (IS_ERR_OR_NULL(idev)) + if (IS_ERR(idev)) return PTR_ERR(idev); /* create the heaps as specified in the board file */ diff --git 
a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 8b5a4a82d8b8..2509e5df7244 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -50,6 +50,7 @@ static short lowmem_adj[6] = { 6, 12, }; + static int lowmem_adj_size = 4; static int lowmem_minfree[6] = { 3 * 512, /* 6MB */ @@ -57,6 +58,7 @@ static int lowmem_minfree[6] = { 4 * 1024, /* 16MB */ 16 * 1024, /* 64MB */ }; + static int lowmem_minfree_size = 4; static unsigned long lowmem_deathpending_timeout; @@ -84,6 +86,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) int tasksize; int i; short min_score_adj = OOM_SCORE_ADJ_MAX + 1; + int minfree = 0; int selected_tasksize = 0; short selected_oom_score_adj; int array_size = ARRAY_SIZE(lowmem_adj); @@ -97,8 +100,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) if (lowmem_minfree_size < array_size) array_size = lowmem_minfree_size; for (i = 0; i < array_size; i++) { - if (other_free < lowmem_minfree[i] && - other_file < lowmem_minfree[i]) { + minfree = lowmem_minfree[i]; + if (other_free < minfree && other_file < minfree) { min_score_adj = lowmem_adj[i]; break; } @@ -153,8 +156,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) selected = p; selected_tasksize = tasksize; selected_oom_score_adj = oom_score_adj; - lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n", - p->pid, p->comm, oom_score_adj, tasksize); + lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n", + p->comm, p->pid, oom_score_adj, tasksize); } if (selected) { task_lock(selected); @@ -167,9 +170,18 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) if (selected->mm) mark_oom_victim(selected); task_unlock(selected); - lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n", - selected->pid, selected->comm, - selected_oom_score_adj, selected_tasksize); + lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" + " to free %ldkB on behalf of '%s' (%d) because\n" + " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" + " Free memory is %ldkB above reserved\n", + selected->comm, selected->pid, + selected_oom_score_adj, + selected_tasksize * (long)(PAGE_SIZE / 1024), + current->comm, current->pid, + other_file * (long)(PAGE_SIZE / 1024), + minfree * (long)(PAGE_SIZE / 1024), + min_score_adj, + other_free * (long)(PAGE_SIZE / 1024)); lowmem_deathpending_timeout = jiffies + HZ; rem += selected_tasksize; } diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c index c4ff1679ebbc..af39ff58fa33 100644 --- a/drivers/staging/android/sw_sync.c +++ b/drivers/staging/android/sw_sync.c @@ -25,15 +25,7 @@ #include "sw_sync.h" -static int sw_sync_cmp(u32 a, u32 b) -{ - if (a == b) - return 0; - - return ((s32)a - (s32)b) < 0 ? 
-1 : 1; -} - -struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) +struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) { struct sw_sync_pt *pt; @@ -42,47 +34,17 @@ struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) pt->value = value; - return (struct sync_pt *)pt; + return (struct fence *)pt; } EXPORT_SYMBOL(sw_sync_pt_create); -static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) -{ - struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; - struct sw_sync_timeline *obj = - (struct sw_sync_timeline *)sync_pt_parent(sync_pt); - - return (struct sync_pt *)sw_sync_pt_create(obj, pt->value); -} - -static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) +static int sw_sync_fence_has_signaled(struct fence *fence) { - struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_pt *pt = (struct sw_sync_pt *)fence; struct sw_sync_timeline *obj = - (struct sw_sync_timeline *)sync_pt_parent(sync_pt); - - return sw_sync_cmp(obj->value, pt->value) >= 0; -} - -static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) -{ - struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a; - struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b; - - return sw_sync_cmp(pt_a->value, pt_b->value); -} - -static int sw_sync_fill_driver_data(struct sync_pt *sync_pt, - void *data, int size) -{ - struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; - - if (size < sizeof(pt->value)) - return -ENOMEM; + (struct sw_sync_timeline *)fence_parent(fence); - memcpy(data, &pt->value, sizeof(pt->value)); - - return sizeof(pt->value); + return (pt->value > obj->value) ? 0 : 1; } static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline, @@ -93,22 +55,18 @@ static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline, snprintf(str, size, "%d", timeline->value); } -static void sw_sync_pt_value_str(struct sync_pt *sync_pt, - char *str, int size) +static void sw_sync_fence_value_str(struct fence *fence, char *str, int size) { - struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_pt *pt = (struct sw_sync_pt *)fence; snprintf(str, size, "%d", pt->value); } static struct sync_timeline_ops sw_sync_timeline_ops = { .driver_name = "sw_sync", - .dup = sw_sync_pt_dup, - .has_signaled = sw_sync_pt_has_signaled, - .compare = sw_sync_pt_compare, - .fill_driver_data = sw_sync_fill_driver_data, + .has_signaled = sw_sync_fence_has_signaled, .timeline_value_str = sw_sync_timeline_value_str, - .pt_value_str = sw_sync_pt_value_str, + .fence_value_str = sw_sync_fence_value_str, }; struct sw_sync_timeline *sw_sync_timeline_create(const char *name) @@ -129,132 +87,3 @@ void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) sync_timeline_signal(&obj->obj); } EXPORT_SYMBOL(sw_sync_timeline_inc); - -#ifdef CONFIG_SW_SYNC_USER -/* *WARNING* - * - * improper use of this can result in deadlocking kernel drivers from userspace. 
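The removed sw_sync_cmp() helper ordered 32-bit timeline values through a signed difference so counters stay comparable even after wrapping past UINT32_MAX; the fence-based sw_sync_fence_has_signaled() above replaces it with a direct value comparison. A standalone sketch of the wrap-safe idiom the old helper was built on, written here with the unsigned-subtract-then-cast form to avoid signed overflow and assuming nothing beyond standard C:

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe "a comes before b" test for 32-bit sequence counters:
 * subtract as unsigned, reinterpret as signed, check the sign.
 * This is the idea behind the removed sw_sync_cmp() (and the
 * kernel's time_after() family); values that wrapped still compare
 * correctly as long as they are less than 2^31 apart. */
static int seq_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

int main(void)
{
        printf("%d\n", seq_before(5, 10));           /* 1 */
        printf("%d\n", seq_before(0xfffffff0u, 8));  /* 1: wrapped, still before */
        printf("%d\n", seq_before(10, 5));           /* 0 */
        return 0;
}
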
- */ - -/* opening sw_sync create a new sync obj */ -static int sw_sync_open(struct inode *inode, struct file *file) -{ - struct sw_sync_timeline *obj; - char task_comm[TASK_COMM_LEN]; - - get_task_comm(task_comm, current); - - obj = sw_sync_timeline_create(task_comm); - if (!obj) - return -ENOMEM; - - file->private_data = obj; - - return 0; -} - -static int sw_sync_release(struct inode *inode, struct file *file) -{ - struct sw_sync_timeline *obj = file->private_data; - - sync_timeline_destroy(&obj->obj); - return 0; -} - -static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, - unsigned long arg) -{ - int fd = get_unused_fd_flags(O_CLOEXEC); - int err; - struct sync_pt *pt; - struct sync_fence *fence; - struct sw_sync_create_fence_data data; - - if (fd < 0) - return fd; - - if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { - err = -EFAULT; - goto err; - } - - pt = sw_sync_pt_create(obj, data.value); - if (!pt) { - err = -ENOMEM; - goto err; - } - - data.name[sizeof(data.name) - 1] = '\0'; - fence = sync_fence_create(data.name, pt); - if (!fence) { - sync_pt_free(pt); - err = -ENOMEM; - goto err; - } - - data.fence = fd; - if (copy_to_user((void __user *)arg, &data, sizeof(data))) { - sync_fence_put(fence); - err = -EFAULT; - goto err; - } - - sync_fence_install(fence, fd); - - return 0; - -err: - put_unused_fd(fd); - return err; -} - -static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) -{ - u32 value; - - if (copy_from_user(&value, (void __user *)arg, sizeof(value))) - return -EFAULT; - - sw_sync_timeline_inc(obj, value); - - return 0; -} - -static long sw_sync_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct sw_sync_timeline *obj = file->private_data; - - switch (cmd) { - case SW_SYNC_IOC_CREATE_FENCE: - return sw_sync_ioctl_create_fence(obj, arg); - - case SW_SYNC_IOC_INC: - return sw_sync_ioctl_inc(obj, arg); - - default: - return -ENOTTY; - } -} - -static const struct file_operations sw_sync_fops = { - .owner = THIS_MODULE, - .open = sw_sync_open, - .release = sw_sync_release, - .unlocked_ioctl = sw_sync_ioctl, - .compat_ioctl = sw_sync_ioctl, -}; - -static struct miscdevice sw_sync_dev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "sw_sync", - .fops = &sw_sync_fops, -}; - -static int __init sw_sync_device_init(void) -{ - return misc_register(&sw_sync_dev); -} -device_initcall(sw_sync_device_init); - -#endif /* CONFIG_SW_SYNC_USER */ diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h index c87ae9ebf267..e18667bfb0ca 100644 --- a/drivers/staging/android/sw_sync.h +++ b/drivers/staging/android/sw_sync.h @@ -29,7 +29,7 @@ struct sw_sync_timeline { }; struct sw_sync_pt { - struct sync_pt pt; + struct fence pt; u32 value; }; @@ -38,7 +38,7 @@ struct sw_sync_pt { struct sw_sync_timeline *sw_sync_timeline_create(const char *name); void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); -struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); +struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); #else static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) { @@ -49,8 +49,8 @@ static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) { } -static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, - u32 value) +static inline struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, + u32 value) { return NULL; } diff --git a/drivers/staging/android/sync.c 
b/drivers/staging/android/sync.c index ed43796b5b58..3a8f21031440 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -32,7 +32,7 @@ #include "trace/sync.h" static const struct fence_ops android_fence_ops; -static const struct file_operations sync_fence_fops; +static const struct file_operations sync_file_fops; struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, int size, const char *name) @@ -68,9 +68,6 @@ static void sync_timeline_free(struct kref *kref) sync_timeline_debug_remove(obj); - if (obj->ops->release_obj) - obj->ops->release_obj(obj); - kfree(obj); } @@ -93,10 +90,6 @@ void sync_timeline_destroy(struct sync_timeline *obj) */ smp_wmb(); - /* - * signal any children that their parent is going away. - */ - sync_timeline_signal(obj); sync_timeline_put(obj); } EXPORT_SYMBOL(sync_timeline_destroy); @@ -104,126 +97,115 @@ EXPORT_SYMBOL(sync_timeline_destroy); void sync_timeline_signal(struct sync_timeline *obj) { unsigned long flags; - LIST_HEAD(signaled_pts); - struct sync_pt *pt, *next; + struct fence *fence, *next; trace_sync_timeline(obj); spin_lock_irqsave(&obj->child_list_lock, flags); - list_for_each_entry_safe(pt, next, &obj->active_list_head, + list_for_each_entry_safe(fence, next, &obj->active_list_head, active_list) { - if (fence_is_signaled_locked(&pt->base)) - list_del_init(&pt->active_list); + if (fence_is_signaled_locked(fence)) + list_del_init(&fence->active_list); } spin_unlock_irqrestore(&obj->child_list_lock, flags); } EXPORT_SYMBOL(sync_timeline_signal); -struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size) +struct fence *sync_pt_create(struct sync_timeline *obj, int size) { unsigned long flags; - struct sync_pt *pt; + struct fence *fence; - if (size < sizeof(struct sync_pt)) + if (size < sizeof(*fence)) return NULL; - pt = kzalloc(size, GFP_KERNEL); - if (!pt) + fence = kzalloc(size, GFP_KERNEL); + if (!fence) return NULL; spin_lock_irqsave(&obj->child_list_lock, flags); sync_timeline_get(obj); - fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock, + fence_init(fence, &android_fence_ops, &obj->child_list_lock, obj->context, ++obj->value); - list_add_tail(&pt->child_list, &obj->child_list_head); - INIT_LIST_HEAD(&pt->active_list); + list_add_tail(&fence->child_list, &obj->child_list_head); + INIT_LIST_HEAD(&fence->active_list); spin_unlock_irqrestore(&obj->child_list_lock, flags); - return pt; + return fence; } EXPORT_SYMBOL(sync_pt_create); -void sync_pt_free(struct sync_pt *pt) -{ - fence_put(&pt->base); -} -EXPORT_SYMBOL(sync_pt_free); - -static struct sync_fence *sync_fence_alloc(int size, const char *name) +static struct sync_file *sync_file_alloc(int size, const char *name) { - struct sync_fence *fence; + struct sync_file *sync_file; - fence = kzalloc(size, GFP_KERNEL); - if (!fence) + sync_file = kzalloc(size, GFP_KERNEL); + if (!sync_file) return NULL; - fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, - fence, 0); - if (IS_ERR(fence->file)) + sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops, + sync_file, 0); + if (IS_ERR(sync_file->file)) goto err; - kref_init(&fence->kref); - strlcpy(fence->name, name, sizeof(fence->name)); + kref_init(&sync_file->kref); + strlcpy(sync_file->name, name, sizeof(sync_file->name)); - init_waitqueue_head(&fence->wq); + init_waitqueue_head(&sync_file->wq); - return fence; + return sync_file; err: - kfree(fence); + kfree(sync_file); return NULL; } static void fence_check_cb_func(struct fence *f, struct 
fence_cb *cb) { - struct sync_fence_cb *check; - struct sync_fence *fence; + struct sync_file_cb *check; + struct sync_file *sync_file; - check = container_of(cb, struct sync_fence_cb, cb); - fence = check->fence; + check = container_of(cb, struct sync_file_cb, cb); + sync_file = check->sync_file; - if (atomic_dec_and_test(&fence->status)) - wake_up_all(&fence->wq); + if (atomic_dec_and_test(&sync_file->status)) + wake_up_all(&sync_file->wq); } -/* TODO: implement a create which takes more that one sync_pt */ -struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt) +/* TODO: implement a create which takes more that one fence */ +struct sync_file *sync_file_create(const char *name, struct fence *fence) { - struct sync_fence *fence; + struct sync_file *sync_file; - fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name); - if (!fence) + sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]), + name); + if (!sync_file) return NULL; - fence->num_fences = 1; - atomic_set(&fence->status, 1); + sync_file->num_fences = 1; + atomic_set(&sync_file->status, 1); - fence->cbs[0].sync_pt = pt; - fence->cbs[0].fence = fence; - if (fence_add_callback(pt, &fence->cbs[0].cb, fence_check_cb_func)) - atomic_dec(&fence->status); + sync_file->cbs[0].fence = fence; + sync_file->cbs[0].sync_file = sync_file; + if (fence_add_callback(fence, &sync_file->cbs[0].cb, + fence_check_cb_func)) + atomic_dec(&sync_file->status); - sync_fence_debug_add(fence); + sync_file_debug_add(sync_file); - return fence; + return sync_file; } -EXPORT_SYMBOL(sync_fence_create_dma); +EXPORT_SYMBOL(sync_file_create); -struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) -{ - return sync_fence_create_dma(name, &pt->base); -} -EXPORT_SYMBOL(sync_fence_create); - -struct sync_fence *sync_fence_fdget(int fd) +struct sync_file *sync_file_fdget(int fd) { struct file *file = fget(fd); if (!file) return NULL; - if (file->f_op != &sync_fence_fops) + if (file->f_op != &sync_file_fops) goto err; return file->private_data; @@ -232,70 +214,71 @@ err: fput(file); return NULL; } -EXPORT_SYMBOL(sync_fence_fdget); +EXPORT_SYMBOL(sync_file_fdget); -void sync_fence_put(struct sync_fence *fence) +void sync_file_put(struct sync_file *sync_file) { - fput(fence->file); + fput(sync_file->file); } -EXPORT_SYMBOL(sync_fence_put); +EXPORT_SYMBOL(sync_file_put); -void sync_fence_install(struct sync_fence *fence, int fd) +void sync_file_install(struct sync_file *sync_file, int fd) { - fd_install(fd, fence->file); + fd_install(fd, sync_file->file); } -EXPORT_SYMBOL(sync_fence_install); +EXPORT_SYMBOL(sync_file_install); -static void sync_fence_add_pt(struct sync_fence *fence, - int *i, struct fence *pt) +static void sync_file_add_pt(struct sync_file *sync_file, int *i, + struct fence *fence) { - fence->cbs[*i].sync_pt = pt; - fence->cbs[*i].fence = fence; + sync_file->cbs[*i].fence = fence; + sync_file->cbs[*i].sync_file = sync_file; - if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) { - fence_get(pt); + if (!fence_add_callback(fence, &sync_file->cbs[*i].cb, + fence_check_cb_func)) { + fence_get(fence); (*i)++; } } -struct sync_fence *sync_fence_merge(const char *name, - struct sync_fence *a, struct sync_fence *b) +struct sync_file *sync_file_merge(const char *name, + struct sync_file *a, struct sync_file *b) { int num_fences = a->num_fences + b->num_fences; - struct sync_fence *fence; + struct sync_file *sync_file; int i, i_a, i_b; - unsigned long size = offsetof(struct sync_fence, 
cbs[num_fences]); + unsigned long size = offsetof(struct sync_file, cbs[num_fences]); - fence = sync_fence_alloc(size, name); - if (!fence) + sync_file = sync_file_alloc(size, name); + if (!sync_file) return NULL; - atomic_set(&fence->status, num_fences); + atomic_set(&sync_file->status, num_fences); /* - * Assume sync_fence a and b are both ordered and have no + * Assume sync_file a and b are both ordered and have no * duplicates with the same context. * - * If a sync_fence can only be created with sync_fence_merge - * and sync_fence_create, this is a reasonable assumption. + * If a sync_file can only be created with sync_file_merge + * and sync_file_create, this is a reasonable assumption. */ for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) { - struct fence *pt_a = a->cbs[i_a].sync_pt; - struct fence *pt_b = b->cbs[i_b].sync_pt; + struct fence *pt_a = a->cbs[i_a].fence; + struct fence *pt_b = b->cbs[i_b].fence; if (pt_a->context < pt_b->context) { - sync_fence_add_pt(fence, &i, pt_a); + sync_file_add_pt(sync_file, &i, pt_a); i_a++; } else if (pt_a->context > pt_b->context) { - sync_fence_add_pt(fence, &i, pt_b); + sync_file_add_pt(sync_file, &i, pt_b); i_b++; } else { if (pt_a->seqno - pt_b->seqno <= INT_MAX) - sync_fence_add_pt(fence, &i, pt_a); + sync_file_add_pt(sync_file, &i, pt_a); else - sync_fence_add_pt(fence, &i, pt_b); + sync_file_add_pt(sync_file, &i, pt_b); i_a++; i_b++; @@ -303,156 +286,55 @@ struct sync_fence *sync_fence_merge(const char *name, } for (; i_a < a->num_fences; i_a++) - sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt); + sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence); for (; i_b < b->num_fences; i_b++) - sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt); + sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence); if (num_fences > i) - atomic_sub(num_fences - i, &fence->status); - fence->num_fences = i; - - sync_fence_debug_add(fence); - return fence; -} -EXPORT_SYMBOL(sync_fence_merge); - -int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, - int wake_flags, void *key) -{ - struct sync_fence_waiter *wait; - - wait = container_of(curr, struct sync_fence_waiter, work); - list_del_init(&wait->work.task_list); - - wait->callback(wait->work.private, wait); - return 1; -} - -int sync_fence_wait_async(struct sync_fence *fence, - struct sync_fence_waiter *waiter) -{ - int err = atomic_read(&fence->status); - unsigned long flags; - - if (err < 0) - return err; - - if (!err) - return 1; - - init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq); - waiter->work.private = fence; - - spin_lock_irqsave(&fence->wq.lock, flags); - err = atomic_read(&fence->status); - if (err > 0) - __add_wait_queue_tail(&fence->wq, &waiter->work); - spin_unlock_irqrestore(&fence->wq.lock, flags); - - if (err < 0) - return err; - - return !err; -} -EXPORT_SYMBOL(sync_fence_wait_async); - -int sync_fence_cancel_async(struct sync_fence *fence, - struct sync_fence_waiter *waiter) -{ - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&fence->wq.lock, flags); - if (!list_empty(&waiter->work.task_list)) - list_del_init(&waiter->work.task_list); - else - ret = -ENOENT; - spin_unlock_irqrestore(&fence->wq.lock, flags); - return ret; -} -EXPORT_SYMBOL(sync_fence_cancel_async); - -int sync_fence_wait(struct sync_fence *fence, long timeout) -{ - long ret; - int i; + atomic_sub(num_fences - i, &sync_file->status); + sync_file->num_fences = i; - if (timeout < 0) - timeout = MAX_SCHEDULE_TIMEOUT; - else - timeout = msecs_to_jiffies(timeout); - - 
trace_sync_wait(fence, 1); - for (i = 0; i < fence->num_fences; ++i) - trace_sync_pt(fence->cbs[i].sync_pt); - ret = wait_event_interruptible_timeout(fence->wq, - atomic_read(&fence->status) <= 0, - timeout); - trace_sync_wait(fence, 0); - - if (ret < 0) { - return ret; - } else if (ret == 0) { - if (timeout) { - pr_info("fence timeout on [%p] after %dms\n", fence, - jiffies_to_msecs(timeout)); - sync_dump(); - } - return -ETIME; - } - - ret = atomic_read(&fence->status); - if (ret) { - pr_info("fence error %ld on [%p]\n", ret, fence); - sync_dump(); - } - return ret; + sync_file_debug_add(sync_file); + return sync_file; } -EXPORT_SYMBOL(sync_fence_wait); +EXPORT_SYMBOL(sync_file_merge); static const char *android_fence_get_driver_name(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); return parent->ops->driver_name; } static const char *android_fence_get_timeline_name(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); return parent->name; } static void android_fence_release(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); unsigned long flags; spin_lock_irqsave(fence->lock, flags); - list_del(&pt->child_list); - if (WARN_ON_ONCE(!list_empty(&pt->active_list))) - list_del(&pt->active_list); + list_del(&fence->child_list); + if (WARN_ON_ONCE(!list_empty(&fence->active_list))) + list_del(&fence->active_list); spin_unlock_irqrestore(fence->lock, flags); - if (parent->ops->free_pt) - parent->ops->free_pt(pt); - sync_timeline_put(parent); - fence_free(&pt->base); + fence_free(fence); } static bool android_fence_signaled(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); int ret; - ret = parent->ops->has_signaled(pt); + ret = parent->ops->has_signaled(fence); if (ret < 0) fence->status = ret; return ret; @@ -460,46 +342,32 @@ static bool android_fence_signaled(struct fence *fence) static bool android_fence_enable_signaling(struct fence *fence) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); if (android_fence_signaled(fence)) return false; - list_add_tail(&pt->active_list, &parent->active_list_head); + list_add_tail(&fence->active_list, &parent->active_list_head); return true; } -static int android_fence_fill_driver_data(struct fence *fence, - void *data, int size) -{ - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); - - if (!parent->ops->fill_driver_data) - return 0; - return parent->ops->fill_driver_data(pt, data, size); -} - static void android_fence_value_str(struct fence *fence, char *str, int size) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); - if (!parent->ops->pt_value_str) { + if (!parent->ops->fence_value_str) { if (size) *str = 0; return; } - parent->ops->pt_value_str(pt, str, size); + 
parent->ops->fence_value_str(fence, str, size); } static void android_fence_timeline_value_str(struct fence *fence, char *str, int size) { - struct sync_pt *pt = container_of(fence, struct sync_pt, base); - struct sync_timeline *parent = sync_pt_parent(pt); + struct sync_timeline *parent = fence_parent(fence); if (!parent->ops->timeline_value_str) { if (size) @@ -516,65 +384,57 @@ static const struct fence_ops android_fence_ops = { .signaled = android_fence_signaled, .wait = fence_default_wait, .release = android_fence_release, - .fill_driver_data = android_fence_fill_driver_data, .fence_value_str = android_fence_value_str, .timeline_value_str = android_fence_timeline_value_str, }; -static void sync_fence_free(struct kref *kref) +static void sync_file_free(struct kref *kref) { - struct sync_fence *fence = container_of(kref, struct sync_fence, kref); + struct sync_file *sync_file = container_of(kref, struct sync_file, + kref); int i; - for (i = 0; i < fence->num_fences; ++i) { - fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb); - fence_put(fence->cbs[i].sync_pt); + for (i = 0; i < sync_file->num_fences; ++i) { + fence_remove_callback(sync_file->cbs[i].fence, + &sync_file->cbs[i].cb); + fence_put(sync_file->cbs[i].fence); } - kfree(fence); + kfree(sync_file); } -static int sync_fence_release(struct inode *inode, struct file *file) +static int sync_file_release(struct inode *inode, struct file *file) { - struct sync_fence *fence = file->private_data; + struct sync_file *sync_file = file->private_data; - sync_fence_debug_remove(fence); + sync_file_debug_remove(sync_file); - kref_put(&fence->kref, sync_fence_free); + kref_put(&sync_file->kref, sync_file_free); return 0; } -static unsigned int sync_fence_poll(struct file *file, poll_table *wait) +static unsigned int sync_file_poll(struct file *file, poll_table *wait) { - struct sync_fence *fence = file->private_data; + struct sync_file *sync_file = file->private_data; int status; - poll_wait(file, &fence->wq, wait); + poll_wait(file, &sync_file->wq, wait); - status = atomic_read(&fence->status); + status = atomic_read(&sync_file->status); if (!status) return POLLIN; - else if (status < 0) + if (status < 0) return POLLERR; return 0; } -static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) -{ - __s32 value; - - if (copy_from_user(&value, (void __user *)arg, sizeof(value))) - return -EFAULT; - - return sync_fence_wait(fence, value); -} - -static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) +static long sync_file_ioctl_merge(struct sync_file *sync_file, + unsigned long arg) { int fd = get_unused_fd_flags(O_CLOEXEC); int err; - struct sync_fence *fence2, *fence3; + struct sync_file *fence2, *fence3; struct sync_merge_data data; if (fd < 0) @@ -585,14 +445,14 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) goto err_put_fd; } - fence2 = sync_fence_fdget(data.fd2); + fence2 = sync_file_fdget(data.fd2); if (!fence2) { err = -ENOENT; goto err_put_fd; } data.name[sizeof(data.name) - 1] = '\0'; - fence3 = sync_fence_merge(data.name, fence, fence2); + fence3 = sync_file_merge(data.name, sync_file, fence2); if (!fence3) { err = -ENOMEM; goto err_put_fence2; @@ -604,40 +464,28 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) goto err_put_fence3; } - sync_fence_install(fence3, fd); - sync_fence_put(fence2); + sync_file_install(fence3, fd); + sync_file_put(fence2); return 0; err_put_fence3: - sync_fence_put(fence3); + 
sync_file_put(fence3); err_put_fence2: - sync_fence_put(fence2); + sync_file_put(fence2); err_put_fd: put_unused_fd(fd); return err; } -static int sync_fill_pt_info(struct fence *fence, void *data, int size) +static int sync_fill_fence_info(struct fence *fence, void *data, int size) { - struct sync_pt_info *info = data; - int ret; + struct sync_fence_info *info = data; - if (size < sizeof(struct sync_pt_info)) + if (size < sizeof(*info)) return -ENOMEM; - info->len = sizeof(struct sync_pt_info); - - if (fence->ops->fill_driver_data) { - ret = fence->ops->fill_driver_data(fence, info->driver_data, - size - sizeof(*info)); - if (ret < 0) - return ret; - - info->len += ret; - } - strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), sizeof(info->obj_name)); strlcpy(info->driver_name, fence->ops->get_driver_name(fence), @@ -648,13 +496,13 @@ static int sync_fill_pt_info(struct fence *fence, void *data, int size) info->status = 0; info->timestamp_ns = ktime_to_ns(fence->timestamp); - return info->len; + return sizeof(*info); } -static long sync_fence_ioctl_fence_info(struct sync_fence *fence, +static long sync_file_ioctl_fence_info(struct sync_file *sync_file, unsigned long arg) { - struct sync_fence_info_data *data; + struct sync_file_info *info; __u32 size; __u32 len = 0; int ret, i; @@ -662,27 +510,27 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence, if (copy_from_user(&size, (void __user *)arg, sizeof(size))) return -EFAULT; - if (size < sizeof(struct sync_fence_info_data)) + if (size < sizeof(struct sync_file_info)) return -EINVAL; if (size > 4096) size = 4096; - data = kzalloc(size, GFP_KERNEL); - if (!data) + info = kzalloc(size, GFP_KERNEL); + if (!info) return -ENOMEM; - strlcpy(data->name, fence->name, sizeof(data->name)); - data->status = atomic_read(&fence->status); - if (data->status >= 0) - data->status = !data->status; + strlcpy(info->name, sync_file->name, sizeof(info->name)); + info->status = atomic_read(&sync_file->status); + if (info->status >= 0) + info->status = !info->status; - len = sizeof(struct sync_fence_info_data); + len = sizeof(struct sync_file_info); - for (i = 0; i < fence->num_fences; ++i) { - struct fence *pt = fence->cbs[i].sync_pt; + for (i = 0; i < sync_file->num_fences; ++i) { + struct fence *fence = sync_file->cbs[i].fence; - ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); + ret = sync_fill_fence_info(fence, (u8 *)info + len, size - len); if (ret < 0) goto out; @@ -690,43 +538,40 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence, len += ret; } - data->len = len; + info->len = len; - if (copy_to_user((void __user *)arg, data, len)) + if (copy_to_user((void __user *)arg, info, len)) ret = -EFAULT; else ret = 0; out: - kfree(data); + kfree(info); return ret; } -static long sync_fence_ioctl(struct file *file, unsigned int cmd, +static long sync_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct sync_fence *fence = file->private_data; + struct sync_file *sync_file = file->private_data; switch (cmd) { - case SYNC_IOC_WAIT: - return sync_fence_ioctl_wait(fence, arg); - case SYNC_IOC_MERGE: - return sync_fence_ioctl_merge(fence, arg); + return sync_file_ioctl_merge(sync_file, arg); case SYNC_IOC_FENCE_INFO: - return sync_fence_ioctl_fence_info(fence, arg); + return sync_file_ioctl_fence_info(sync_file, arg); default: return -ENOTTY; } } -static const struct file_operations sync_fence_fops = { - .release = sync_fence_release, - .poll = sync_fence_poll, - .unlocked_ioctl = 
sync_fence_ioctl, - .compat_ioctl = sync_fence_ioctl, +static const struct file_operations sync_file_fops = { + .release = sync_file_release, + .poll = sync_file_poll, + .unlocked_ioctl = sync_file_ioctl, + .compat_ioctl = sync_file_ioctl, }; diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index afa0752275a7..d2a173433a7d 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h @@ -18,63 +18,35 @@ #include <linux/ktime.h> #include <linux/list.h> #include <linux/spinlock.h> -#include <linux/wait.h> #include <linux/fence.h> #include "uapi/sync.h" struct sync_timeline; -struct sync_pt; -struct sync_fence; +struct sync_file; /** * struct sync_timeline_ops - sync object implementation ops * @driver_name: name of the implementation - * @dup: duplicate a sync_pt * @has_signaled: returns: * 1 if pt has signaled * 0 if pt has not signaled * <0 on error - * @compare: returns: - * 1 if b will signal before a - * 0 if a and b will signal at the same time - * -1 if a will signal before b - * @free_pt: called before sync_pt is freed - * @release_obj: called before sync_timeline is freed - * @fill_driver_data: write implementation specific driver data to data. - * should return an error if there is not enough room - * as specified by size. This information is returned - * to userspace by SYNC_IOC_FENCE_INFO. * @timeline_value_str: fill str with the value of the sync_timeline's counter - * @pt_value_str: fill str with the value of the sync_pt + * @fence_value_str: fill str with the value of the fence */ struct sync_timeline_ops { const char *driver_name; /* required */ - struct sync_pt * (*dup)(struct sync_pt *pt); - - /* required */ - int (*has_signaled)(struct sync_pt *pt); - - /* required */ - int (*compare)(struct sync_pt *a, struct sync_pt *b); - - /* optional */ - void (*free_pt)(struct sync_pt *sync_pt); - - /* optional */ - void (*release_obj)(struct sync_timeline *sync_timeline); - - /* optional */ - int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); + int (*has_signaled)(struct fence *fence); /* optional */ void (*timeline_value_str)(struct sync_timeline *timeline, char *str, int size); /* optional */ - void (*pt_value_str)(struct sync_pt *pt, char *str, int size); + void (*fence_value_str)(struct fence *fence, char *str, int size); }; /** @@ -85,7 +57,7 @@ struct sync_timeline_ops { * @destroyed: set when sync_timeline is destroyed * @child_list_head: list of children sync_pts for this sync_timeline * @child_list_lock: lock protecting @child_list_head, destroyed, and - * sync_pt.status + * fence.status * @active_list_head: list of active (unsignaled/errored) sync_pts * @sync_timeline_list: membership in global sync_timeline_list */ @@ -108,86 +80,44 @@ struct sync_timeline { #endif }; -/** - * struct sync_pt - sync point - * @fence: base fence class - * @child_list: membership in sync_timeline.child_list_head - * @active_list: membership in sync_timeline.active_list_head - * @signaled_list: membership in temporary signaled_list on stack - * @fence: sync_fence to which the sync_pt belongs - * @pt_list: membership in sync_fence.pt_list_head - * @status: 1: signaled, 0:active, <0: error - * @timestamp: time which sync_pt status transitioned from active to - * signaled or error. 
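After this trim, a sync_timeline implementation only has to supply driver_name and has_signaled(), with the two *_value_str hooks optional; the converted sw_sync ops table earlier in the patch is the canonical example. A compilable stand-alone sketch of that shape, using simplified stand-in types rather than the kernel structs (and, for brevity, passing the timeline explicitly instead of deriving it via fence_parent()):

#include <stdio.h>

/* Stand-in types: the real code uses struct fence and
 * struct sync_timeline; these exist only so the sketch builds. */
struct demo_fence    { unsigned int value; };
struct demo_timeline { unsigned int value; };

struct demo_timeline_ops {
        const char *driver_name;                           /* required */
        int  (*has_signaled)(const struct demo_fence *f,
                             const struct demo_timeline *t); /* required */
        void (*fence_value_str)(const struct demo_fence *f,
                                char *str, int size);       /* optional */
};

static int demo_has_signaled(const struct demo_fence *f,
                             const struct demo_timeline *t)
{
        return f->value <= t->value;    /* same test sw_sync now uses */
}

static void demo_fence_value_str(const struct demo_fence *f,
                                 char *str, int size)
{
        snprintf(str, size, "%u", f->value);
}

static const struct demo_timeline_ops demo_ops = {
        .driver_name     = "demo",
        .has_signaled    = demo_has_signaled,
        .fence_value_str = demo_fence_value_str,
};

int main(void)
{
        struct demo_timeline tl = { .value = 5 };
        struct demo_fence    pt = { .value = 3 };
        char buf[16];

        demo_ops.fence_value_str(&pt, buf, sizeof(buf));
        printf("fence %s signaled=%d\n", buf,
               demo_ops.has_signaled(&pt, &tl));
        return 0;
}
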
- */ -struct sync_pt { - struct fence base; - - struct list_head child_list; - struct list_head active_list; -}; - -static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt) +static inline struct sync_timeline *fence_parent(struct fence *fence) { - return container_of(pt->base.lock, struct sync_timeline, + return container_of(fence->lock, struct sync_timeline, child_list_lock); } -struct sync_fence_cb { +struct sync_file_cb { struct fence_cb cb; - struct fence *sync_pt; - struct sync_fence *fence; + struct fence *fence; + struct sync_file *sync_file; }; /** - * struct sync_fence - sync fence + * struct sync_file - sync file to export to the userspace * @file: file representing this fence * @kref: reference count on fence. - * @name: name of sync_fence. Useful for debugging - * @pt_list_head: list of sync_pts in the fence. immutable once fence - * is created - * @status: 0: signaled, >0:active, <0: error - * + * @name: name of sync_file. Useful for debugging + * @sync_file_list: membership in global file list + * @num_fences number of sync_pts in the fence * @wq: wait queue for fence signaling - * @sync_fence_list: membership in global fence list + * @status: 0: signaled, >0:active, <0: error + * @cbs: sync_pts callback information */ -struct sync_fence { +struct sync_file { struct file *file; struct kref kref; char name[32]; #ifdef CONFIG_DEBUG_FS - struct list_head sync_fence_list; + struct list_head sync_file_list; #endif int num_fences; wait_queue_head_t wq; atomic_t status; - struct sync_fence_cb cbs[]; -}; - -struct sync_fence_waiter; -typedef void (*sync_callback_t)(struct sync_fence *fence, - struct sync_fence_waiter *waiter); - -/** - * struct sync_fence_waiter - metadata for asynchronous waiter on a fence - * @waiter_list: membership in sync_fence.waiter_list_head - * @callback: function pointer to call when fence signals - * @callback_data: pointer to pass to @callback - */ -struct sync_fence_waiter { - wait_queue_t work; - sync_callback_t callback; + struct sync_file_cb cbs[]; }; -static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, - sync_callback_t callback) -{ - INIT_LIST_HEAD(&waiter->work.task_list); - waiter->callback = callback; -} - /* * API for sync_timeline implementers */ @@ -200,7 +130,8 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, * * Creates a new sync_timeline which will use the implementation specified by * @ops. @size bytes will be allocated allowing for implementation specific - * data to be kept after the generic sync_timeline struct. + * data to be kept after the generic sync_timeline struct. Returns the + * sync_timeline object or NULL in case of error. */ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, int size, const char *name); @@ -211,7 +142,7 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, * * A sync implementation should call this when the @obj is going away * (i.e. module unload.) @obj won't actually be freed until all its children - * sync_pts are freed. + * fences are freed. */ void sync_timeline_destroy(struct sync_timeline *obj); @@ -219,148 +150,92 @@ void sync_timeline_destroy(struct sync_timeline *obj); * sync_timeline_signal() - signal a status change on a sync_timeline * @obj: sync_timeline to signal * - * A sync implementation should call this any time one of it's sync_pts + * A sync implementation should call this any time one of it's fences * has signaled or has an error condition. 
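fence_parent() above is a plain container_of() lookup: the fence only carries a pointer to the timeline's child_list_lock, and the owning sync_timeline is recovered by subtracting the member offset. A userspace sketch of the same trick, with a local container_of definition and simplified types:

#include <stdio.h>
#include <stddef.h>

/* Userspace restatement of the kernel's container_of(): recover the
 * enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct timeline {
        unsigned int value;
        int child_list_lock;            /* stands in for the spinlock */
};

/* Same shape as fence_parent(): map the lock pointer back to the
 * timeline that embeds it. */
static struct timeline *timeline_from_lock(int *lock)
{
        return container_of(lock, struct timeline, child_list_lock);
}

int main(void)
{
        struct timeline tl = { .value = 42, .child_list_lock = 0 };
        struct timeline *parent = timeline_from_lock(&tl.child_list_lock);

        printf("parent value = %u\n", parent->value);   /* prints 42 */
        return 0;
}
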
*/ void sync_timeline_signal(struct sync_timeline *obj); /** * sync_pt_create() - creates a sync pt - * @parent: sync_pt's parent sync_timeline + * @parent: fence's parent sync_timeline * @size: size to allocate for this pt * - * Creates a new sync_pt as a child of @parent. @size bytes will be + * Creates a new fence as a child of @parent. @size bytes will be * allocated allowing for implementation specific data to be kept after - * the generic sync_timeline struct. - */ -struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size); - -/** - * sync_pt_free() - frees a sync pt - * @pt: sync_pt to free - * - * This should only be called on sync_pts which have been created but - * not added to a fence. + * the generic sync_timeline struct. Returns the fence object or + * NULL in case of error. */ -void sync_pt_free(struct sync_pt *pt); +struct fence *sync_pt_create(struct sync_timeline *parent, int size); /** * sync_fence_create() - creates a sync fence * @name: name of fence to create - * @pt: sync_pt to add to the fence - * - * Creates a fence containg @pt. Once this is called, the fence takes - * ownership of @pt. - */ -struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt); - -/** - * sync_fence_create_dma() - creates a sync fence from dma-fence - * @name: name of fence to create - * @pt: dma-fence to add to the fence + * @fence: fence to add to the sync_fence * - * Creates a fence containg @pt. Once this is called, the fence takes - * ownership of @pt. + * Creates a sync_file containg @fence. Once this is called, the sync_file + * takes ownership of @fence. */ -struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt); +struct sync_file *sync_file_create(const char *name, struct fence *fence); /* - * API for sync_fence consumers + * API for sync_file consumers */ /** - * sync_fence_merge() - merge two fences + * sync_file_merge() - merge two sync_files * @name: name of new fence - * @a: fence a - * @b: fence b + * @a: sync_file a + * @b: sync_file b * - * Creates a new fence which contains copies of all the sync_pts in both - * @a and @b. @a and @b remain valid, independent fences. + * Creates a new sync_file which contains copies of all the fences in both + * @a and @b. @a and @b remain valid, independent sync_file. Returns the + * new merged sync_file or NULL in case of error. */ -struct sync_fence *sync_fence_merge(const char *name, - struct sync_fence *a, struct sync_fence *b); +struct sync_file *sync_file_merge(const char *name, + struct sync_file *a, struct sync_file *b); /** - * sync_fence_fdget() - get a fence from an fd + * sync_file_fdget() - get a sync_file from an fd * @fd: fd referencing a fence * - * Ensures @fd references a valid fence, increments the refcount of the backing - * file, and returns the fence. + * Ensures @fd references a valid sync_file, increments the refcount of the + * backing file. Returns the sync_file or NULL in case of error. */ -struct sync_fence *sync_fence_fdget(int fd); +struct sync_file *sync_file_fdget(int fd); /** - * sync_fence_put() - puts a reference of a sync fence - * @fence: fence to put + * sync_file_put() - puts a reference of a sync_file + * @sync_file: sync_file to put * - * Puts a reference on @fence. If this is the last reference, the fence and - * all it's sync_pts will be freed + * Puts a reference on @sync_fence. 
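The sync_file lifetime rules described here follow the usual kref discipline: fdget and create take references, every put drops one, and the last put frees the object after detaching its fence callbacks (see sync_file_free() earlier in the patch). A tiny userspace analogue of that get/put pattern, not the kernel kref API, just the idea:

#include <stdio.h>
#include <stdlib.h>

/* Minimal refcount sketch: the last put frees the object. */
struct obj {
        int refcount;
        char name[32];
};

static struct obj *obj_get(struct obj *o)
{
        o->refcount++;
        return o;
}

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                printf("freeing %s\n", o->name);
                free(o);
        }
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 1;
        snprintf(o->name, sizeof(o->name), "sync_file_demo");

        obj_get(o);     /* second reference, e.g. an installed fd */
        obj_put(o);     /* drop one reference */
        obj_put(o);     /* last put frees */
        return 0;
}
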
If this is the last reference, the + * sync_fil and all it's sync_pts will be freed */ -void sync_fence_put(struct sync_fence *fence); +void sync_file_put(struct sync_file *sync_file); /** - * sync_fence_install() - installs a fence into a file descriptor - * @fence: fence to install + * sync_file_install() - installs a sync_file into a file descriptor + * @sync_file: sync_file to install * @fd: file descriptor in which to install the fence * - * Installs @fence into @fd. @fd's should be acquired through + * Installs @sync_file into @fd. @fd's should be acquired through * get_unused_fd_flags(O_CLOEXEC). */ -void sync_fence_install(struct sync_fence *fence, int fd); - -/** - * sync_fence_wait_async() - registers and async wait on the fence - * @fence: fence to wait on - * @waiter: waiter callback struck - * - * Returns 1 if @fence has already signaled. - * - * Registers a callback to be called when @fence signals or has an error. - * @waiter should be initialized with sync_fence_waiter_init(). - */ -int sync_fence_wait_async(struct sync_fence *fence, - struct sync_fence_waiter *waiter); - -/** - * sync_fence_cancel_async() - cancels an async wait - * @fence: fence to wait on - * @waiter: waiter callback struck - * - * returns 0 if waiter was removed from fence's async waiter list. - * returns -ENOENT if waiter was not found on fence's async waiter list. - * - * Cancels a previously registered async wait. Will fail gracefully if - * @waiter was never registered or if @fence has already signaled @waiter. - */ -int sync_fence_cancel_async(struct sync_fence *fence, - struct sync_fence_waiter *waiter); - -/** - * sync_fence_wait() - wait on fence - * @fence: fence to wait on - * @tiemout: timeout in ms - * - * Wait for @fence to be signaled or have an error. 
Waits indefinitely - * if @timeout < 0 - */ -int sync_fence_wait(struct sync_fence *fence, long timeout); +void sync_file_install(struct sync_file *sync_file, int fd); #ifdef CONFIG_DEBUG_FS void sync_timeline_debug_add(struct sync_timeline *obj); void sync_timeline_debug_remove(struct sync_timeline *obj); -void sync_fence_debug_add(struct sync_fence *fence); -void sync_fence_debug_remove(struct sync_fence *fence); +void sync_file_debug_add(struct sync_file *fence); +void sync_file_debug_remove(struct sync_file *fence); void sync_dump(void); #else # define sync_timeline_debug_add(obj) # define sync_timeline_debug_remove(obj) -# define sync_fence_debug_add(fence) -# define sync_fence_debug_remove(fence) +# define sync_file_debug_add(fence) +# define sync_file_debug_remove(fence) # define sync_dump() #endif -int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, - int wake_flags, void *key); #endif /* _LINUX_SYNC_H */ diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c index f45d13cdd42b..5a7ec58fbc09 100644 --- a/drivers/staging/android/sync_debug.c +++ b/drivers/staging/android/sync_debug.c @@ -26,14 +26,16 @@ #include <linux/uaccess.h> #include <linux/anon_inodes.h> #include <linux/time64.h> -#include "sync.h" +#include "sw_sync.h" #ifdef CONFIG_DEBUG_FS +static struct dentry *dbgfs; + static LIST_HEAD(sync_timeline_list_head); static DEFINE_SPINLOCK(sync_timeline_list_lock); -static LIST_HEAD(sync_fence_list_head); -static DEFINE_SPINLOCK(sync_fence_list_lock); +static LIST_HEAD(sync_file_list_head); +static DEFINE_SPINLOCK(sync_file_list_lock); void sync_timeline_debug_add(struct sync_timeline *obj) { @@ -53,22 +55,22 @@ void sync_timeline_debug_remove(struct sync_timeline *obj) spin_unlock_irqrestore(&sync_timeline_list_lock, flags); } -void sync_fence_debug_add(struct sync_fence *fence) +void sync_file_debug_add(struct sync_file *sync_file) { unsigned long flags; - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); - spin_unlock_irqrestore(&sync_fence_list_lock, flags); + spin_lock_irqsave(&sync_file_list_lock, flags); + list_add_tail(&sync_file->sync_file_list, &sync_file_list_head); + spin_unlock_irqrestore(&sync_file_list_lock, flags); } -void sync_fence_debug_remove(struct sync_fence *fence) +void sync_file_debug_remove(struct sync_file *sync_file) { unsigned long flags; - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_del(&fence->sync_fence_list); - spin_unlock_irqrestore(&sync_fence_list_lock, flags); + spin_lock_irqsave(&sync_file_list_lock, flags); + list_del(&sync_file->sync_file_list); + spin_unlock_irqrestore(&sync_file_list_lock, flags); } static const char *sync_status_str(int status) @@ -82,39 +84,40 @@ static const char *sync_status_str(int status) return "error"; } -static void sync_print_pt(struct seq_file *s, struct fence *pt, bool fence) +static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show) { int status = 1; + struct sync_timeline *parent = fence_parent(fence); - if (fence_is_signaled_locked(pt)) - status = pt->status; + if (fence_is_signaled_locked(fence)) + status = fence->status; - seq_printf(s, " %s%spt %s", - fence && pt->ops->get_timeline_name ? - pt->ops->get_timeline_name(pt) : "", - fence ? "_" : "", + seq_printf(s, " %s%sfence %s", + show ? parent->name : "", + show ? 
"_" : "", sync_status_str(status)); if (status <= 0) { struct timespec64 ts64 = - ktime_to_timespec64(pt->timestamp); + ktime_to_timespec64(fence->timestamp); seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec); } - if ((!fence || pt->ops->timeline_value_str) && - pt->ops->fence_value_str) { + if ((!fence || fence->ops->timeline_value_str) && + fence->ops->fence_value_str) { char value[64]; bool success; - pt->ops->fence_value_str(pt, value, sizeof(value)); + fence->ops->fence_value_str(fence, value, sizeof(value)); success = strlen(value); if (success) seq_printf(s, ": %s", value); if (success && fence) { - pt->ops->timeline_value_str(pt, value, sizeof(value)); + fence->ops->timeline_value_str(fence, value, + sizeof(value)); if (strlen(value)) seq_printf(s, " / %s", value); @@ -142,38 +145,23 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) spin_lock_irqsave(&obj->child_list_lock, flags); list_for_each(pos, &obj->child_list_head) { - struct sync_pt *pt = - container_of(pos, struct sync_pt, child_list); - sync_print_pt(s, &pt->base, false); + struct fence *fence = + container_of(pos, struct fence, child_list); + sync_print_fence(s, fence, false); } spin_unlock_irqrestore(&obj->child_list_lock, flags); } -static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) +static void sync_print_sync_file(struct seq_file *s, + struct sync_file *sync_file) { - wait_queue_t *pos; - unsigned long flags; int i; - seq_printf(s, "[%p] %s: %s\n", fence, fence->name, - sync_status_str(atomic_read(&fence->status))); - - for (i = 0; i < fence->num_fences; ++i) { - sync_print_pt(s, fence->cbs[i].sync_pt, true); - } - - spin_lock_irqsave(&fence->wq.lock, flags); - list_for_each_entry(pos, &fence->wq.task_list, task_list) { - struct sync_fence_waiter *waiter; - - if (pos->func != &sync_fence_wake_up_wq) - continue; + seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, + sync_status_str(atomic_read(&sync_file->status))); - waiter = container_of(pos, struct sync_fence_waiter, work); - - seq_printf(s, "waiter %pF\n", waiter->callback); - } - spin_unlock_irqrestore(&fence->wq.lock, flags); + for (i = 0; i < sync_file->num_fences; ++i) + sync_print_fence(s, sync_file->cbs[i].fence, true); } static int sync_debugfs_show(struct seq_file *s, void *unused) @@ -196,33 +184,152 @@ static int sync_debugfs_show(struct seq_file *s, void *unused) seq_puts(s, "fences:\n--------------\n"); - spin_lock_irqsave(&sync_fence_list_lock, flags); - list_for_each(pos, &sync_fence_list_head) { - struct sync_fence *fence = - container_of(pos, struct sync_fence, sync_fence_list); + spin_lock_irqsave(&sync_file_list_lock, flags); + list_for_each(pos, &sync_file_list_head) { + struct sync_file *sync_file = + container_of(pos, struct sync_file, sync_file_list); - sync_print_fence(s, fence); + sync_print_sync_file(s, sync_file); seq_puts(s, "\n"); } - spin_unlock_irqrestore(&sync_fence_list_lock, flags); + spin_unlock_irqrestore(&sync_file_list_lock, flags); return 0; } -static int sync_debugfs_open(struct inode *inode, struct file *file) +static int sync_info_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, sync_debugfs_show, inode->i_private); } -static const struct file_operations sync_debugfs_fops = { - .open = sync_debugfs_open, +static const struct file_operations sync_info_debugfs_fops = { + .open = sync_info_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; +/* + * *WARNING* + * + * improper use of this can result 
in deadlocking kernel drivers from userspace. + */ + +/* opening sw_sync create a new sync obj */ +static int sw_sync_debugfs_open(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj; + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, current); + + obj = sw_sync_timeline_create(task_comm); + if (!obj) + return -ENOMEM; + + file->private_data = obj; + + return 0; +} + +static int sw_sync_debugfs_release(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj = file->private_data; + + sync_timeline_destroy(&obj->obj); + return 0; +} + +static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, + unsigned long arg) +{ + int fd = get_unused_fd_flags(O_CLOEXEC); + int err; + struct fence *fence; + struct sync_file *sync_file; + struct sw_sync_create_fence_data data; + + if (fd < 0) + return fd; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { + err = -EFAULT; + goto err; + } + + fence = sw_sync_pt_create(obj, data.value); + if (!fence) { + err = -ENOMEM; + goto err; + } + + data.name[sizeof(data.name) - 1] = '\0'; + sync_file = sync_file_create(data.name, fence); + if (!sync_file) { + fence_put(fence); + err = -ENOMEM; + goto err; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + sync_file_put(sync_file); + err = -EFAULT; + goto err; + } + + sync_file_install(sync_file, fd); + + return 0; + +err: + put_unused_fd(fd); + return err; +} + +static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) +{ + u32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + sw_sync_timeline_inc(obj, value); + + return 0; +} + +static long sw_sync_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sw_sync_timeline *obj = file->private_data; + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + return sw_sync_ioctl_create_fence(obj, arg); + + case SW_SYNC_IOC_INC: + return sw_sync_ioctl_inc(obj, arg); + + default: + return -ENOTTY; + } +} + +static const struct file_operations sw_sync_debugfs_fops = { + .open = sw_sync_debugfs_open, + .release = sw_sync_debugfs_release, + .unlocked_ioctl = sw_sync_ioctl, + .compat_ioctl = sw_sync_ioctl, +}; + static __init int sync_debugfs_init(void) { - debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); + dbgfs = debugfs_create_dir("sync", NULL); + + debugfs_create_file("info", 0444, dbgfs, NULL, &sync_info_debugfs_fops); + debugfs_create_file("sw_sync", 0644, dbgfs, NULL, + &sw_sync_debugfs_fops); + return 0; } late_initcall(sync_debugfs_init); diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c index bcd9924d4631..914fd1005467 100644 --- a/drivers/staging/android/timed_gpio.c +++ b/drivers/staging/android/timed_gpio.c @@ -92,9 +92,8 @@ static int timed_gpio_probe(struct platform_device *pdev) if (!pdata) return -EBUSY; - gpio_data = devm_kzalloc(&pdev->dev, - sizeof(*gpio_data) * pdata->num_gpios, - GFP_KERNEL); + gpio_data = devm_kcalloc(&pdev->dev, pdata->num_gpios, + sizeof(*gpio_data), GFP_KERNEL); if (!gpio_data) return -ENOMEM; diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h index 77edb977a7bf..a0f80f41677e 100644 --- a/drivers/staging/android/trace/sync.h +++ b/drivers/staging/android/trace/sync.h @@ -32,50 +32,6 @@ TRACE_EVENT(sync_timeline, TP_printk("name=%s value=%s", __get_str(name), __entry->value) ); -TRACE_EVENT(sync_wait, - TP_PROTO(struct sync_fence *fence, int 
begin), - - TP_ARGS(fence, begin), - - TP_STRUCT__entry( - __string(name, fence->name) - __field(s32, status) - __field(u32, begin) - ), - - TP_fast_assign( - __assign_str(name, fence->name); - __entry->status = atomic_read(&fence->status); - __entry->begin = begin; - ), - - TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end", - __get_str(name), __entry->status) -); - -TRACE_EVENT(sync_pt, - TP_PROTO(struct fence *pt), - - TP_ARGS(pt), - - TP_STRUCT__entry( - __string(timeline, pt->ops->get_timeline_name(pt)) - __array(char, value, 32) - ), - - TP_fast_assign( - __assign_str(timeline, pt->ops->get_timeline_name(pt)); - if (pt->ops->fence_value_str) { - pt->ops->fence_value_str(pt, __entry->value, - sizeof(__entry->value)); - } else { - __entry->value[0] = '\0'; - } - ), - - TP_printk("name=%s value=%s", __get_str(timeline), __entry->value) -); - #endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */ /* This part must be outside protection */ diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h index ba4743c71d6b..13df42d200b7 100644 --- a/drivers/staging/android/uapi/ashmem.h +++ b/drivers/staging/android/uapi/ashmem.h @@ -13,6 +13,7 @@ #define _UAPI_LINUX_ASHMEM_H #include <linux/ioctl.h> +#include <linux/types.h> #define ASHMEM_NAME_LEN 256 diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h index e964c751f6b8..a0cf357e598d 100644 --- a/drivers/staging/android/uapi/sync.h +++ b/drivers/staging/android/uapi/sync.h @@ -27,51 +27,39 @@ struct sync_merge_data { }; /** - * struct sync_pt_info - detailed sync_pt information - * @len: length of sync_pt_info including any driver_data + * struct sync_fence_info - detailed fence information * @obj_name: name of parent sync_timeline * @driver_name: name of driver implementing the parent - * @status: status of the sync_pt 0:active 1:signaled <0:error + * @status: status of the fence 0:active 1:signaled <0:error * @timestamp_ns: timestamp of status change in nanoseconds - * @driver_data: any driver dependent data */ -struct sync_pt_info { - __u32 len; +struct sync_fence_info { char obj_name[32]; char driver_name[32]; __s32 status; __u64 timestamp_ns; - - __u8 driver_data[0]; }; /** - * struct sync_fence_info_data - data returned from fence info ioctl + * struct sync_file_info - data returned from fence info ioctl * @len: ioctl caller writes the size of the buffer its passing in. - * ioctl returns length of sync_fence_data returned to userspace - * including pt_info. + * ioctl returns length of sync_file_info returned to + * userspace including pt_info. * @name: name of fence * @status: status of fence. 1: signaled 0:active <0:error - * @pt_info: a sync_pt_info struct for every sync_pt in the fence + * @sync_fence_info: array of sync_fence_info for every fence in the sync_file */ -struct sync_fence_info_data { +struct sync_file_info { __u32 len; char name[32]; __s32 status; - __u8 pt_info[0]; + __u8 sync_fence_info[0]; }; #define SYNC_IOC_MAGIC '>' /** - * DOC: SYNC_IOC_WAIT - wait for a fence to signal - * - * pass timeout in milliseconds. Waits indefinitely timeout < 0. - */ -#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32) - -/** * DOC: SYNC_IOC_MERGE - merge two fences * * Takes a struct sync_merge_data. 
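To make the new debugfs interface concrete, here is a minimal userspace sketch of driving it; the struct layout, the SW_SYNC_IOC_* numbers and the debugfs path are assumed to match the staging sw_sync uapi header and are not part of this patch, and error handling is reduced to the bare minimum:

/*
 * Sketch only: mirrors the staging uapi/sw_sync.h definitions; the
 * debugfs path and all numeric values here are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>

struct sw_sync_create_fence_data {
	__u32	value;		/* timeline value at which the fence signals */
	char	name[32];
	__s32	fence;		/* fd of the new fence, filled in by the kernel */
};

#define SW_SYNC_IOC_MAGIC	'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0, \
					struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC		_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)

int main(void)
{
	/* opening sw_sync creates a new timeline owned by this fd */
	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data data = { .value = 1, .name = "demo" };
	__u32 step = 1;

	if (tl < 0)
		return 1;
	if (ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;
	ioctl(tl, SW_SYNC_IOC_INC, &step);	/* timeline 0 -> 1: fence signals */
	close(data.fence);
	close(tl);				/* releasing the fd destroys the timeline */
	return 0;
}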
Creates a new fence containing copies of @@ -83,15 +71,14 @@ struct sync_fence_info_data { /** * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence * - * Takes a struct sync_fence_info_data with extra space allocated for pt_info. + * Takes a struct sync_file_info_data with extra space allocated for pt_info. * Caller should write the size of the buffer into len. On return, len is - * updated to reflect the total size of the sync_fence_info_data including + * updated to reflect the total size of the sync_file_info_data including * pt_info. * * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. * To iterate over the sync_pt_infos, use the sync_pt_info.len field. */ -#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\ - struct sync_fence_info_data) +#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2, struct sync_file_info) #endif /* _UAPI_LINUX_SYNC_H */ diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c index 912c96b0536d..bb63ece4d766 100644 --- a/drivers/staging/board/armadillo800eva.c +++ b/drivers/staging/board/armadillo800eva.c @@ -27,7 +27,6 @@ #include "board.h" - static struct fb_videomode lcdc0_mode = { .name = "AMPIER/AM-800480", .xres = 800, diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c index 965afc79aadd..45807d8287d1 100644 --- a/drivers/staging/board/board.c +++ b/drivers/staging/board/board.c @@ -155,7 +155,6 @@ static int board_staging_add_dev_domain(struct platform_device *pdev, if (IS_ERR(pd)) { pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd)); return PTR_ERR(pd); - } pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name); diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c index b8e2f611fd47..7b8be5293883 100644 --- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c +++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c @@ -32,8 +32,8 @@ #define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n)) -#define WZRD_CLkOUT0_FRAC_EN BIT(18) -#define WZRD_CLkFBOUT_FRAC_EN BIT(26) +#define WZRD_CLKOUT0_FRAC_EN BIT(18) +#define WZRD_CLKFBOUT_FRAC_EN BIT(26) #define WZRD_CLKFBOUT_MULT_SHIFT 8 #define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT) @@ -71,6 +71,7 @@ struct clk_wzrd { int speed_grade; bool suspended; }; + #define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb) /* maximum frequencies for input/output clocks per speed grade */ @@ -195,9 +196,9 @@ static int clk_wzrd_probe(struct platform_device *pdev) /* we don't support fractional div/mul yet */ reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) & - WZRD_CLkFBOUT_FRAC_EN; + WZRD_CLKFBOUT_FRAC_EN; reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) & - WZRD_CLkOUT0_FRAC_EN; + WZRD_CLKOUT0_FRAC_EN; if (reg) dev_warn(&pdev->dev, "fractional div/mul not supported\n"); diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO index b68fbdb5eebf..f733c017f181 100644 --- a/drivers/staging/comedi/TODO +++ b/drivers/staging/comedi/TODO @@ -3,6 +3,7 @@ TODO: - Lindent - remove all wrappers - audit userspace interface + - Fix coverity 1195261 - cleanup the individual comedi drivers as well Please send patches to Greg Kroah-Hartman <greg@kroah.com> and diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h index 83bd309d011b..ad5297f6d418 100644 --- a/drivers/staging/comedi/comedi.h +++ b/drivers/staging/comedi/comedi.h @@ -1,6 +1,6 @@ /* - * include/comedi.h (installed as 
/usr/include/comedi.h) - * header file for comedi + * comedi.h + * header file for COMEDI user API * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org> @@ -72,12 +72,12 @@ #define CR_AREF(a) (((a) >> 24) & 0x03) #define CR_FLAGS_MASK 0xfc000000 -#define CR_ALT_FILTER (1 << 26) +#define CR_ALT_FILTER 0x04000000 #define CR_DITHER CR_ALT_FILTER #define CR_DEGLITCH CR_ALT_FILTER -#define CR_ALT_SOURCE (1 << 27) -#define CR_EDGE (1 << 30) -#define CR_INVERT (1 << 31) +#define CR_ALT_SOURCE 0x08000000 +#define CR_EDGE 0x40000000 +#define CR_INVERT 0x80000000 #define AREF_GROUND 0x00 /* analog ref = analog ground */ #define AREF_COMMON 0x01 /* analog ref = analog common */ @@ -120,13 +120,6 @@ #define INSN_WAIT (5 | INSN_MASK_WRITE | INSN_MASK_SPECIAL) #define INSN_INTTRIG (6 | INSN_MASK_WRITE | INSN_MASK_SPECIAL) -/* trigger flags */ -/* These flags are used in comedi_trig structures */ - -#define TRIG_DITHER 0x0002 /* enable dithering */ -#define TRIG_DEGLITCH 0x0004 /* enable deglitching */ -#define TRIG_CONFIG 0x0010 /* perform configuration, not triggering */ - /* command flags */ /* These flags are used in comedi_cmd structures */ @@ -190,11 +183,8 @@ #define SDF_MAXDATA 0x0010 /* maxdata depends on channel */ #define SDF_FLAGS 0x0020 /* flags depend on channel */ #define SDF_RANGETYPE 0x0040 /* range type depends on channel */ -#define SDF_MODE0 0x0080 /* can do mode 0 */ -#define SDF_MODE1 0x0100 /* can do mode 1 */ -#define SDF_MODE2 0x0200 /* can do mode 2 */ -#define SDF_MODE3 0x0400 /* can do mode 3 */ -#define SDF_MODE4 0x0800 /* can do mode 4 */ +#define SDF_PWM_COUNTER 0x0080 /* PWM can automatically switch off */ +#define SDF_PWM_HBRIDGE 0x0100 /* PWM is signed (H-bridge) */ #define SDF_CMD 0x1000 /* can do commands (deprecated) */ #define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */ #define SDF_CMD_WRITE 0x4000 /* can do output commands */ @@ -217,30 +207,94 @@ #define SDF_RUNNING 0x08000000 /* subdevice is acquiring data */ #define SDF_LSAMPL 0x10000000 /* subdevice uses 32-bit samples */ #define SDF_PACKED 0x20000000 /* subdevice can do packed DIO */ -/* re recycle these flags for PWM */ -#define SDF_PWM_COUNTER SDF_MODE0 /* PWM can automatically switch off */ -#define SDF_PWM_HBRIDGE SDF_MODE1 /* PWM is signed (H-bridge) */ /* subdevice types */ +/** + * enum comedi_subdevice_type - COMEDI subdevice types + * @COMEDI_SUBD_UNUSED: Unused subdevice. + * @COMEDI_SUBD_AI: Analog input. + * @COMEDI_SUBD_AO: Analog output. + * @COMEDI_SUBD_DI: Digital input. + * @COMEDI_SUBD_DO: Digital output. + * @COMEDI_SUBD_DIO: Digital input/output. + * @COMEDI_SUBD_COUNTER: Counter. + * @COMEDI_SUBD_TIMER: Timer. + * @COMEDI_SUBD_MEMORY: Memory, EEPROM, DPRAM. + * @COMEDI_SUBD_CALIB: Calibration DACs. + * @COMEDI_SUBD_PROC: Processor, DSP. + * @COMEDI_SUBD_SERIAL: Serial I/O. + * @COMEDI_SUBD_PWM: Pulse-Width Modulation output. 
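The CR_* changes above are purely notational (shifted bits rewritten as hexadecimal constants); a chanspec is still packed the same way. A small illustration, assuming the existing CR_PACK_FLAGS() helper from this header:

/* Illustration only; the channel, range and flag values are made up. */
#include "comedi.h"

static unsigned int example_chanspec(void)
{
	/* channel 3, range index 0, ground reference, dithering enabled */
	unsigned int spec = CR_PACK_FLAGS(3, 0, AREF_GROUND, CR_DITHER);

	/* CR_DITHER == CR_ALT_FILTER == 0x04000000, i.e. the old (1 << 26) */
	return spec;
}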
+ */ enum comedi_subdevice_type { - COMEDI_SUBD_UNUSED, /* unused by driver */ - COMEDI_SUBD_AI, /* analog input */ - COMEDI_SUBD_AO, /* analog output */ - COMEDI_SUBD_DI, /* digital input */ - COMEDI_SUBD_DO, /* digital output */ - COMEDI_SUBD_DIO, /* digital input/output */ - COMEDI_SUBD_COUNTER, /* counter */ - COMEDI_SUBD_TIMER, /* timer */ - COMEDI_SUBD_MEMORY, /* memory, EEPROM, DPRAM */ - COMEDI_SUBD_CALIB, /* calibration DACs */ - COMEDI_SUBD_PROC, /* processor, DSP */ - COMEDI_SUBD_SERIAL, /* serial IO */ - COMEDI_SUBD_PWM /* PWM */ + COMEDI_SUBD_UNUSED, + COMEDI_SUBD_AI, + COMEDI_SUBD_AO, + COMEDI_SUBD_DI, + COMEDI_SUBD_DO, + COMEDI_SUBD_DIO, + COMEDI_SUBD_COUNTER, + COMEDI_SUBD_TIMER, + COMEDI_SUBD_MEMORY, + COMEDI_SUBD_CALIB, + COMEDI_SUBD_PROC, + COMEDI_SUBD_SERIAL, + COMEDI_SUBD_PWM }; /* configuration instructions */ +/** + * enum configuration_ids - COMEDI configuration instruction codes + * @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input. + * @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output. + * @INSN_CONFIG_DIO_OPENDRAIN: Configure digital I/O as open-drain (or open + * collector) output. + * @INSN_CONFIG_ANALOG_TRIG: Configure analog trigger. + * @INSN_CONFIG_ALT_SOURCE: Configure alternate input source. + * @INSN_CONFIG_DIGITAL_TRIG: Configure digital trigger. + * @INSN_CONFIG_BLOCK_SIZE: Configure block size for DMA transfers. + * @INSN_CONFIG_TIMER_1: Configure divisor for external clock. + * @INSN_CONFIG_FILTER: Configure a filter. + * @INSN_CONFIG_CHANGE_NOTIFY: Configure change notification for digital + * inputs. (New drivers should use + * %INSN_CONFIG_DIGITAL_TRIG instead.) + * @INSN_CONFIG_SERIAL_CLOCK: Configure clock for serial I/O. + * @INSN_CONFIG_BIDIRECTIONAL_DATA: Send and receive byte over serial I/O. + * @INSN_CONFIG_DIO_QUERY: Query direction of digital I/O channel. + * @INSN_CONFIG_PWM_OUTPUT: Configure pulse-width modulator output. + * @INSN_CONFIG_GET_PWM_OUTPUT: Get pulse-width modulator output configuration. + * @INSN_CONFIG_ARM: Arm a subdevice or channel. + * @INSN_CONFIG_DISARM: Disarm a subdevice or channel. + * @INSN_CONFIG_GET_COUNTER_STATUS: Get counter status. + * @INSN_CONFIG_RESET: Reset a subdevice or channel. + * @INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR: Configure counter/timer as + * single pulse generator. + * @INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR: Configure counter/timer as + * pulse train generator. + * @INSN_CONFIG_GPCT_QUADRATURE_ENCODER: Configure counter as a quadrature + * encoder. + * @INSN_CONFIG_SET_GATE_SRC: Set counter/timer gate source. + * @INSN_CONFIG_GET_GATE_SRC: Get counter/timer gate source. + * @INSN_CONFIG_SET_CLOCK_SRC: Set counter/timer master clock source. + * @INSN_CONFIG_GET_CLOCK_SRC: Get counter/timer master clock source. + * @INSN_CONFIG_SET_OTHER_SRC: Set counter/timer "other" source. + * @INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: Get size (in bytes) of subdevice's + * on-board FIFOs used during streaming + * input/output. + * @INSN_CONFIG_SET_COUNTER_MODE: Set counter/timer mode. + * @INSN_CONFIG_8254_SET_MODE: (Deprecated) Same as + * %INSN_CONFIG_SET_COUNTER_MODE. + * @INSN_CONFIG_8254_READ_STATUS: Read status of 8254 counter channel. + * @INSN_CONFIG_SET_ROUTING: Set routing for a channel. + * @INSN_CONFIG_GET_ROUTING: Get routing for a channel. + * @INSN_CONFIG_PWM_SET_PERIOD: Set PWM period in nanoseconds. + * @INSN_CONFIG_PWM_GET_PERIOD: Get PWM period in nanoseconds. + * @INSN_CONFIG_GET_PWM_STATUS: Get PWM status. 
+ * @INSN_CONFIG_PWM_SET_H_BRIDGE: Set PWM H bridge duty cycle and polarity for + * a relay simultaneously. + * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity. + */ enum configuration_ids { INSN_CONFIG_DIO_INPUT = 0, INSN_CONFIG_DIO_OUTPUT = 1, @@ -265,72 +319,76 @@ enum configuration_ids { INSN_CONFIG_DISARM = 32, INSN_CONFIG_GET_COUNTER_STATUS = 33, INSN_CONFIG_RESET = 34, - /* Use CTR as single pulsegenerator */ INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001, - /* Use CTR as pulsetraingenerator */ INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002, - /* Use the counter as encoder */ INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003, - INSN_CONFIG_SET_GATE_SRC = 2001, /* Set gate source */ - INSN_CONFIG_GET_GATE_SRC = 2002, /* Get gate source */ - /* Set master clock source */ + INSN_CONFIG_SET_GATE_SRC = 2001, + INSN_CONFIG_GET_GATE_SRC = 2002, INSN_CONFIG_SET_CLOCK_SRC = 2003, - INSN_CONFIG_GET_CLOCK_SRC = 2004, /* Get master clock source */ - INSN_CONFIG_SET_OTHER_SRC = 2005, /* Set other source */ - /* INSN_CONFIG_GET_OTHER_SRC = 2006,*//* Get other source */ - /* Get size in bytes of subdevice's on-board fifos used during - * streaming input/output - */ + INSN_CONFIG_GET_CLOCK_SRC = 2004, + INSN_CONFIG_SET_OTHER_SRC = 2005, INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006, INSN_CONFIG_SET_COUNTER_MODE = 4097, - /* INSN_CONFIG_8254_SET_MODE is deprecated */ INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE, INSN_CONFIG_8254_READ_STATUS = 4098, INSN_CONFIG_SET_ROUTING = 4099, INSN_CONFIG_GET_ROUTING = 4109, - /* PWM */ - INSN_CONFIG_PWM_SET_PERIOD = 5000, /* sets frequency */ - INSN_CONFIG_PWM_GET_PERIOD = 5001, /* gets frequency */ - INSN_CONFIG_GET_PWM_STATUS = 5002, /* is it running? */ - /* sets H bridge: duty cycle and sign bit for a relay at the - * same time - */ + INSN_CONFIG_PWM_SET_PERIOD = 5000, + INSN_CONFIG_PWM_GET_PERIOD = 5001, + INSN_CONFIG_GET_PWM_STATUS = 5002, INSN_CONFIG_PWM_SET_H_BRIDGE = 5003, - /* gets H bridge data: duty cycle and the sign bit */ INSN_CONFIG_PWM_GET_H_BRIDGE = 5004 }; -/* - * Settings for INSN_CONFIG_DIGITAL_TRIG: - * data[0] = INSN_CONFIG_DIGITAL_TRIG - * data[1] = trigger ID - * data[2] = configuration operation - * data[3] = configuration parameter 1 - * data[4] = configuration parameter 2 - * data[5] = configuration parameter 3 +/** + * enum comedi_digital_trig_op - operations for configuring a digital trigger + * @COMEDI_DIGITAL_TRIG_DISABLE: Return digital trigger to its default, + * inactive, unconfigured state. + * @COMEDI_DIGITAL_TRIG_ENABLE_EDGES: Set rising and/or falling edge inputs + * that each can fire the trigger. + * @COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: Set a combination of high and/or low + * level inputs that can fire the trigger. + * + * These are used with the %INSN_CONFIG_DIGITAL_TRIG configuration instruction. + * The data for the configuration instruction is as follows... * - * operation parameter 1 parameter 2 parameter 3 - * --------------------------------- ----------- ----------- ----------- - * COMEDI_DIGITAL_TRIG_DISABLE - * COMEDI_DIGITAL_TRIG_ENABLE_EDGES left-shift rising-edges falling-edges - * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS left-shift high-levels low-levels + * data[%0] = %INSN_CONFIG_DIGITAL_TRIG * - * COMEDI_DIGITAL_TRIG_DISABLE returns the trigger to its default, inactive, - * unconfigured state. + * data[%1] = trigger ID * - * COMEDI_DIGITAL_TRIG_ENABLE_EDGES sets the rising and/or falling edge inputs - * that each can fire the trigger. 
+ * data[%2] = configuration operation * - * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS sets a combination of high and/or low - * level inputs that can fire the trigger. + * data[%3] = configuration parameter 1 * - * "left-shift" is useful if the trigger has more than 32 inputs to specify the - * first input for this configuration. + * data[%4] = configuration parameter 2 * - * Some sequences of INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly) + * data[%5] = configuration parameter 3 + * + * The trigger ID (data[%1]) is used to differentiate multiple digital triggers + * belonging to the same subdevice. The configuration operation (data[%2]) is + * one of the enum comedi_digital_trig_op values. The configuration + * parameters (data[%3], data[%4], and data[%5]) depend on the operation; they + * are not used with %COMEDI_DIGITAL_TRIG_DISABLE. + * + * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES and %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS, + * configuration parameter 1 (data[%3]) contains a "left-shift" value that + * specifies the input corresponding to bit 0 of configuration parameters 2 + * and 3. This is useful if the trigger has more than 32 inputs. + * + * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES, configuration parameter 2 (data[%4]) + * specifies which of up to 32 inputs have rising-edge sensitivity, and + * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs + * have falling-edge sensitivity that can fire the trigger. + * + * For %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS, configuration parameter 2 (data[%4]) + * specifies which of up to 32 inputs must be at a high level, and + * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs + * must be at a low level for the trigger to fire. + * + * Some sequences of %INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly) * accumulative effect, depending on the low-level driver. This is useful - * when setting up a trigger that has more than 32 inputs or has a combination - * of edge and level triggered inputs. + * when setting up a trigger that has more than 32 inputs, or has a combination + * of edge- and level-triggered inputs. */ enum comedi_digital_trig_op { COMEDI_DIGITAL_TRIG_DISABLE = 0, @@ -338,18 +396,49 @@ enum comedi_digital_trig_op { COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = 2 }; +/** + * enum comedi_io_direction - COMEDI I/O directions + * @COMEDI_INPUT: Input. + * @COMEDI_OUTPUT: Output. + * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output. + * + * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to + * report a direction. They may also be used in other places where a direction + * needs to be specified. + */ enum comedi_io_direction { COMEDI_INPUT = 0, COMEDI_OUTPUT = 1, COMEDI_OPENDRAIN = 2 }; +/** + * enum comedi_support_level - support level for a COMEDI feature + * @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature. + * @COMEDI_SUPPORTED: Feature is supported. + * @COMEDI_UNSUPPORTED: Feature is unsupported. + */ enum comedi_support_level { COMEDI_UNKNOWN_SUPPORT = 0, COMEDI_SUPPORTED, COMEDI_UNSUPPORTED }; +/** + * enum comedi_counter_status_flags - counter status bits + * @COMEDI_COUNTER_ARMED: Counter is armed. + * @COMEDI_COUNTER_COUNTING: Counter is counting. + * @COMEDI_COUNTER_TERMINAL_COUNT: Counter reached terminal count. + * + * These bitwise values are used by the %INSN_CONFIG_GET_COUNTER_STATUS + * configuration instruction to report the status of a counter. 
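As a concrete illustration of the data[] layout documented above, a sketch of enabling an edge-sensitive digital trigger (the trigger ID, input numbers and edge masks are made-up values):

/* Sketch: rising edges on inputs 0 and 2, falling edge on input 1,
 * for digital trigger ID 0, following the layout described above. */
unsigned int data[6];

data[0] = INSN_CONFIG_DIGITAL_TRIG;
data[1] = 0;				/* trigger ID */
data[2] = COMEDI_DIGITAL_TRIG_ENABLE_EDGES;
data[3] = 0;				/* left-shift: masks start at input 0 */
data[4] = (1u << 0) | (1u << 2);	/* rising-edge sensitive inputs */
data[5] = (1u << 1);			/* falling-edge sensitive inputs */
/* the filled data[] is then submitted as an INSN_CONFIG instruction
 * (insn = INSN_CONFIG, n = 6) for the relevant subdevice and channel */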
+ */ +enum comedi_counter_status_flags { + COMEDI_COUNTER_ARMED = 0x1, + COMEDI_COUNTER_COUNTING = 0x2, + COMEDI_COUNTER_TERMINAL_COUNT = 0x4, +}; + /* ioctls */ #define CIO 'd' @@ -357,7 +446,7 @@ enum comedi_support_level { #define COMEDI_DEVINFO _IOR(CIO, 1, struct comedi_devinfo) #define COMEDI_SUBDINFO _IOR(CIO, 2, struct comedi_subdinfo) #define COMEDI_CHANINFO _IOR(CIO, 3, struct comedi_chaninfo) -#define COMEDI_TRIG _IOWR(CIO, 4, comedi_trig) +/* _IOWR(CIO, 4, ...) is reserved */ #define COMEDI_LOCK _IO(CIO, 5) #define COMEDI_UNLOCK _IO(CIO, 6) #define COMEDI_CANCEL _IO(CIO, 7) @@ -374,21 +463,19 @@ enum comedi_support_level { /* structures */ -struct comedi_trig { - unsigned int subdev; /* subdevice */ - unsigned int mode; /* mode */ - unsigned int flags; - unsigned int n_chan; /* number of channels */ - unsigned int *chanlist; /* channel/range list */ - short *data; /* data list, size depends on subd flags */ - unsigned int n; /* number of scans */ - unsigned int trigsrc; - unsigned int trigvar; - unsigned int trigvar1; - unsigned int data_len; - unsigned int unused[3]; -}; - +/** + * struct comedi_insn - COMEDI instruction + * @insn: COMEDI instruction type (%INSN_xxx). + * @n: Length of @data[]. + * @data: Pointer to data array operated on by the instruction. + * @subdev: Subdevice index. + * @chanspec: A packed "chanspec" value consisting of channel number, + * analog range index, analog reference type, and flags. + * @unused: Reserved for future use. + * + * This is used with the %COMEDI_INSN ioctl, and indirectly with the + * %COMEDI_INSNLIST ioctl. + */ struct comedi_insn { unsigned int insn; unsigned int n; @@ -398,11 +485,95 @@ struct comedi_insn { unsigned int unused[3]; }; +/** + * struct comedi_insnlist - list of COMEDI instructions + * @n_insns: Number of COMEDI instructions. + * @insns: Pointer to array COMEDI instructions. + * + * This is used with the %COMEDI_INSNLIST ioctl. + */ struct comedi_insnlist { unsigned int n_insns; struct comedi_insn __user *insns; }; +/** + * struct comedi_cmd - COMEDI asynchronous acquisition command details + * @subdev: Subdevice index. + * @flags: Command flags (%CMDF_xxx). + * @start_src: "Start acquisition" trigger source (%TRIG_xxx). + * @start_arg: "Start acquisition" trigger argument. + * @scan_begin_src: "Scan begin" trigger source. + * @scan_begin_arg: "Scan begin" trigger argument. + * @convert_src: "Convert" trigger source. + * @convert_arg: "Convert" trigger argument. + * @scan_end_src: "Scan end" trigger source. + * @scan_end_arg: "Scan end" trigger argument. + * @stop_src: "Stop acquisition" trigger source. + * @stop_arg: "Stop acquisition" trigger argument. + * @chanlist: Pointer to array of "chanspec" values, containing a + * sequence of channel numbers packed with analog range + * index, etc. + * @chanlist_len: Number of channels in sequence. + * @data: Pointer to miscellaneous set-up data (not used). + * @data_len: Length of miscellaneous set-up data. + * + * This is used with the %COMEDI_CMD or %COMEDI_CMDTEST ioctl to set-up + * or validate an asynchronous acquisition command. The ioctl may modify + * the &struct comedi_cmd and copy it back to the caller. + * + * Optional command @flags values that can be ORed together... + * + * %CMDF_BOGUS - makes %COMEDI_CMD ioctl return error %EAGAIN instead of + * starting the command. + * + * %CMDF_PRIORITY - requests "hard real-time" processing (which is not + * supported in this version of COMEDI). 
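A hedged userspace sketch of a single configuration instruction built from the structures documented above: querying the direction of one digital channel with INSN_CONFIG_DIO_QUERY through the standard COMEDI_INSN ioctl (the file descriptor, subdevice and channel numbers are placeholders):

/* Sketch only: dio_direction() assumes an already-open COMEDI device fd. */
static int dio_direction(int fd, unsigned int subdev, unsigned int chan)
{
	unsigned int data[2] = { INSN_CONFIG_DIO_QUERY, 0 };
	struct comedi_insn insn = {
		.insn     = INSN_CONFIG,
		.n        = 2,			/* data[0] in, data[1] out */
		.data     = data,
		.subdev   = subdev,
		.chanspec = CR_PACK(chan, 0, AREF_GROUND),
	};

	if (ioctl(fd, COMEDI_INSN, &insn) < 0)
		return -1;

	/* data[1] is now COMEDI_INPUT, COMEDI_OUTPUT or COMEDI_OPENDRAIN */
	return data[1];
}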
+ * + * %CMDF_WAKE_EOS - requests the command makes data available for reading + * after every "scan" period. + * + * %CMDF_WRITE - marks the command as being in the "write" (to device) + * direction. This does not need to be specified by the caller unless the + * subdevice supports commands in either direction. + * + * %CMDF_RAWDATA - prevents the command from "munging" the data between the + * COMEDI sample format and the raw hardware sample format. + * + * %CMDF_ROUND_NEAREST - requests timing periods to be rounded to nearest + * supported values. + * + * %CMDF_ROUND_DOWN - requests timing periods to be rounded down to supported + * values (frequencies rounded up). + * + * %CMDF_ROUND_UP - requests timing periods to be rounded up to supported + * values (frequencies rounded down). + * + * Trigger source values for @start_src, @scan_begin_src, @convert_src, + * @scan_end_src, and @stop_src... + * + * %TRIG_ANY - "all ones" value used to test which trigger sources are + * supported. + * + * %TRIG_INVALID - "all zeroes" value used to indicate that all requested + * trigger sources are invalid. + * + * %TRIG_NONE - never trigger (often used as a @stop_src value). + * + * %TRIG_NOW - trigger after '_arg' nanoseconds. + * + * %TRIG_FOLLOW - trigger follows another event. + * + * %TRIG_TIMER - trigger every '_arg' nanoseconds. + * + * %TRIG_COUNT - trigger when count '_arg' is reached. + * + * %TRIG_EXT - trigger on external signal specified by '_arg'. + * + * %TRIG_INT - trigger on internal, software trigger specified by '_arg'. + * + * %TRIG_OTHER - trigger on other, driver-defined signal specified by '_arg'. + */ struct comedi_cmd { unsigned int subdev; unsigned int flags; @@ -422,13 +593,31 @@ struct comedi_cmd { unsigned int stop_src; unsigned int stop_arg; - unsigned int *chanlist; /* channel/range list */ + unsigned int *chanlist; unsigned int chanlist_len; - short __user *data; /* data list, size depends on subd flags */ + short __user *data; unsigned int data_len; }; +/** + * struct comedi_chaninfo - used to retrieve per-channel information + * @subdev: Subdevice index. + * @maxdata_list: Optional pointer to per-channel maximum data values. + * @flaglist: Optional pointer to per-channel flags. + * @rangelist: Optional pointer to per-channel range types. + * @unused: Reserved for future use. + * + * This is used with the %COMEDI_CHANINFO ioctl to get per-channel information + * for the subdevice. Use of this requires knowledge of the number of channels + * and subdevice flags obtained using the %COMEDI_SUBDINFO ioctl. + * + * The @maxdata_list member must be %NULL unless the %SDF_MAXDATA subdevice + * flag is set. The @flaglist member must be %NULL unless the %SDF_FLAGS + * subdevice flag is set. The @rangelist member must be %NULL unless the + * %SDF_RANGETYPE subdevice flag is set. Otherwise, the arrays they point to + * must be at least as long as the number of channels. + */ struct comedi_chaninfo { unsigned int subdev; unsigned int __user *maxdata_list; @@ -437,17 +626,149 @@ struct comedi_chaninfo { unsigned int unused[4]; }; +/** + * struct comedi_rangeinfo - used to retrieve the range table for a channel + * @range_type: Encodes subdevice index (bits 27:24), channel index + * (bits 23:16) and range table length (bits 15:0). + * @range_ptr: Pointer to array of @struct comedi_krange to be filled + * in with the range table for the channel or subdevice. 
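To show how the trigger sources and flags documented above combine in practice, a minimal, illustrative asynchronous-command setup (subdevice, rates and counts are made-up values; the driver may adjust them during COMEDI_CMDTEST):

/* Illustrative only: two AI channels, one scan per millisecond, 1000 scans. */
unsigned int chanlist[2] = {
	CR_PACK(0, 0, AREF_GROUND),
	CR_PACK(1, 0, AREF_GROUND),
};
struct comedi_cmd cmd = {
	.subdev		= 0,			/* AI subdevice */
	.flags		= CMDF_ROUND_NEAREST,
	.start_src	= TRIG_NOW,		/* start immediately */
	.start_arg	= 0,
	.scan_begin_src	= TRIG_TIMER,		/* one scan every ... */
	.scan_begin_arg	= 1000000,		/* ... 1 ms (nanoseconds) */
	.convert_src	= TRIG_TIMER,		/* 10 us between conversions */
	.convert_arg	= 10000,
	.scan_end_src	= TRIG_COUNT,		/* scan ends after ... */
	.scan_end_arg	= 2,			/* ... both channels are sampled */
	.stop_src	= TRIG_COUNT,		/* stop after ... */
	.stop_arg	= 1000,			/* ... 1000 scans */
	.chanlist	= chanlist,
	.chanlist_len	= 2,
};
/* validate with the COMEDI_CMDTEST ioctl, then start with COMEDI_CMD */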
+ * + * This is used with the %COMEDI_RANGEINFO ioctl to retrieve the range table + * for a specific channel (if the subdevice has the %SDF_RANGETYPE flag set to + * indicate that the range table depends on the channel), or for the subdevice + * as a whole (if the %SDF_RANGETYPE flag is clear, indicating the range table + * is shared by all channels). + * + * The @range_type value is an input to the ioctl and comes from a previous + * use of the %COMEDI_SUBDINFO ioctl (if the %SDF_RANGETYPE flag is clear), + * or the %COMEDI_CHANINFO ioctl (if the %SDF_RANGETYPE flag is set). + */ struct comedi_rangeinfo { unsigned int range_type; void __user *range_ptr; }; +/** + * struct comedi_krange - describes a range in a range table + * @min: Minimum value in millionths (1e-6) of a unit. + * @max: Maximum value in millionths (1e-6) of a unit. + * @flags: Indicates the units (in bits 7:0) OR'ed with optional flags. + * + * A range table is associated with a single channel, or with all channels in a + * subdevice, and a list of one or more ranges. A %struct comedi_krange + * describes the physical range of units for one of those ranges. Sample + * values in COMEDI are unsigned from %0 up to some 'maxdata' value. The + * mapping from sample values to physical units is assumed to be nomimally + * linear (for the purpose of describing the range), with sample value %0 + * mapping to @min, and the 'maxdata' sample value mapping to @max. + * + * The currently defined units are %UNIT_volt (%0), %UNIT_mA (%1), and + * %UNIT_none (%2). The @min and @max values are the physical range multiplied + * by 1e6, so a @max value of %1000000 (with %UNIT_volt) represents a maximal + * value of 1 volt. + * + * The only defined flag value is %RF_EXTERNAL (%0x100), indicating that the + * the range needs to be multiplied by an external reference. + */ struct comedi_krange { - int min; /* fixed point, multiply by 1e-6 */ - int max; /* fixed point, multiply by 1e-6 */ + int min; + int max; unsigned int flags; }; +/** + * struct comedi_subdinfo - used to retrieve information about a subdevice + * @type: Type of subdevice from &enum comedi_subdevice_type. + * @n_chan: Number of channels the subdevice supports. + * @subd_flags: A mixture of static and dynamic flags describing + * aspects of the subdevice and its current state. + * @timer_type: Timer type. Always set to %5 ("nanosecond timer"). + * @len_chanlist: Maximum length of a channel list if the subdevice + * supports asynchronous acquisition commands. + * @maxdata: Maximum sample value for all channels if the + * %SDF_MAXDATA subdevice flag is clear. + * @flags: Channel flags for all channels if the %SDF_FLAGS + * subdevice flag is clear. + * @range_type: The range type for all channels if the %SDF_RANGETYPE + * subdevice flag is clear. Encodes the subdevice index + * (bits 27:24), a dummy channel index %0 (bits 23:16), + * and the range table length (bits 15:0). + * @settling_time_0: Not used. + * @insn_bits_support: Set to %COMEDI_SUPPORTED if the subdevice supports the + * %INSN_BITS instruction, or to %COMEDI_UNSUPPORTED if it + * does not. + * @unused: Reserved for future use. + * + * This is used with the %COMEDI_SUBDINFO ioctl which copies an array of + * &struct comedi_subdinfo back to user space, with one element per subdevice. + * Use of this requires knowledge of the number of subdevices obtained from + * the %COMEDI_DEVINFO ioctl. + * + * These are the @subd_flags values that may be ORed together... 
+ * + * %SDF_BUSY - the subdevice is busy processing an asynchronous command or a + * synchronous instruction. + * + * %SDF_BUSY_OWNER - the subdevice is busy processing an asynchronous + * acquisition command started on the current file object (the file object + * issuing the %COMEDI_SUBDINFO ioctl). + * + * %SDF_LOCKED - the subdevice is locked by a %COMEDI_LOCK ioctl. + * + * %SDF_LOCK_OWNER - the subdevice is locked by a %COMEDI_LOCK ioctl from the + * current file object. + * + * %SDF_MAXDATA - maximum sample values are channel-specific. + * + * %SDF_FLAGS - channel flags are channel-specific. + * + * %SDF_RANGETYPE - range types are channel-specific. + * + * %SDF_PWM_COUNTER - PWM can switch off automatically. + * + * %SDF_PWM_HBRIDGE - or PWM is signed (H-bridge). + * + * %SDF_CMD - the subdevice supports asynchronous commands. + * + * %SDF_SOFT_CALIBRATED - the subdevice uses software calibration. + * + * %SDF_CMD_WRITE - the subdevice supports asynchronous commands in the output + * ("write") direction. + * + * %SDF_CMD_READ - the subdevice supports asynchronous commands in the input + * ("read") direction. + * + * %SDF_READABLE - the subdevice is readable (e.g. analog input). + * + * %SDF_WRITABLE (aliased as %SDF_WRITEABLE) - the subdevice is writable (e.g. + * analog output). + * + * %SDF_INTERNAL - the subdevice has no externally visible lines. + * + * %SDF_GROUND - the subdevice can use ground as an analog reference. + * + * %SDF_COMMON - the subdevice can use a common analog reference. + * + * %SDF_DIFF - the subdevice can use differential inputs (or outputs). + * + * %SDF_OTHER - the subdevice can use some other analog reference. + * + * %SDF_DITHER - the subdevice can do dithering. + * + * %SDF_DEGLITCH - the subdevice can do deglitching. + * + * %SDF_MMAP - this is never set. + * + * %SDF_RUNNING - an asynchronous command is still running. + * + * %SDF_LSAMPL - the subdevice uses "long" (32-bit) samples (for asynchronous + * command data). + * + * %SDF_PACKED - the subdevice packs several DIO samples into a single sample + * (for asynchronous command data). + * + * No "channel flags" (@flags) values are currently defined. + */ struct comedi_subdinfo { unsigned int type; unsigned int n_chan; @@ -455,14 +776,26 @@ struct comedi_subdinfo { unsigned int timer_type; unsigned int len_chanlist; unsigned int maxdata; - unsigned int flags; /* channel flags */ - unsigned int range_type; /* lookup in kernel */ + unsigned int flags; + unsigned int range_type; unsigned int settling_time_0; - /* see support_level enum for values */ unsigned insn_bits_support; unsigned int unused[8]; }; +/** + * struct comedi_devinfo - used to retrieve information about a COMEDI device + * @version_code: COMEDI version code. + * @n_subdevs: Number of subdevices the device has. + * @driver_name: Null-terminated COMEDI driver name. + * @board_name: Null-terminated COMEDI board name. + * @read_subdevice: Index of the current "read" subdevice (%-1 if none). + * @write_subdevice: Index of the current "write" subdevice (%-1 if none). + * @unused: Reserved for future use. + * + * This is used with the %COMEDI_DEVINFO ioctl to get basic information about + * the device. + */ struct comedi_devinfo { unsigned int version_code; unsigned int n_subdevs; @@ -473,11 +806,45 @@ struct comedi_devinfo { int unused[30]; }; +/** + * struct comedi_devconfig - used to configure a legacy COMEDI device + * @board_name: Null-terminated string specifying the type of board + * to configure. 
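The nominally linear mapping described for struct comedi_krange above translates directly into a conversion helper; a sketch, not part of the patch:

/* Sketch: convert a raw sample to physical units.  Per the description
 * above, min/max are in millionths of a unit, sample 0 maps to min and
 * the subdevice's maxdata maps to max. */
static double krange_to_phys(unsigned int raw, const struct comedi_krange *kr,
			     unsigned int maxdata)
{
	double span = (kr->max - kr->min) * 1e-6;

	return kr->min * 1e-6 + span * (double)raw / (double)maxdata;
}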
+ * @options: An array of integer configuration options. + * + * This is used with the %COMEDI_DEVCONFIG ioctl to configure a "legacy" COMEDI + * device, such as an ISA card. Not all COMEDI drivers support this. Those + * that do either expect the specified board name to match one of a list of + * names registered with the COMEDI core, or expect the specified board name + * to match the COMEDI driver name itself. The configuration options are + * handled in a driver-specific manner. + */ struct comedi_devconfig { char board_name[COMEDI_NAMELEN]; int options[COMEDI_NDEVCONFOPTS]; }; +/** + * struct comedi_bufconfig - used to set or get buffer size for a subdevice + * @subdevice: Subdevice index. + * @flags: Not used. + * @maximum_size: Maximum allowed buffer size. + * @size: Buffer size. + * @unused: Reserved for future use. + * + * This is used with the %COMEDI_BUFCONFIG ioctl to get or configure the + * maximum buffer size and current buffer size for a COMEDI subdevice that + * supports asynchronous commands. If the subdevice does not support + * asynchronous commands, @maximum_size and @size are ignored and set to 0. + * + * On ioctl input, non-zero values of @maximum_size and @size specify a + * new maximum size and new current size (in bytes), respectively. These + * will by rounded up to a multiple of %PAGE_SIZE. Specifying a new maximum + * size requires admin capabilities. + * + * On ioctl output, @maximum_size and @size and set to the current maximum + * buffer size and current buffer size, respectively. + */ struct comedi_bufconfig { unsigned int subdevice; unsigned int flags; @@ -488,6 +855,23 @@ struct comedi_bufconfig { unsigned int unused[4]; }; +/** + * struct comedi_bufinfo - used to manipulate buffer position for a subdevice + * @subdevice: Subdevice index. + * @bytes_read: Specify amount to advance read position for an + * asynchronous command in the input ("read") direction. + * @buf_write_ptr: Current write position (index) within the buffer. + * @buf_read_ptr: Current read position (index) within the buffer. + * @buf_write_count: Total amount written, modulo 2^32. + * @buf_read_count: Total amount read, modulo 2^32. + * @bytes_written: Specify amount to advance write position for an + * asynchronous command in the output ("write") direction. + * @unused: Reserved for future use. + * + * This is used with the %COMEDI_BUFINFO ioctl to optionally advance the + * current read or write position in an asynchronous acquisition data buffer, + * and to get the current read and write positions in the buffer. + */ struct comedi_bufinfo { unsigned int subdevice; unsigned int bytes_read; @@ -510,13 +894,13 @@ struct comedi_bufinfo { #define RANGE_LENGTH(b) ((b) & 0xffff) #define RF_UNIT(flags) ((flags) & 0xff) -#define RF_EXTERNAL (1 << 8) +#define RF_EXTERNAL 0x100 #define UNIT_volt 0 #define UNIT_mA 1 #define UNIT_none 2 -#define COMEDI_MIN_SPEED ((unsigned int)0xffffffff) +#define COMEDI_MIN_SPEED 0xffffffffu /**********************************************************/ /* everything after this line is ALPHA */ @@ -849,13 +1233,6 @@ enum ni_660x_pfi_routing { #define NI_EXT_PFI(x) (NI_USUAL_PFI_SELECT(x) - 1) #define NI_EXT_RTSI(x) (NI_USUAL_RTSI_SELECT(x) - 1) -/* status bits for INSN_CONFIG_GET_COUNTER_STATUS */ -enum comedi_counter_status_flags { - COMEDI_COUNTER_ARMED = 0x1, - COMEDI_COUNTER_COUNTING = 0x2, - COMEDI_COUNTER_TERMINAL_COUNT = 0x4, -}; - /* * Clock sources for CDIO subdevice on NI m-series boards. Used as the * scan_begin_arg for a comedi_command. 
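A short userspace sketch of the COMEDI_BUFCONFIG ioctl described above, resizing the streaming buffer of a command-capable subdevice (the file descriptor, subdevice and size are placeholders; the core rounds sizes up to a multiple of PAGE_SIZE):

/* Sketch only: enlarge the current buffer without touching the maximum. */
static int set_buffer_size(int fd, unsigned int subdev, unsigned int size)
{
	struct comedi_bufconfig bc = {
		.subdevice	= subdev,
		.maximum_size	= 0,	/* 0: leave the maximum unchanged */
		.size		= size,	/* e.g. 1 << 20 for a 1 MiB buffer */
	};

	if (ioctl(fd, COMEDI_BUFCONFIG, &bc) < 0)
		return -1;

	return bc.size;			/* actual, page-rounded buffer size */
}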
These sources may also be bitwise-or'd diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index d57fadef47fc..7c7b477b0f28 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -686,13 +686,6 @@ static bool __comedi_is_subdevice_running(struct comedi_subdevice *s) return comedi_is_runflags_running(runflags); } -static bool comedi_is_subdevice_idle(struct comedi_subdevice *s) -{ - unsigned runflags = comedi_get_subdevice_runflags(s); - - return !(runflags & COMEDI_SRF_BUSY_MASK); -} - bool comedi_can_auto_free_spriv(struct comedi_subdevice *s) { unsigned runflags = __comedi_get_subdevice_runflags(s); @@ -1111,6 +1104,9 @@ static int do_bufinfo_ioctl(struct comedi_device *dev, struct comedi_bufinfo bi; struct comedi_subdevice *s; struct comedi_async *async; + unsigned int runflags; + int retval = 0; + bool become_nonbusy = false; if (copy_from_user(&bi, arg, sizeof(bi))) return -EFAULT; @@ -1122,48 +1118,56 @@ static int do_bufinfo_ioctl(struct comedi_device *dev, async = s->async; - if (!async) { - dev_dbg(dev->class_dev, - "subdevice does not have async capability\n"); - bi.buf_write_ptr = 0; - bi.buf_read_ptr = 0; - bi.buf_write_count = 0; - bi.buf_read_count = 0; - bi.bytes_read = 0; - bi.bytes_written = 0; - goto copyback; - } - if (!s->busy) { - bi.bytes_read = 0; - bi.bytes_written = 0; - goto copyback_position; - } - if (s->busy != file) - return -EACCES; - - if (bi.bytes_read && !(async->cmd.flags & CMDF_WRITE)) { - bi.bytes_read = comedi_buf_read_alloc(s, bi.bytes_read); - comedi_buf_read_free(s, bi.bytes_read); + if (!async || s->busy != file) + return -EINVAL; - if (comedi_is_subdevice_idle(s) && - comedi_buf_read_n_available(s) == 0) { - do_become_nonbusy(dev, s); + runflags = comedi_get_subdevice_runflags(s); + if (!(async->cmd.flags & CMDF_WRITE)) { + /* command was set up in "read" direction */ + if (bi.bytes_read) { + comedi_buf_read_alloc(s, bi.bytes_read); + bi.bytes_read = comedi_buf_read_free(s, bi.bytes_read); } + /* + * If nothing left to read, and command has stopped, and + * {"read" position not updated or command stopped normally}, + * then become non-busy. 
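From userspace, the reworked do_bufinfo_ioctl() above keeps the familiar contract for an input ("read") command: report how much was consumed via bytes_read and read back the updated buffer positions, with EPIPE now indicating that the command stopped on an error and the subdevice has been made non-busy. A hedged sketch:

/* Sketch: acknowledge 'consumed' bytes of a mmap()ed input buffer.
 * fd and subdevice are placeholders; includes and the mmap() setup
 * are omitted. */
static long acknowledge_read(int fd, unsigned int subdev, unsigned int consumed)
{
	struct comedi_bufinfo bi = {
		.subdevice	= subdev,
		.bytes_read	= consumed,	/* advance the read position */
	};

	if (ioctl(fd, COMEDI_BUFINFO, &bi) < 0)
		return errno == EPIPE ? -EPIPE : -1;

	/* bytes still waiting to be read */
	return (long)(bi.buf_write_count - bi.buf_read_count);
}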
+ */ + if (comedi_buf_read_n_available(s) == 0 && + !comedi_is_runflags_running(runflags) && + (bi.bytes_read == 0 || + !comedi_is_runflags_in_error(runflags))) { + become_nonbusy = true; + if (comedi_is_runflags_in_error(runflags)) + retval = -EPIPE; + } + bi.bytes_written = 0; + } else { + /* command was set up in "write" direction */ + if (!comedi_is_runflags_running(runflags)) { + bi.bytes_written = 0; + become_nonbusy = true; + if (comedi_is_runflags_in_error(runflags)) + retval = -EPIPE; + } else if (bi.bytes_written) { + comedi_buf_write_alloc(s, bi.bytes_written); + bi.bytes_written = + comedi_buf_write_free(s, bi.bytes_written); + } + bi.bytes_read = 0; } - if (bi.bytes_written && (async->cmd.flags & CMDF_WRITE)) { - bi.bytes_written = - comedi_buf_write_alloc(s, bi.bytes_written); - comedi_buf_write_free(s, bi.bytes_written); - } - -copyback_position: bi.buf_write_count = async->buf_write_count; bi.buf_write_ptr = async->buf_write_ptr; bi.buf_read_count = async->buf_read_count; bi.buf_read_ptr = async->buf_read_ptr; -copyback: + if (become_nonbusy) + do_become_nonbusy(dev, s); + + if (retval) + return retval; + if (copy_to_user(arg, &bi, sizeof(bi))) return -EFAULT; @@ -2220,7 +2224,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma) retval = -EFAULT; goto done; } - if (size & (~PAGE_MASK)) { + if (offset_in_page(size)) { retval = -EFAULT; goto done; } diff --git a/drivers/staging/comedi/comedi_pcmcia.h b/drivers/staging/comedi/comedi_pcmcia.h index 5d3db2b9b4a1..5a572c200a8b 100644 --- a/drivers/staging/comedi/comedi_pcmcia.h +++ b/drivers/staging/comedi/comedi_pcmcia.h @@ -39,7 +39,8 @@ void comedi_pcmcia_driver_unregister(struct comedi_driver *, struct pcmcia_driver *); /** - * module_comedi_pcmcia_driver() - Helper macro for registering a comedi PCMCIA driver + * module_comedi_pcmcia_driver() - Helper macro for registering a comedi + * PCMCIA driver * @__comedi_driver: comedi_driver struct * @__pcmcia_driver: pcmcia_driver struct * diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c index 995096c78844..b6af3eba91fd 100644 --- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c +++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c @@ -496,7 +496,7 @@ static int apci3xxx_ai_ns_to_timer(struct comedi_device *dev, switch (flags & CMDF_ROUND_MASK) { case CMDF_ROUND_NEAREST: default: - timer = (*ns + base / 2) / base; + timer = DIV_ROUND_CLOSEST(*ns, base); break; case CMDF_ROUND_DOWN: timer = *ns / base; diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c index 4b39f6960c0a..907c39cc89d7 100644 --- a/drivers/staging/comedi/drivers/amplc_pci230.c +++ b/drivers/staging/comedi/drivers/amplc_pci230.c @@ -637,12 +637,12 @@ static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase, switch (flags & CMDF_ROUND_MASK) { default: case CMDF_ROUND_NEAREST: - div += (rem + (timebase / 2)) / timebase; + div += DIV_ROUND_CLOSEST(rem, timebase); break; case CMDF_ROUND_DOWN: break; case CMDF_ROUND_UP: - div += (rem + timebase - 1) / timebase; + div += DIV_ROUND_UP(rem, timebase); break; } return div > UINT_MAX ? 
UINT_MAX : (unsigned int)div; diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c index d33b8fe872a7..c773b8ca6599 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas64.c +++ b/drivers/staging/comedi/drivers/cb_pcidas64.c @@ -1376,7 +1376,7 @@ static int set_ai_fifo_segment_length(struct comedi_device *dev, num_entries = fifo->max_segment_length; /* 1 == 256 entries, 2 == 512 entries, etc */ - num_increments = (num_entries + increment_size / 2) / increment_size; + num_increments = DIV_ROUND_CLOSEST(num_entries, increment_size); bits = (~(num_increments - 1)) & fifo->fifo_size_reg_mask; devpriv->fifo_size_bits &= ~fifo->fifo_size_reg_mask; @@ -1480,35 +1480,39 @@ static int alloc_and_init_dma_members(struct comedi_device *dev) /* allocate pci dma buffers */ for (i = 0; i < ai_dma_ring_count(board); i++) { devpriv->ai_buffer[i] = - pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE, - &devpriv->ai_buffer_bus_addr[i]); + dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE, + &devpriv->ai_buffer_bus_addr[i], + GFP_KERNEL); if (!devpriv->ai_buffer[i]) return -ENOMEM; } for (i = 0; i < AO_DMA_RING_COUNT; i++) { if (ao_cmd_is_supported(board)) { devpriv->ao_buffer[i] = - pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE, - &devpriv-> - ao_buffer_bus_addr[i]); + dma_alloc_coherent(&pcidev->dev, + DMA_BUFFER_SIZE, + &devpriv-> + ao_buffer_bus_addr[i], + GFP_KERNEL); if (!devpriv->ao_buffer[i]) return -ENOMEM; } } /* allocate dma descriptors */ devpriv->ai_dma_desc = - pci_alloc_consistent(pcidev, sizeof(struct plx_dma_desc) * - ai_dma_ring_count(board), - &devpriv->ai_dma_desc_bus_addr); + dma_alloc_coherent(&pcidev->dev, sizeof(struct plx_dma_desc) * + ai_dma_ring_count(board), + &devpriv->ai_dma_desc_bus_addr, GFP_KERNEL); if (!devpriv->ai_dma_desc) return -ENOMEM; if (ao_cmd_is_supported(board)) { devpriv->ao_dma_desc = - pci_alloc_consistent(pcidev, - sizeof(struct plx_dma_desc) * - AO_DMA_RING_COUNT, - &devpriv->ao_dma_desc_bus_addr); + dma_alloc_coherent(&pcidev->dev, + sizeof(struct plx_dma_desc) * + AO_DMA_RING_COUNT, + &devpriv->ao_dma_desc_bus_addr, + GFP_KERNEL); if (!devpriv->ao_dma_desc) return -ENOMEM; } @@ -1564,31 +1568,31 @@ static void cb_pcidas64_free_dma(struct comedi_device *dev) /* free pci dma buffers */ for (i = 0; i < ai_dma_ring_count(board); i++) { if (devpriv->ai_buffer[i]) - pci_free_consistent(pcidev, - DMA_BUFFER_SIZE, - devpriv->ai_buffer[i], - devpriv->ai_buffer_bus_addr[i]); + dma_free_coherent(&pcidev->dev, + DMA_BUFFER_SIZE, + devpriv->ai_buffer[i], + devpriv->ai_buffer_bus_addr[i]); } for (i = 0; i < AO_DMA_RING_COUNT; i++) { if (devpriv->ao_buffer[i]) - pci_free_consistent(pcidev, - DMA_BUFFER_SIZE, - devpriv->ao_buffer[i], - devpriv->ao_buffer_bus_addr[i]); + dma_free_coherent(&pcidev->dev, + DMA_BUFFER_SIZE, + devpriv->ao_buffer[i], + devpriv->ao_buffer_bus_addr[i]); } /* free dma descriptors */ if (devpriv->ai_dma_desc) - pci_free_consistent(pcidev, - sizeof(struct plx_dma_desc) * - ai_dma_ring_count(board), - devpriv->ai_dma_desc, - devpriv->ai_dma_desc_bus_addr); + dma_free_coherent(&pcidev->dev, + sizeof(struct plx_dma_desc) * + ai_dma_ring_count(board), + devpriv->ai_dma_desc, + devpriv->ai_dma_desc_bus_addr); if (devpriv->ao_dma_desc) - pci_free_consistent(pcidev, - sizeof(struct plx_dma_desc) * - AO_DMA_RING_COUNT, - devpriv->ao_dma_desc, - devpriv->ao_dma_desc_bus_addr); + dma_free_coherent(&pcidev->dev, + sizeof(struct plx_dma_desc) * + AO_DMA_RING_COUNT, + devpriv->ao_dma_desc, + 
devpriv->ao_dma_desc_bus_addr); } static inline void warn_external_queue(struct comedi_device *dev) @@ -2004,7 +2008,7 @@ static unsigned int get_divisor(unsigned int ns, unsigned int flags) break; case CMDF_ROUND_NEAREST: default: - divisor = (ns + TIMER_BASE / 2) / TIMER_BASE; + divisor = DIV_ROUND_CLOSEST(ns, TIMER_BASE); break; } return divisor; diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c index 19210d89f2b2..84ef45457c60 100644 --- a/drivers/staging/comedi/drivers/cb_pcimdda.c +++ b/drivers/staging/comedi/drivers/cb_pcimdda.c @@ -1,77 +1,78 @@ /* - comedi/drivers/cb_pcimdda.c - Computer Boards PCIM-DDA06-16 Comedi driver - Author: Calin Culianu <calin@ajvar.org> - - COMEDI - Linux Control and Measurement Device Interface - Copyright (C) 2000 David A. Schleef <ds@schleef.org> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. -*/ + * comedi/drivers/cb_pcimdda.c + * Computer Boards PCIM-DDA06-16 Comedi driver + * Author: Calin Culianu <calin@ajvar.org> + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2000 David A. Schleef <ds@schleef.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ /* -Driver: cb_pcimdda -Description: Measurement Computing PCIM-DDA06-16 -Devices: [Measurement Computing] PCIM-DDA06-16 (cb_pcimdda) -Author: Calin Culianu <calin@ajvar.org> -Updated: Mon, 14 Apr 2008 15:15:51 +0100 -Status: works - -All features of the PCIM-DDA06-16 board are supported. This board -has 6 16-bit AO channels, and the usual 8255 DIO setup. (24 channels, -configurable in banks of 8 and 4, etc.). This board does not support commands. - -The board has a peculiar way of specifying AO gain/range settings -- You have -1 jumper bank on the card, which either makes all 6 AO channels either -5 Volt unipolar, 5V bipolar, 10 Volt unipolar or 10V bipolar. - -Since there is absolutely _no_ way to tell in software how this jumper is set -(well, at least according to the rather thin spec. from Measurement Computing - that comes with the board), the driver assumes the jumper is at its factory -default setting of +/-5V. - -Also of note is the fact that this board features another jumper, whose -state is also completely invisible to software. It toggles two possible AO -output modes on the board: - - - Update Mode: Writing to an AO channel instantaneously updates the actual - signal output by the DAC on the board (this is the factory default). - - Simultaneous XFER Mode: Writing to an AO channel has no effect until - you read from any one of the AO channels. 
This is useful for loading - all 6 AO values, and then reading from any one of the AO channels on the - device to instantly update all 6 AO values in unison. Useful for some - control apps, I would assume? If your jumper is in this setting, then you - need to issue your comedi_data_write()s to load all the values you want, - then issue one comedi_data_read() on any channel on the AO subdevice - to initiate the simultaneous XFER. - -Configuration Options: not applicable, uses PCI auto config -*/ + * Driver: cb_pcimdda + * Description: Measurement Computing PCIM-DDA06-16 + * Devices: [Measurement Computing] PCIM-DDA06-16 (cb_pcimdda) + * Author: Calin Culianu <calin@ajvar.org> + * Updated: Mon, 14 Apr 2008 15:15:51 +0100 + * Status: works + * + * All features of the PCIM-DDA06-16 board are supported. + * This board has 6 16-bit AO channels, and the usual 8255 DIO setup. + * (24 channels, configurable in banks of 8 and 4, etc.). + * This board does not support commands. + * + * The board has a peculiar way of specifying AO gain/range settings -- You have + * 1 jumper bank on the card, which either makes all 6 AO channels either + * 5 Volt unipolar, 5V bipolar, 10 Volt unipolar or 10V bipolar. + * + * Since there is absolutely _no_ way to tell in software how this jumper is set + * (well, at least according to the rather thin spec. from Measurement Computing + * that comes with the board), the driver assumes the jumper is at its factory + * default setting of +/-5V. + * + * Also of note is the fact that this board features another jumper, whose + * state is also completely invisible to software. It toggles two possible AO + * output modes on the board: + * + * - Update Mode: Writing to an AO channel instantaneously updates the actual + * signal output by the DAC on the board (this is the factory default). + * - Simultaneous XFER Mode: Writing to an AO channel has no effect until + * you read from any one of the AO channels. This is useful for loading + * all 6 AO values, and then reading from any one of the AO channels on the + * device to instantly update all 6 AO values in unison. Useful for some + * control apps, I would assume? If your jumper is in this setting, then you + * need to issue your comedi_data_write()s to load all the values you want, + * then issue one comedi_data_read() on any channel on the AO subdevice + * to initiate the simultaneous XFER. + * + * Configuration Options: not applicable, uses PCI auto config + */ /* - This is a driver for the Computer Boards PCIM-DDA06-16 Analog Output - card. This board has a unique register layout and as such probably - deserves its own driver file. - - It is theoretically possible to integrate this board into the cb_pcidda - file, but since that isn't my code, I didn't want to significantly - modify that file to support this board (I thought it impolite to do so). - - At any rate, if you feel ambitious, please feel free to take - the code out of this file and combine it with a more unified driver - file. - - I would like to thank Timothy Curry <Timothy.Curry@rdec.redstone.army.mil> - for lending me a board so that I could write this driver. - - -Calin Culianu <calin@ajvar.org> + * This is a driver for the Computer Boards PCIM-DDA06-16 Analog Output + * card. This board has a unique register layout and as such probably + * deserves its own driver file. 
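A hedged comedilib-style sketch of the Simultaneous XFER mode described above: all six values are staged with writes, then a single read from any AO channel latches them to the outputs at once (comedi_data_write()/comedi_data_read() are the usual comedilib calls, not part of this driver; the subdevice and range indices are assumptions):

/* Sketch only: assumes comedilib, subdevice 0 is the 6-channel AO
 * subdevice, range index 0, ground reference. */
#include <comedilib.h>

static void load_and_latch(comedi_t *dev, const lsampl_t vals[6])
{
	lsampl_t dummy;
	int ch;

	for (ch = 0; ch < 6; ch++)		/* stage all six output values */
		comedi_data_write(dev, 0, ch, 0, AREF_GROUND, vals[ch]);

	/* with the jumper in Simultaneous XFER mode, this read updates
	 * all six outputs in unison */
	comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &dummy);
}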
+ * + * It is theoretically possible to integrate this board into the cb_pcidda + * file, but since that isn't my code, I didn't want to significantly + * modify that file to support this board (I thought it impolite to do so). + * + * At any rate, if you feel ambitious, please feel free to take + * the code out of this file and combine it with a more unified driver + * file. + * + * I would like to thank Timothy Curry <Timothy.Curry@rdec.redstone.army.mil> + * for lending me a board so that I could write this driver. + * + * -Calin Culianu <calin@ajvar.org> */ #include <linux/module.h> diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c index 6ba71d114a95..68ef9b1750be 100644 --- a/drivers/staging/comedi/drivers/comedi_isadma.c +++ b/drivers/staging/comedi/drivers/comedi_isadma.c @@ -132,8 +132,7 @@ unsigned int comedi_isadma_poll(struct comedi_isadma *dma) result = result1; if (result >= desc->size || result == 0) return 0; - else - return desc->size - result; + return desc->size - result; } EXPORT_SYMBOL_GPL(comedi_isadma_poll); diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c index 4956a49a6140..5f848396c2f7 100644 --- a/drivers/staging/comedi/drivers/contec_pci_dio.c +++ b/drivers/staging/comedi/drivers/contec_pci_dio.c @@ -1,29 +1,30 @@ /* - comedi/drivers/contec_pci_dio.c - - COMEDI - Linux Control and Measurement Device Interface - Copyright (C) 2000 David A. Schleef <ds@schleef.org> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + * comedi/drivers/contec_pci_dio.c + * + * COMEDI - Linux Control and Measurement Device Interface + * Copyright (C) 2000 David A. Schleef <ds@schleef.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
-*/ /* -Driver: contec_pci_dio -Description: Contec PIO1616L digital I/O board -Devices: [Contec] PIO1616L (contec_pci_dio) -Author: Stefano Rivoir <s.rivoir@gts.it> -Updated: Wed, 27 Jun 2007 13:00:06 +0100 -Status: works - -Configuration Options: not applicable, uses comedi PCI auto config -*/ + * Driver: contec_pci_dio + * Description: Contec PIO1616L digital I/O board + * Devices: [Contec] PIO1616L (contec_pci_dio) + * Author: Stefano Rivoir <s.rivoir@gts.it> + * Updated: Wed, 27 Jun 2007 13:00:06 +0100 + * Status: works + * + * Configuration Options: not applicable, uses comedi PCI auto config + */ #include <linux/module.h> diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c index 80e38dedd359..6c7b4d27c27c 100644 --- a/drivers/staging/comedi/drivers/dt2801.c +++ b/drivers/staging/comedi/drivers/dt2801.c @@ -68,17 +68,17 @@ Configuration options: /* Command modifiers (only used with read/write), EXTTRIG can be used with some other commands. */ -#define DT_MOD_DMA (1<<4) -#define DT_MOD_CONT (1<<5) -#define DT_MOD_EXTCLK (1<<6) -#define DT_MOD_EXTTRIG (1<<7) +#define DT_MOD_DMA BIT(4) +#define DT_MOD_CONT BIT(5) +#define DT_MOD_EXTCLK BIT(6) +#define DT_MOD_EXTTRIG BIT(7) /* Bits in status register */ -#define DT_S_DATA_OUT_READY (1<<0) -#define DT_S_DATA_IN_FULL (1<<1) -#define DT_S_READY (1<<2) -#define DT_S_COMMAND (1<<3) -#define DT_S_COMPOSITE_ERROR (1<<7) +#define DT_S_DATA_OUT_READY BIT(0) +#define DT_S_DATA_IN_FULL BIT(1) +#define DT_S_READY BIT(2) +#define DT_S_COMMAND BIT(3) +#define DT_S_COMPOSITE_ERROR BIT(7) /* registers */ #define DT2801_DATA 0 diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c index 5a536a00066f..40bf00984fa5 100644 --- a/drivers/staging/comedi/drivers/dt282x.c +++ b/drivers/staging/comedi/drivers/dt282x.c @@ -371,13 +371,13 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags) switch (flags & CMDF_ROUND_MASK) { case CMDF_ROUND_NEAREST: default: - divider = (*ns + base / 2) / base; + divider = DIV_ROUND_CLOSEST(*ns, base); break; case CMDF_ROUND_DOWN: divider = (*ns) / base; break; case CMDF_ROUND_UP: - divider = (*ns + base - 1) / base; + divider = DIV_ROUND_UP(*ns, base); break; } if (divider < 256) { diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c index ab7a332fbcc4..19e0b7be8495 100644 --- a/drivers/staging/comedi/drivers/dt3000.c +++ b/drivers/staging/comedi/drivers/dt3000.c @@ -361,7 +361,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, switch (flags & CMDF_ROUND_MASK) { case CMDF_ROUND_NEAREST: default: - divider = (*nanosec + base / 2) / base; + divider = DIV_ROUND_CLOSEST(*nanosec, base); break; case CMDF_ROUND_DOWN: divider = (*nanosec) / base; diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c index 46ca5d938d5b..63b5cbc44bda 100644 --- a/drivers/staging/comedi/drivers/gsc_hpdi.c +++ b/drivers/staging/comedi/drivers/gsc_hpdi.c @@ -499,18 +499,18 @@ static void gsc_hpdi_free_dma(struct comedi_device *dev) /* free pci dma buffers */ for (i = 0; i < NUM_DMA_BUFFERS; i++) { if (devpriv->dio_buffer[i]) - pci_free_consistent(pcidev, - DMA_BUFFER_SIZE, - devpriv->dio_buffer[i], - devpriv->dio_buffer_phys_addr[i]); + dma_free_coherent(&pcidev->dev, + DMA_BUFFER_SIZE, + devpriv->dio_buffer[i], + devpriv->dio_buffer_phys_addr[i]); } /* free dma descriptors */ if (devpriv->dma_desc) - pci_free_consistent(pcidev, - 
sizeof(struct plx_dma_desc) * - NUM_DMA_DESCRIPTORS, - devpriv->dma_desc, - devpriv->dma_desc_phys_addr); + dma_free_coherent(&pcidev->dev, + sizeof(struct plx_dma_desc) * + NUM_DMA_DESCRIPTORS, + devpriv->dma_desc, + devpriv->dma_desc_phys_addr); } static int gsc_hpdi_init(struct comedi_device *dev) @@ -630,14 +630,16 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev, /* allocate pci dma buffers */ for (i = 0; i < NUM_DMA_BUFFERS; i++) { devpriv->dio_buffer[i] = - pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE, - &devpriv->dio_buffer_phys_addr[i]); + dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE, + &devpriv->dio_buffer_phys_addr[i], + GFP_KERNEL); } /* allocate dma descriptors */ - devpriv->dma_desc = pci_alloc_consistent(pcidev, - sizeof(struct plx_dma_desc) * - NUM_DMA_DESCRIPTORS, - &devpriv->dma_desc_phys_addr); + devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev, + sizeof(struct plx_dma_desc) * + NUM_DMA_DESCRIPTORS, + &devpriv->dma_desc_phys_addr, + GFP_KERNEL); if (devpriv->dma_desc_phys_addr & 0xf) { dev_warn(dev->class_dev, " dma descriptors not quad-word aligned (bug)\n"); diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c index fa7ae2c04556..8f24702c3380 100644 --- a/drivers/staging/comedi/drivers/mite.c +++ b/drivers/staging/comedi/drivers/mite.c @@ -297,7 +297,6 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring, { struct comedi_async *async = s->async; unsigned int n_links; - int i; if (ring->descriptors) { dma_free_coherent(ring->hw_dev, @@ -326,17 +325,58 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring, } ring->n_links = n_links; - for (i = 0; i < n_links; i++) { + return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT); +} +EXPORT_SYMBOL_GPL(mite_buf_change); + +/* + * initializes the ring buffer descriptors to provide correct DMA transfer links + * to the exact amount of memory required. When the ring buffer is allocated in + * mite_buf_change, the default is to initialize the ring to refer to the entire + * DMA data buffer. A command may call this function later to re-initialize and + * shorten the amount of memory that will be transferred. + */ +int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring, + struct comedi_subdevice *s, + unsigned int nbytes) +{ + struct comedi_async *async = s->async; + unsigned int n_full_links = nbytes >> PAGE_SHIFT; + unsigned int remainder = nbytes % PAGE_SIZE; + int i; + + dev_dbg(s->device->class_dev, + "mite: init ring buffer to %u bytes\n", nbytes); + + if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) { + dev_err(s->device->class_dev, + "mite: ring buffer too small for requested init\n"); + return -ENOMEM; + } + + /* We set the descriptors for all full links. */ + for (i = 0; i < n_full_links; ++i) { ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE); ring->descriptors[i].addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr); ring->descriptors[i].next = - cpu_to_le32(ring->descriptors_dma_addr + (i + - 1) * - sizeof(struct mite_dma_descriptor)); + cpu_to_le32(ring->descriptors_dma_addr + + (i + 1) * sizeof(struct mite_dma_descriptor)); } - ring->descriptors[n_links - 1].next = - cpu_to_le32(ring->descriptors_dma_addr); + + /* the last link is either a remainder or was a full link. 
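For reference, the pci_alloc_consistent()/pci_free_consistent() calls replaced in the gsc_hpdi hunks above were thin wrappers around the generic DMA API, so the conversion is mechanical apart from the now-explicit GFP flags. A minimal sketch of the same mapping (placeholder size and names, not the gsc_hpdi code itself):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Equivalent of pci_alloc_consistent(pdev, size, dma_handle); GFP_KERNEL
 * is acceptable here on the assumption the caller runs in process
 * context (e.g. auto_attach/probe). */
static void *alloc_dma_buf(struct pci_dev *pdev, size_t size,
			   dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
}

/* Equivalent of pci_free_consistent(pdev, size, cpu_addr, dma_handle). */
static void free_dma_buf(struct pci_dev *pdev, size_t size, void *cpu_addr,
			 dma_addr_t dma_handle)
{
	dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);
}
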
*/ + if (remainder > 0) { + /* set the lesser count for the remainder link */ + ring->descriptors[i].count = cpu_to_le32(remainder); + ring->descriptors[i].addr = + cpu_to_le32(async->buf_map->page_list[i].dma_addr); + /* increment i so that assignment below refs last link */ + ++i; + } + + /* Assign the last link->next to point back to the head of the list. */ + ring->descriptors[i - 1].next = cpu_to_le32(ring->descriptors_dma_addr); + /* * barrier is meant to insure that all the writes to the dma descriptors * have completed before the dma controller is commanded to read them @@ -344,7 +384,7 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring, smp_wmb(); return 0; } -EXPORT_SYMBOL_GPL(mite_buf_change); +EXPORT_SYMBOL_GPL(mite_init_ring_descriptors); void mite_prep_dma(struct mite_channel *mite_chan, unsigned int num_device_bits, unsigned int num_memory_bits) @@ -552,6 +592,7 @@ int mite_sync_output_dma(struct mite_channel *mite_chan, unsigned int old_alloc_count = async->buf_read_alloc_count; u32 nbytes_ub, nbytes_lb; int count; + bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0); /* read alloc as much as we can */ comedi_buf_read_alloc(s, async->prealloc_bufsz); @@ -561,11 +602,24 @@ int mite_sync_output_dma(struct mite_channel *mite_chan, nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan); if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0) nbytes_ub = stop_count; - if ((int)(nbytes_ub - old_alloc_count) > 0) { + + if ((!finite_regen || stop_count > old_alloc_count) && + ((int)(nbytes_ub - old_alloc_count) > 0)) { dev_warn(s->device->class_dev, "mite: DMA underrun\n"); async->events |= COMEDI_CB_OVERFLOW; return -1; } + + if (finite_regen) { + /* + * This is a special case where we continuously output a finite + * buffer. In this case, we do not free any of the memory, + * hence we expect that old_alloc_count will reach a maximum of + * stop_count bytes. + */ + return 0; + } + count = nbytes_lb - async->buf_read_count; if (count <= 0) return 0; diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h index c32d4e4ddccc..87534b07ec81 100644 --- a/drivers/staging/comedi/drivers/mite.h +++ b/drivers/staging/comedi/drivers/mite.h @@ -110,6 +110,9 @@ void mite_prep_dma(struct mite_channel *mite_chan, unsigned int num_device_bits, unsigned int num_memory_bits); int mite_buf_change(struct mite_dma_descriptor_ring *ring, struct comedi_subdevice *s); +int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring, + struct comedi_subdevice *s, + unsigned int nbytes); enum mite_registers { /* diff --git a/drivers/staging/comedi/drivers/ni_mio_c_common.c b/drivers/staging/comedi/drivers/ni_mio_c_common.c new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/drivers/staging/comedi/drivers/ni_mio_c_common.c diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 5e8130a7d670..d1226c97664b 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -1166,8 +1166,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev, comedi_buf_write_samples(s, &data, 1); } } else { - if (n > sizeof(devpriv->ai_fifo_buffer) / - sizeof(devpriv->ai_fifo_buffer[0])) { + if (n > ARRAY_SIZE(devpriv->ai_fifo_buffer)) { dev_err(dev->class_dev, "bug! 
ai_fifo_buffer too small\n"); async->events |= COMEDI_CB_ERROR; @@ -1242,9 +1241,7 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev) NISTC_AI_STATUS1_FIFO_E; while (fifo_empty == 0) { for (i = 0; - i < - sizeof(devpriv->ai_fifo_buffer) / - sizeof(devpriv->ai_fifo_buffer[0]); i++) { + i < ARRAY_SIZE(devpriv->ai_fifo_buffer); i++) { fifo_empty = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) & NISTC_AI_STATUS1_FIFO_E; @@ -1500,7 +1497,8 @@ static void handle_b_interrupt(struct comedi_device *dev, s->async->events |= COMEDI_CB_OVERFLOW; } - if (b_status & NISTC_AO_STATUS1_BC_TC) + if (s->async->cmd.stop_src != TRIG_NONE && + b_status & NISTC_AO_STATUS1_BC_TC) s->async->events |= COMEDI_CB_EOA; #ifndef PCIDMA @@ -2054,13 +2052,13 @@ static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec, switch (flags & CMDF_ROUND_MASK) { case CMDF_ROUND_NEAREST: default: - divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns; + divider = DIV_ROUND_CLOSEST(nanosec, devpriv->clock_ns); break; case CMDF_ROUND_DOWN: divider = (nanosec) / devpriv->clock_ns; break; case CMDF_ROUND_UP: - divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns; + divider = DIV_ROUND_UP(nanosec, devpriv->clock_ns); break; } return divider - 1; @@ -2073,6 +2071,37 @@ static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer) return devpriv->clock_ns * (timer + 1); } +static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring, + struct comedi_subdevice *sdev, + const struct comedi_cmd *cmd, + unsigned int max_count) { +#ifdef PCIDMA + unsigned int nbytes = max_count; + + if (cmd->stop_arg > 0 && cmd->stop_arg < max_count) + nbytes = cmd->stop_arg; + nbytes *= comedi_bytes_per_scan(sdev); + + if (nbytes > sdev->async->prealloc_bufsz) { + if (cmd->stop_arg > 0) + dev_err(sdev->device->class_dev, + "ni_cmd_set_mite_transfer: tried exact data transfer limits greater than buffer size\n"); + + /* + * we can only transfer up to the size of the buffer. In this + * case, the user is expected to continue to write into the + * comedi buffer (already implemented as a ring buffer). + */ + nbytes = sdev->async->prealloc_bufsz; + } + + mite_init_ring_descriptors(ring, sdev, nbytes); +#else + dev_err(sdev->device->class_dev, + "ni_cmd_set_mite_transfer: exact data transfer limits not implemented yet without DMA\n"); +#endif +} + static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev, unsigned num_channels) { @@ -2428,7 +2457,8 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) ni_stc_writew(dev, mode2, NISTC_AI_MODE2_REG); break; case TRIG_EXT: - mode1 |= NISTC_AI_MODE1_CONVERT_SRC(1 + cmd->convert_arg); + mode1 |= NISTC_AI_MODE1_CONVERT_SRC(1 + + CR_CHAN(cmd->convert_arg)); if ((cmd->convert_arg & CR_INVERT) == 0) mode1 |= NISTC_AI_MODE1_CONVERT_POLARITY; ni_stc_writew(dev, mode1, NISTC_AI_MODE1_REG); @@ -2902,8 +2932,6 @@ static int ni_ao_inttrig(struct comedi_device *dev, ni_stc_writew(dev, NISTC_AO_CMD1_UI_ARM | NISTC_AO_CMD1_UC_ARM | NISTC_AO_CMD1_BC_ARM | - NISTC_AO_CMD1_DAC1_UPDATE_MODE | - NISTC_AO_CMD1_DAC0_UPDATE_MODE | devpriv->ao_cmd1, NISTC_AO_CMD1_REG); @@ -2913,42 +2941,68 @@ static int ni_ao_inttrig(struct comedi_device *dev, return 0; } -static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) +/* + * begin ni_ao_cmd. + * Organized similar to NI-STC and MHDDK examples. + * ni_ao_cmd is broken out into configuration sub-routines for clarity. 
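The open-coded divider rounding replaced throughout this series (get_divisor, dt282x, dt3000, ni_ns_to_timer, ni_pcidio, the PWM configs and rtd520) is exactly what the kernel helpers expand to for unsigned operands, so the substitution is behaviour-preserving for the CMDF_ROUND_* cases. A small standalone illustration, with the helper definitions copied here only for demonstration:

#include <assert.h>

/* These mirror the kernel definitions for unsigned operands
 * (include/linux/math.h); reproduced purely to show the equivalence. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int ns = 12345, base = 50;	/* arbitrary example values */

	/* CMDF_ROUND_NEAREST: old open-coded form vs. helper */
	assert((ns + base / 2) / base == DIV_ROUND_CLOSEST(ns, base));
	/* CMDF_ROUND_UP: old open-coded form vs. helper */
	assert((ns + base - 1) / base == DIV_ROUND_UP(ns, base));
	/* CMDF_ROUND_DOWN is plain integer division and is unchanged. */
	return 0;
}
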
+ */ + +static void ni_ao_cmd_personalize(struct comedi_device *dev, + const struct comedi_cmd *cmd) { const struct ni_board_struct *board = dev->board_ptr; - struct ni_private *devpriv = dev->private; - const struct comedi_cmd *cmd = &s->async->cmd; - int bits; - int i; - unsigned trigvar; - unsigned val; - - if (dev->irq == 0) { - dev_err(dev->class_dev, "cannot run command without an irq\n"); - return -EIO; - } + unsigned bits; ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); - ni_stc_writew(dev, NISTC_AO_CMD1_DISARM, NISTC_AO_CMD1_REG); + bits = + /* fast CPU interface--only eseries */ + /* ((slow CPU interface) ? 0 : AO_Fast_CPU) | */ + NISTC_AO_PERSONAL_BC_SRC_SEL | + 0 /* (use_original_pulse ? 0 : NISTC_AO_PERSONAL_UPDATE_TIMEBASE) */ | + /* + * FIXME: start setting following bit when appropriate. Need to + * determine whether board is E4 or E1. + * FROM MHHDK: + * if board is E4 or E1 + * Set bit "NISTC_AO_PERSONAL_UPDATE_PW" to 0 + * else + * set it to 1 + */ + NISTC_AO_PERSONAL_UPDATE_PW | + /* FIXME: when should we set following bit to zero? */ + NISTC_AO_PERSONAL_TMRDACWR_PW | + (board->ao_fifo_depth ? + NISTC_AO_PERSONAL_FIFO_ENA : NISTC_AO_PERSONAL_DMA_PIO_CTRL) + ; +#if 0 + /* + * FIXME: + * add something like ".has_individual_dacs = 0" to ni_board_struct + * since, as F Hess pointed out, not all in m series have singles. not + * sure if e-series all have duals... + */ - if (devpriv->is_6xxx) { - ni_ao_win_outw(dev, NI611X_AO_MISC_CLEAR_WG, - NI611X_AO_MISC_REG); + /* + * F Hess: windows driver does not set NISTC_AO_PERSONAL_NUM_DAC bit for + * 6281, verified with bus analyzer. + */ + if (devpriv->is_m_series) + bits |= NISTC_AO_PERSONAL_NUM_DAC; +#endif + ni_stc_writew(dev, bits, NISTC_AO_PERSONAL_REG); - bits = 0; - for (i = 0; i < cmd->chanlist_len; i++) { - int chan; + ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG); +} - chan = CR_CHAN(cmd->chanlist[i]); - bits |= 1 << chan; - ni_ao_win_outw(dev, chan, NI611X_AO_WAVEFORM_GEN_REG); - } - ni_ao_win_outw(dev, bits, NI611X_AO_TIMED_REG); - } +static void ni_ao_cmd_set_trigger(struct comedi_device *dev, + const struct comedi_cmd *cmd) +{ + struct ni_private *devpriv = dev->private; - ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1); + ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); + /* sync */ if (cmd->stop_src == TRIG_NONE) { devpriv->ao_mode1 |= NISTC_AO_MODE1_CONTINUOUS; devpriv->ao_mode1 &= ~NISTC_AO_MODE1_TRIGGER_ONCE; @@ -2958,177 +3012,351 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) } ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG); - val = devpriv->ao_trigger_select; - switch (cmd->start_src) { - case TRIG_INT: - case TRIG_NOW: - val &= ~(NISTC_AO_TRIG_START1_POLARITY | - NISTC_AO_TRIG_START1_SEL_MASK); - val |= NISTC_AO_TRIG_START1_EDGE | - NISTC_AO_TRIG_START1_SYNC; - break; - case TRIG_EXT: - val = NISTC_AO_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) + 1); - if (cmd->start_arg & CR_INVERT) { - /* 0=active high, 1=active low. 
see daq-stc 3-24 (p186) */ - val |= NISTC_AO_TRIG_START1_POLARITY; - } - if (cmd->start_arg & CR_EDGE) { - /* 0=edge detection disabled, 1=enabled */ - val |= NISTC_AO_TRIG_START1_EDGE; + { + unsigned int trigsel = devpriv->ao_trigger_select; + + switch (cmd->start_src) { + case TRIG_INT: + case TRIG_NOW: + trigsel &= ~(NISTC_AO_TRIG_START1_POLARITY | + NISTC_AO_TRIG_START1_SEL_MASK); + trigsel |= NISTC_AO_TRIG_START1_EDGE | + NISTC_AO_TRIG_START1_SYNC; + break; + case TRIG_EXT: + trigsel = NISTC_AO_TRIG_START1_SEL( + CR_CHAN(cmd->start_arg) + 1); + if (cmd->start_arg & CR_INVERT) + /* + * 0=active high, 1=active low. + * see daq-stc 3-24 (p186) + */ + trigsel |= NISTC_AO_TRIG_START1_POLARITY; + if (cmd->start_arg & CR_EDGE) + /* 0=edge detection disabled, 1=enabled */ + trigsel |= NISTC_AO_TRIG_START1_EDGE; + break; + default: + BUG(); + break; } + + devpriv->ao_trigger_select = trigsel; ni_stc_writew(dev, devpriv->ao_trigger_select, NISTC_AO_TRIG_SEL_REG); - break; - default: - BUG(); - break; } - devpriv->ao_trigger_select = val; - ni_stc_writew(dev, devpriv->ao_trigger_select, NISTC_AO_TRIG_SEL_REG); + /* AO_Delayed_START1 = 0, we do not support delayed start...yet */ + /* sync */ + /* select DA_START1 as PFI6/AO_START1 when configured as an output */ devpriv->ao_mode3 &= ~NISTC_AO_MODE3_TRIG_LEN; ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG); + ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG); +} + +static void ni_ao_cmd_set_counters(struct comedi_device *dev, + const struct comedi_cmd *cmd) +{ + struct ni_private *devpriv = dev->private; + /* Not supporting 'waveform staging' or 'local buffer with pauses' */ + + ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); + /* + * This relies on ao_mode1/(Trigger_Once | Continuous) being set in + * set_trigger above. It is unclear whether we really need to re-write + * this register with these values. The mhddk examples for e-series + * show writing this in both places, but the examples for m-series show + * a single write in the set_counters function (here). + */ ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG); + + /* sync (upload number of buffer iterations -1) */ + /* indicate that we want to use BC_Load_A_Register as the source */ devpriv->ao_mode2 &= ~NISTC_AO_MODE2_BC_INIT_LOAD_SRC; ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG); - if (cmd->stop_src == TRIG_NONE) - ni_stc_writel(dev, 0xffffff, NISTC_AO_BC_LOADA_REG); - else - ni_stc_writel(dev, 0, NISTC_AO_BC_LOADA_REG); + + /* + * if the BC_TC interrupt is still issued in spite of UC, BC, UI + * ignoring BC_TC, then we will need to find a way to ignore that + * interrupt in continuous mode. + */ + ni_stc_writel(dev, 0, NISTC_AO_BC_LOADA_REG); /* iter once */ + + /* sync (issue command to load number of buffer iterations -1) */ ni_stc_writew(dev, NISTC_AO_CMD1_BC_LOAD, NISTC_AO_CMD1_REG); + + /* sync (upload number of updates in buffer) */ + /* indicate that we want to use UC_Load_A_Register as the source */ devpriv->ao_mode2 &= ~NISTC_AO_MODE2_UC_INIT_LOAD_SRC; ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG); - switch (cmd->stop_src) { - case TRIG_COUNT: + + /* + * if a user specifies '0', this automatically assumes the entire 24bit + * address space is available for the (multiple iterations of single + * buffer) MISB. Otherwise, stop_arg specifies the MISB length that + * will be used, regardless of whether we are in continuous mode or not. 
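As I read the stop_arg handling above and the relaxed ni_ao_cmdtest later in this patch, a user can now request continuous regeneration of a finite buffer by combining stop_src == TRIG_NONE with a non-zero stop_arg (the MISB length). A rough user-space sketch of such a command, assuming the usual comedilib command interface; the subdevice, channel list and 10 kHz rate are placeholders:

#include <string.h>
#include <comedilib.h>

/* Sketch: continuous regeneration of a finite AO buffer of n_scans scans. */
static int setup_ao_regen(comedi_t *dev, unsigned int subdev,
			  unsigned int *chanlist, unsigned int n_chan,
			  unsigned int n_scans)
{
	comedi_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev = subdev;
	cmd.start_src = TRIG_INT;	/* armed/started by internal trigger */
	cmd.start_arg = 0;
	cmd.scan_begin_src = TRIG_TIMER;
	cmd.scan_begin_arg = 100000;	/* 100 us per scan, i.e. 10 kHz */
	cmd.convert_src = TRIG_NOW;
	cmd.convert_arg = 0;
	cmd.scan_end_src = TRIG_COUNT;
	cmd.scan_end_arg = n_chan;
	cmd.stop_src = TRIG_NONE;	/* continuous ... */
	cmd.stop_arg = n_scans;		/* ... over a finite n_scans buffer */
	cmd.chanlist = chanlist;
	cmd.chanlist_len = n_chan;

	return comedi_command_test(dev, &cmd) ? -1 : comedi_command(dev, &cmd);
}
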
+ * In continuous mode, the output will just iterate indefinitely over + * the MISB. + */ + { + unsigned int stop_arg = cmd->stop_arg > 0 ? + (cmd->stop_arg & 0xffffff) : 0xffffff; + if (devpriv->is_m_series) { - /* this is how the NI example code does it for m-series boards, verified correct with 6259 */ - ni_stc_writel(dev, cmd->stop_arg - 1, - NISTC_AO_UC_LOADA_REG); + /* + * this is how the NI example code does it for m-series + * boards, verified correct with 6259 + */ + ni_stc_writel(dev, stop_arg - 1, NISTC_AO_UC_LOADA_REG); + + /* sync (issue cmd to load number of updates in MISB) */ ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG); } else { - ni_stc_writel(dev, cmd->stop_arg, - NISTC_AO_UC_LOADA_REG); + ni_stc_writel(dev, stop_arg, NISTC_AO_UC_LOADA_REG); + + /* sync (issue cmd to load number of updates in MISB) */ ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG); - ni_stc_writel(dev, cmd->stop_arg - 1, - NISTC_AO_UC_LOADA_REG); + + /* + * sync (upload number of updates-1 in MISB) + * --eseries only? + */ + ni_stc_writel(dev, stop_arg - 1, NISTC_AO_UC_LOADA_REG); } - break; - case TRIG_NONE: - ni_stc_writel(dev, 0xffffff, NISTC_AO_UC_LOADA_REG); - ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG); - ni_stc_writel(dev, 0xffffff, NISTC_AO_UC_LOADA_REG); - break; - default: - ni_stc_writel(dev, 0, NISTC_AO_UC_LOADA_REG); - ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG); - ni_stc_writel(dev, cmd->stop_arg, NISTC_AO_UC_LOADA_REG); } - devpriv->ao_mode1 &= ~(NISTC_AO_MODE1_UPDATE_SRC_MASK | - NISTC_AO_MODE1_UI_SRC_MASK | - NISTC_AO_MODE1_UPDATE_SRC_POLARITY | - NISTC_AO_MODE1_UI_SRC_POLARITY); + ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG); +} + +static void ni_ao_cmd_set_update(struct comedi_device *dev, + const struct comedi_cmd *cmd) +{ + struct ni_private *devpriv = dev->private; + + ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); + + /* + * zero out these bit fields to be set below. Does an ao-reset do this + * automatically? + */ + devpriv->ao_mode1 &= ~( + NISTC_AO_MODE1_UI_SRC_MASK | + NISTC_AO_MODE1_UI_SRC_POLARITY | + NISTC_AO_MODE1_UPDATE_SRC_MASK | + NISTC_AO_MODE1_UPDATE_SRC_POLARITY + ); + switch (cmd->scan_begin_src) { case TRIG_TIMER: - devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA; - trigvar = - ni_ns_to_timer(dev, cmd->scan_begin_arg, - CMDF_ROUND_NEAREST); - ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG); - ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG); - ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG); + devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA; + + /* + * NOTE: there are several other ways of configuring internal + * updates, but we'll only support one for now: using + * AO_IN_TIMEBASE, w/o waveform staging, w/o a delay between + * START1 and first update, and also w/o local buffer mode w/ + * pauses. + */ + + /* + * This is already done above: + * devpriv->ao_mode1 &= ~( + * // set UPDATE_Source to UI_TC: + * NISTC_AO_MODE1_UPDATE_SRC_MASK | + * // set UPDATE_Source_Polarity to rising (required?) + * NISTC_AO_MODE1_UPDATE_SRC_POLARITY | + * // set UI_Source to AO_IN_TIMEBASE1: + * NISTC_AO_MODE1_UI_SRC_MASK | + * // set UI_Source_Polarity to rising (required?) + * NISTC_AO_MODE1_UI_SRC_POLARITY + * ); + */ + + /* + * TODO: use ao_ui_clock_source to allow all possible signals + * to be routed to UI_Source_Select. See tSTC.h for + * eseries/ni67xx and tMSeries.h for mseries. 
+ */ + + { + unsigned trigvar = ni_ns_to_timer(dev, + cmd->scan_begin_arg, + CMDF_ROUND_NEAREST); + + /* + * Wait N TB3 ticks after the start trigger before + * clocking(N must be >=2). + */ + /* following line: 2-1 per STC */ + ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG); + ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, + NISTC_AO_CMD1_REG); + /* following line: N-1 per STC */ + ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG); + } break; case TRIG_EXT: - devpriv->ao_mode1 |= - NISTC_AO_MODE1_UPDATE_SRC(cmd->scan_begin_arg); + /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */ + devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA; + devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC( + CR_CHAN(cmd->scan_begin_arg)); if (cmd->scan_begin_arg & CR_INVERT) devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY; - devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA; break; default: BUG(); break; } + ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG); ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG); devpriv->ao_mode2 &= ~(NISTC_AO_MODE2_UI_RELOAD_MODE(3) | NISTC_AO_MODE2_UI_INIT_LOAD_SRC); ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG); + /* Configure DAQ-STC for Timed update mode */ + devpriv->ao_cmd1 |= NISTC_AO_CMD1_DAC1_UPDATE_MODE | + NISTC_AO_CMD1_DAC0_UPDATE_MODE; + /* We are not using UPDATE2-->don't have to set DACx_Source_Select */ + ni_stc_writew(dev, devpriv->ao_cmd1, NISTC_AO_CMD1_REG); + + ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG); +} + +static void ni_ao_cmd_set_channels(struct comedi_device *dev, + struct comedi_subdevice *s) +{ + struct ni_private *devpriv = dev->private; + const struct comedi_cmd *cmd = &s->async->cmd; + unsigned bits = 0; + + ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); + + if (devpriv->is_6xxx) { + unsigned int i; + + bits = 0; + for (i = 0; i < cmd->chanlist_len; ++i) { + int chan = CR_CHAN(cmd->chanlist[i]); + + bits |= 1 << chan; + ni_ao_win_outw(dev, chan, NI611X_AO_WAVEFORM_GEN_REG); + } + ni_ao_win_outw(dev, bits, NI611X_AO_TIMED_REG); + } + + ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1); + if (cmd->scan_end_arg > 1) { devpriv->ao_mode1 |= NISTC_AO_MODE1_MULTI_CHAN; - ni_stc_writew(dev, - NISTC_AO_OUT_CTRL_CHANS(cmd->scan_end_arg - 1) | - NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ, - NISTC_AO_OUT_CTRL_REG); - } else { - unsigned bits; + bits = NISTC_AO_OUT_CTRL_CHANS(cmd->scan_end_arg - 1) + | NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ; + } else { devpriv->ao_mode1 &= ~NISTC_AO_MODE1_MULTI_CHAN; bits = NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ; - if (devpriv->is_m_series || devpriv->is_6xxx) { + if (devpriv->is_m_series | devpriv->is_6xxx) bits |= NISTC_AO_OUT_CTRL_CHANS(0); - } else { - bits |= - NISTC_AO_OUT_CTRL_CHANS(CR_CHAN(cmd->chanlist[0])); - } - ni_stc_writew(dev, bits, NISTC_AO_OUT_CTRL_REG); + else + bits |= NISTC_AO_OUT_CTRL_CHANS( + CR_CHAN(cmd->chanlist[0])); } + ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG); + ni_stc_writew(dev, bits, NISTC_AO_OUT_CTRL_REG); - ni_stc_writew(dev, NISTC_AO_CMD1_DAC1_UPDATE_MODE | - NISTC_AO_CMD1_DAC0_UPDATE_MODE, - NISTC_AO_CMD1_REG); + ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG); +} + +static void ni_ao_cmd_set_stop_conditions(struct comedi_device *dev, + const struct comedi_cmd *cmd) +{ + struct ni_private *devpriv = dev->private; + + ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); devpriv->ao_mode3 |= NISTC_AO_MODE3_STOP_ON_OVERRUN_ERR; ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG); + /* + 
* Since we are not supporting waveform staging, we ignore these errors: + * NISTC_AO_MODE3_STOP_ON_BC_TC_ERR, + * NISTC_AO_MODE3_STOP_ON_BC_TC_TRIG_ERR + */ + + ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG); +} + +static void ni_ao_cmd_set_fifo_mode(struct comedi_device *dev) +{ + struct ni_private *devpriv = dev->private; + + ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); + devpriv->ao_mode2 &= ~NISTC_AO_MODE2_FIFO_MODE_MASK; #ifdef PCIDMA devpriv->ao_mode2 |= NISTC_AO_MODE2_FIFO_MODE_HF_F; #else devpriv->ao_mode2 |= NISTC_AO_MODE2_FIFO_MODE_HF; #endif + /* NOTE: this is where use_onboard_memory=True would be implemented */ devpriv->ao_mode2 &= ~NISTC_AO_MODE2_FIFO_REXMIT_ENA; ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG); - bits = NISTC_AO_PERSONAL_BC_SRC_SEL | - NISTC_AO_PERSONAL_UPDATE_PW | - NISTC_AO_PERSONAL_TMRDACWR_PW; - if (board->ao_fifo_depth) - bits |= NISTC_AO_PERSONAL_FIFO_ENA; - else - bits |= NISTC_AO_PERSONAL_DMA_PIO_CTRL; -#if 0 - /* - * F Hess: windows driver does not set NISTC_AO_PERSONAL_NUM_DAC bit - * for 6281, verified with bus analyzer. - */ - if (devpriv->is_m_series) - bits |= NISTC_AO_PERSONAL_NUM_DAC; -#endif - ni_stc_writew(dev, bits, NISTC_AO_PERSONAL_REG); - /* enable sending of ao dma requests */ + /* enable sending of ao fifo requests (dma request) */ ni_stc_writew(dev, NISTC_AO_START_AOFREQ_ENA, NISTC_AO_START_SEL_REG); ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG); - if (cmd->stop_src == TRIG_COUNT) { - ni_stc_writew(dev, NISTC_INTB_ACK_AO_BC_TC, - NISTC_INTB_ACK_REG); + /* we are not supporting boards with virtual fifos */ +} + +static void ni_ao_cmd_set_interrupts(struct comedi_device *dev, + struct comedi_subdevice *s) +{ + if (s->async->cmd.stop_src == TRIG_COUNT) ni_set_bits(dev, NISTC_INTB_ENA_REG, NISTC_INTB_ENA_AO_BC_TC, 1); - } s->async->inttrig = ni_ao_inttrig; +} + +static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) +{ + struct ni_private *devpriv = dev->private; + const struct comedi_cmd *cmd = &s->async->cmd; + + if (dev->irq == 0) { + dev_err(dev->class_dev, "cannot run command without an irq"); + return -EIO; + } + + /* ni_ao_reset should have already been done */ + ni_ao_cmd_personalize(dev, cmd); + /* clearing fifo and preload happens elsewhere */ + ni_ao_cmd_set_trigger(dev, cmd); + ni_ao_cmd_set_counters(dev, cmd); + ni_ao_cmd_set_update(dev, cmd); + ni_ao_cmd_set_channels(dev, s); + ni_ao_cmd_set_stop_conditions(dev, cmd); + ni_ao_cmd_set_fifo_mode(dev); + ni_cmd_set_mite_transfer(devpriv->ao_mite_ring, s, cmd, 0x00ffffff); + ni_ao_cmd_set_interrupts(dev, s); + + /* + * arm(ing) and star(ting) happen in ni_ao_inttrig, which _must_ be + * called for ao commands since 1) TRIG_NOW is not supported and 2) DMA + * must be setup and initially written to before arm/start happen. 
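Because TRIG_NOW is not supported and the DMA buffer has to be preloaded before arming, starting one of these AO commands from user space follows the usual comedi internal-trigger pattern. A rough comedilib sketch, with the device, subdevice and sample buffer as placeholders; the trigger number passed to comedi_internal_trigger() is assumed to have to match cmd->start_arg, which is also the convention the ni_tiocmd fix later in this series enforces for counter commands:

#include <unistd.h>
#include <comedilib.h>

/* Sketch of the implied user-space sequence: issue the command, pre-fill
 * the buffer so DMA has data, then arm/start via the internal trigger. */
static int start_ao(comedi_t *dev, unsigned int subdev, comedi_cmd *cmd,
		    const void *samples, size_t nbytes)
{
	if (comedi_command(dev, cmd) < 0)
		return -1;

	/* Preload the comedi buffer; with TRIG_INT nothing runs yet. */
	if (write(comedi_fileno(dev), samples, nbytes) != (ssize_t)nbytes)
		return -1;

	/* trig_num must equal cmd->start_arg (0 in the sketch above). */
	return comedi_internal_trigger(dev, subdev, cmd->start_arg);
}
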
+ */ return 0; } +/* end ni_ao_cmd */ + static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { @@ -3187,11 +3415,7 @@ static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); - - if (cmd->stop_src == TRIG_COUNT) - err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff); - else /* TRIG_NONE */ - err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); + err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff); if (err) return 3; @@ -3214,48 +3438,70 @@ static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s) { + /* See 3.6.1.2 "Resetting", of DAQ-STC Technical Reference Manual */ + + /* + * In the following, the "--sync" comments are meant to denote + * asynchronous boundaries for setting the registers as described in the + * DAQ-STC mostly in the order also described in the DAQ-STC. + */ + struct ni_private *devpriv = dev->private; ni_release_ao_mite_channel(dev); + /* --sync (reset AO) */ + if (devpriv->is_m_series) + /* following example in mhddk for m-series */ + ni_stc_writew(dev, NISTC_RESET_AO, NISTC_RESET_REG); + + /*--sync (start config) */ ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG); + + /*--sync (Disarm) */ ni_stc_writew(dev, NISTC_AO_CMD1_DISARM, NISTC_AO_CMD1_REG); - ni_set_bits(dev, NISTC_INTB_ENA_REG, ~0, 0); - ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL, NISTC_AO_PERSONAL_REG); - ni_stc_writew(dev, NISTC_INTB_ACK_AO_ALL, NISTC_INTB_ACK_REG); - ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL | - NISTC_AO_PERSONAL_UPDATE_PW | - NISTC_AO_PERSONAL_TMRDACWR_PW, - NISTC_AO_PERSONAL_REG); - ni_stc_writew(dev, 0, NISTC_AO_OUT_CTRL_REG); - ni_stc_writew(dev, 0, NISTC_AO_START_SEL_REG); - devpriv->ao_cmd1 = 0; - ni_stc_writew(dev, devpriv->ao_cmd1, NISTC_AO_CMD1_REG); - devpriv->ao_cmd2 = 0; - ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG); + + /* + * --sync + * (clear bunch of registers--mseries mhddk examples do not include + * this) + */ + devpriv->ao_cmd1 = 0; + devpriv->ao_cmd2 = 0; devpriv->ao_mode1 = 0; - ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG); devpriv->ao_mode2 = 0; - ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG); if (devpriv->is_m_series) devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE; else devpriv->ao_mode3 = 0; - ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG); devpriv->ao_trigger_select = 0; - ni_stc_writew(dev, devpriv->ao_trigger_select, - NISTC_AO_TRIG_SEL_REG); - if (devpriv->is_6xxx) { - unsigned immediate_bits = 0; - unsigned i; - for (i = 0; i < s->n_chan; ++i) - immediate_bits |= 1 << i; - ni_ao_win_outw(dev, immediate_bits, NI671X_AO_IMMEDIATE_REG); + ni_stc_writew(dev, 0, NISTC_AO_PERSONAL_REG); + ni_stc_writew(dev, 0, NISTC_AO_CMD1_REG); + ni_stc_writew(dev, 0, NISTC_AO_CMD2_REG); + ni_stc_writew(dev, 0, NISTC_AO_MODE1_REG); + ni_stc_writew(dev, 0, NISTC_AO_MODE2_REG); + ni_stc_writew(dev, 0, NISTC_AO_OUT_CTRL_REG); + ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG); + ni_stc_writew(dev, 0, NISTC_AO_START_SEL_REG); + ni_stc_writew(dev, 0, NISTC_AO_TRIG_SEL_REG); + + /*--sync (disable interrupts) */ + ni_set_bits(dev, NISTC_INTB_ENA_REG, ~0, 0); + + /*--sync (ack) */ + ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL, NISTC_AO_PERSONAL_REG); + ni_stc_writew(dev, 
NISTC_INTB_ACK_AO_ALL, NISTC_INTB_ACK_REG); + + /*--not in DAQ-STC. which doc? */ + if (devpriv->is_6xxx) { + ni_ao_win_outw(dev, (1u << s->n_chan) - 1u, + NI671X_AO_IMMEDIATE_REG); ni_ao_win_outw(dev, NI611X_AO_MISC_CLEAR_WG, NI611X_AO_MISC_REG); } ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG); + /*--end */ return 0; } @@ -3381,7 +3627,9 @@ static int ni_cdio_cmdtest(struct comedi_device *dev, err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); - err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); + err |= comedi_check_trigger_arg_max(&cmd->stop_arg, + s->async->prealloc_bufsz / + comedi_bytes_per_scan(s)); if (err) return 3; @@ -3458,6 +3706,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev, static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { + struct ni_private *devpriv = dev->private; const struct comedi_cmd *cmd = &s->async->cmd; unsigned cdo_mode_bits; int retval; @@ -3482,6 +3731,10 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) if (retval < 0) return retval; + ni_cmd_set_mite_transfer(devpriv->cdo_mite_ring, s, cmd, + s->async->prealloc_bufsz / + comedi_bytes_per_scan(s)); + s->async->inttrig = ni_cdo_inttrig; return 0; @@ -3980,34 +4233,30 @@ static int ni_m_series_pwm_config(struct comedi_device *dev, case INSN_CONFIG_PWM_OUTPUT: switch (data[1]) { case CMDF_ROUND_NEAREST: - up_count = - (data[2] + - devpriv->clock_ns / 2) / devpriv->clock_ns; + up_count = DIV_ROUND_CLOSEST(data[2], + devpriv->clock_ns); break; case CMDF_ROUND_DOWN: up_count = data[2] / devpriv->clock_ns; break; case CMDF_ROUND_UP: up_count = - (data[2] + devpriv->clock_ns - - 1) / devpriv->clock_ns; + DIV_ROUND_UP(data[2], devpriv->clock_ns); break; default: return -EINVAL; } switch (data[3]) { case CMDF_ROUND_NEAREST: - down_count = - (data[4] + - devpriv->clock_ns / 2) / devpriv->clock_ns; + down_count = DIV_ROUND_CLOSEST(data[4], + devpriv->clock_ns); break; case CMDF_ROUND_DOWN: down_count = data[4] / devpriv->clock_ns; break; case CMDF_ROUND_UP: down_count = - (data[4] + devpriv->clock_ns - - 1) / devpriv->clock_ns; + DIV_ROUND_UP(data[4], devpriv->clock_ns); break; default: return -EINVAL; @@ -4044,34 +4293,30 @@ static int ni_6143_pwm_config(struct comedi_device *dev, case INSN_CONFIG_PWM_OUTPUT: switch (data[1]) { case CMDF_ROUND_NEAREST: - up_count = - (data[2] + - devpriv->clock_ns / 2) / devpriv->clock_ns; + up_count = DIV_ROUND_CLOSEST(data[2], + devpriv->clock_ns); break; case CMDF_ROUND_DOWN: up_count = data[2] / devpriv->clock_ns; break; case CMDF_ROUND_UP: up_count = - (data[2] + devpriv->clock_ns - - 1) / devpriv->clock_ns; + DIV_ROUND_UP(data[2], devpriv->clock_ns); break; default: return -EINVAL; } switch (data[3]) { case CMDF_ROUND_NEAREST: - down_count = - (data[4] + - devpriv->clock_ns / 2) / devpriv->clock_ns; + down_count = DIV_ROUND_CLOSEST(data[4], + devpriv->clock_ns); break; case CMDF_ROUND_DOWN: down_count = data[4] / devpriv->clock_ns; break; case CMDF_ROUND_UP: down_count = - (data[4] + devpriv->clock_ns - - 1) / devpriv->clock_ns; + DIV_ROUND_UP(data[4], devpriv->clock_ns); break; default: return -EINVAL; @@ -4665,9 +4910,9 @@ static int ni_mseries_get_pll_parameters(unsigned reference_period_ns, *freq_divider = best_div; *freq_multiplier = best_mult; - *actual_period_ns = - (best_period_picosec * fudge_factor_80_to_20Mhz + - (pico_per_nano / 2)) / pico_per_nano; + *actual_period_ns = 
DIV_ROUND_CLOSEST(best_period_picosec * + fudge_factor_80_to_20Mhz, + pico_per_nano); return 0; } @@ -5024,7 +5269,6 @@ static irqreturn_t ni_E_interrupt(int irq, void *d) unsigned long flags; #ifdef PCIDMA struct ni_private *devpriv = dev->private; - struct mite_struct *mite = devpriv->mite; #endif if (!dev->attached) @@ -5036,8 +5280,7 @@ static irqreturn_t ni_E_interrupt(int irq, void *d) a_status = ni_stc_readw(dev, NISTC_AI_STATUS1_REG); b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG); #ifdef PCIDMA - if (mite) { - struct ni_private *devpriv = dev->private; + if (devpriv->mite) { unsigned long flags_too; spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too); @@ -5053,7 +5296,7 @@ static irqreturn_t ni_E_interrupt(int irq, void *d) ao_mite_status = mite_get_status(devpriv->ao_mite_chan); if (ao_mite_status & CHSR_LINKC) writel(CHOR_CLRLC, - mite->mite_io_addr + + devpriv->mite->mite_io_addr + MITE_CHOR(devpriv-> ao_mite_chan->channel)); } diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c index ac79099bc23e..7112c3fec8bb 100644 --- a/drivers/staging/comedi/drivers/ni_pcidio.c +++ b/drivers/staging/comedi/drivers/ni_pcidio.c @@ -525,13 +525,13 @@ static int ni_pcidio_ns_to_timer(int *nanosec, unsigned int flags) switch (flags & CMDF_ROUND_MASK) { case CMDF_ROUND_NEAREST: default: - divider = (*nanosec + base / 2) / base; + divider = DIV_ROUND_CLOSEST(*nanosec, base); break; case CMDF_ROUND_DOWN: divider = (*nanosec) / base; break; case CMDF_ROUND_UP: - divider = (*nanosec + base - 1) / base; + divider = DIV_ROUND_UP(*nanosec, base); break; } diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c index 30a5a75d1fe7..231e37d6b7c6 100644 --- a/drivers/staging/comedi/drivers/ni_pcimio.c +++ b/drivers/staging/comedi/drivers/ni_pcimio.c @@ -26,7 +26,8 @@ Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio), PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E, PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E, PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224, - PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251, + PCI-6225, PXI-6225, PCI-6229, PCI-6250, + PCI-6251, PXI-6251, PCIe-6251, PXIe-6251, PCI-6254, PCI-6259, PCIe-6259, PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289, PCI-6711, PXI-6711, PCI-6713, PXI-6713, @@ -193,6 +194,7 @@ enum ni_pcimio_boardid { BOARD_PCI6229, BOARD_PCI6250, BOARD_PCI6251, + BOARD_PXI6251, BOARD_PCIE6251, BOARD_PXIE6251, BOARD_PCI6254, @@ -811,6 +813,21 @@ static const struct ni_board_struct ni_boards[] = { .ao_speed = 350, .caldac = { caldac_none }, }, + [BOARD_PXI6251] = { + .name = "pxi-6251", + .n_adchan = 16, + .ai_maxdata = 0xffff, + .ai_fifo_depth = 4095, + .gainlkup = ai_gain_628x, + .ai_speed = 800, + .n_aochan = 2, + .ao_maxdata = 0xffff, + .ao_fifo_depth = 8191, + .ao_range_table = &range_ni_M_625x_ao, + .reg_type = ni_reg_625x, + .ao_speed = 350, + .caldac = { caldac_none }, + }, [BOARD_PCIE6251] = { .name = "pcie-6251", .n_adchan = 16, @@ -1290,6 +1307,7 @@ static const struct pci_device_id ni_pcimio_pci_table[] = { { PCI_VDEVICE(NI, 0x71bc), BOARD_PCI6221_37PIN }, { PCI_VDEVICE(NI, 0x717d), BOARD_PCIE6251 }, { PCI_VDEVICE(NI, 0x72e8), BOARD_PXIE6251 }, + { PCI_VDEVICE(NI, 0x70ad), BOARD_PXI6251 }, { 0 } }; MODULE_DEVICE_TABLE(pci, ni_pcimio_pci_table); diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c index 
437f723bb34d..823e47910004 100644 --- a/drivers/staging/comedi/drivers/ni_tiocmd.c +++ b/drivers/staging/comedi/drivers/ni_tiocmd.c @@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev, unsigned long flags; int ret = 0; - if (trig_num != cmd->start_src) + if (trig_num != cmd->start_arg) return -EINVAL; spin_lock_irqsave(&counter->lock, flags); diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c index 68ac02b68cb2..9b6c56773247 100644 --- a/drivers/staging/comedi/drivers/rtd520.c +++ b/drivers/staging/comedi/drivers/rtd520.c @@ -892,9 +892,8 @@ static int rtd_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->xfer_count = cmd->chanlist_len; } else { /* make a multiple of scan length */ devpriv->xfer_count = - (devpriv->xfer_count + - cmd->chanlist_len - 1) - / cmd->chanlist_len; + DIV_ROUND_UP(devpriv->xfer_count, + cmd->chanlist_len); devpriv->xfer_count *= cmd->chanlist_len; } devpriv->flags |= SEND_EOS; diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index 35f0f676eb28..c5e08635e01e 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -1167,12 +1167,6 @@ static void s626_set_clk_mult(struct comedi_device *dev, s626_set_mode(dev, chan, mode, false); } -static uint16_t s626_get_clk_mult(struct comedi_device *dev, - unsigned int chan) -{ - return S626_GET_STD_CLKMULT(s626_get_mode(dev, chan)); -} - /* * Return/set the clock polarity. */ @@ -1188,12 +1182,6 @@ static void s626_set_clk_pol(struct comedi_device *dev, s626_set_mode(dev, chan, mode, false); } -static uint16_t s626_get_clk_pol(struct comedi_device *dev, - unsigned int chan) -{ - return S626_GET_STD_CLKPOL(s626_get_mode(dev, chan)); -} - /* * Return/set the encoder mode. */ @@ -1209,27 +1197,6 @@ static void s626_set_enc_mode(struct comedi_device *dev, s626_set_mode(dev, chan, mode, false); } -static uint16_t s626_get_enc_mode(struct comedi_device *dev, - unsigned int chan) -{ - return S626_GET_STD_ENCMODE(s626_get_mode(dev, chan)); -} - -/* - * Return/set the index polarity. 
- */ -static void s626_set_index_pol(struct comedi_device *dev, - unsigned int chan, uint16_t value) -{ - uint16_t mode; - - mode = s626_get_mode(dev, chan); - mode &= ~S626_STDMSK_INDXPOL; - mode |= S626_SET_STD_INDXPOL(value != 0); - - s626_set_mode(dev, chan, mode, false); -} - static uint16_t s626_get_index_pol(struct comedi_device *dev, unsigned int chan) { diff --git a/drivers/staging/dgap/Kconfig b/drivers/staging/dgap/Kconfig deleted file mode 100644 index 3bbe9e122365..000000000000 --- a/drivers/staging/dgap/Kconfig +++ /dev/null @@ -1,6 +0,0 @@ -config DGAP - tristate "Digi EPCA PCI products" - default n - depends on TTY && HAS_IOMEM - ---help--- - Driver for the Digi International EPCA PCI based product line diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile deleted file mode 100644 index 0063d044ca71..000000000000 --- a/drivers/staging/dgap/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_DGAP) += dgap.o diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c deleted file mode 100644 index 294c1c83aa4d..000000000000 --- a/drivers/staging/dgap/dgap.c +++ /dev/null @@ -1,7057 +0,0 @@ -/* - * Copyright 2003 Digi International (www.digi.com) - * Scott H Kilau <Scott_Kilau at digi dot com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the - * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - * PURPOSE. See the GNU General Public License for more details. - * - */ - -/* - * In the original out of kernel Digi dgap driver, firmware - * loading was done via user land to driver handshaking. - * - * For cards that support a concentrator (port expander), - * I believe the concentrator its self told the card which - * concentrator is actually attached and then that info - * was used to tell user land which concentrator firmware - * image was to be downloaded. I think even the BIOS or - * FEP images required could change with the connection - * of a particular concentrator. - * - * Since I have no access to any of these cards or - * concentrators, I cannot put the correct concentrator - * firmware file names into the firmware_info structure - * as is now done for the BIOS and FEP images. - * - * I think, but am not certain, that the cards supporting - * concentrators will function without them. So support - * of these cards has been left in this driver. - * - * In order to fully support those cards, they would - * either have to be acquired for dissection or maybe - * Digi International could provide some assistance. 
- */ -#undef DIGI_CONCENTRATORS_SUPPORTED - -#define pr_fmt(fmt) "dgap: " fmt - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/delay.h> /* For udelay */ -#include <linux/slab.h> -#include <linux/uaccess.h> -#include <linux/sched.h> - -#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */ -#include <linux/ctype.h> -#include <linux/tty.h> -#include <linux/tty_flip.h> -#include <linux/serial_reg.h> -#include <linux/io.h> /* For read[bwl]/write[bwl] */ - -#include <linux/string.h> -#include <linux/device.h> -#include <linux/kdev_t.h> -#include <linux/firmware.h> - -#include "dgap.h" - -/* - * File operations permitted on Control/Management major. - */ -static const struct file_operations dgap_board_fops = { - .owner = THIS_MODULE, -}; - -static uint dgap_numboards; -static struct board_t *dgap_board[MAXBOARDS]; -static ulong dgap_poll_counter; -static int dgap_driver_state = DRIVER_INITIALIZED; -static int dgap_poll_tick = 20; /* Poll interval - 20 ms */ - -static struct class *dgap_class; - -static uint dgap_count = 500; - -/* - * Poller stuff - */ -static DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */ -static ulong dgap_poll_time; /* Time of next poll */ -static uint dgap_poll_stop; /* Used to tell poller to stop */ -static struct timer_list dgap_poll_timer; - -/* - SUPPORTED PRODUCTS - - Card Model Number of Ports Interface - ---------------------------------------------------------------- - Acceleport Xem 4 - 64 (EIA232 & EIA422) - Acceleport Xr 4 & 8 (EIA232) - Acceleport Xr 920 4 & 8 (EIA232) - Acceleport C/X 8 - 128 (EIA232) - Acceleport EPC/X 8 - 224 (EIA232) - Acceleport Xr/422 4 & 8 (EIA422) - Acceleport 2r/920 2 (EIA232) - Acceleport 4r/920 4 (EIA232) - Acceleport 8r/920 8 (EIA232) - - IBM 8-Port Asynchronous PCI Adapter (EIA232) - IBM 128-Port Asynchronous PCI Adapter (EIA232 & EIA422) -*/ - -static struct pci_device_id dgap_pci_tbl[] = { - { DIGI_VID, PCI_DEV_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { DIGI_VID, PCI_DEV_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, - { DIGI_VID, PCI_DEV_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, - { DIGI_VID, PCI_DEV_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, - { DIGI_VID, PCI_DEV_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, - { DIGI_VID, PCI_DEV_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, - { DIGI_VID, PCI_DEV_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, - { DIGI_VID, PCI_DEV_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, - { DIGI_VID, PCI_DEV_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, - { DIGI_VID, PCI_DEV_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, - { DIGI_VID, PCI_DEV_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, - { DIGI_VID, PCI_DEV_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, - { DIGI_VID, PCI_DEV_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, - { DIGI_VID, PCI_DEV_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 }, - { DIGI_VID, PCI_DEV_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 }, - {0,} /* 0 terminated list. */ -}; -MODULE_DEVICE_TABLE(pci, dgap_pci_tbl); - -/* - * A generic list of Product names, PCI Vendor ID, and PCI Device ID. 
- */ -struct board_id { - uint config_type; - u8 *name; - uint maxports; - uint dpatype; -}; - -static struct board_id dgap_ids[] = { - {PPCM, PCI_DEV_XEM_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS)}, - {PCX, PCI_DEV_CX_NAME, 128, (T_CX | T_PCIBUS) }, - {PCX, PCI_DEV_CX_IBM_NAME, 128, (T_CX | T_PCIBUS) }, - {PEPC, PCI_DEV_EPCJ_NAME, 224, (T_EPC | T_PCIBUS) }, - {APORT2_920P, PCI_DEV_920_2_NAME, 2, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {APORT4_920P, PCI_DEV_920_4_NAME, 4, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {APORT8_920P, PCI_DEV_920_8_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {PAPORT8, PCI_DEV_XR_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {PAPORT8, PCI_DEV_XRJ_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {PAPORT8, PCI_DEV_XR_422_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {PAPORT8, PCI_DEV_XR_IBM_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {PAPORT8, PCI_DEV_XR_SAIP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {PAPORT8, PCI_DEV_XR_BULL_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {APORT8_920P, PCI_DEV_920_8_HP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)}, - {PPCM, PCI_DEV_XEM_HP_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS)}, - {0,} /* 0 terminated list. */ -}; - -struct firmware_info { - u8 *conf_name; /* dgap.conf */ - u8 *bios_name; /* BIOS filename */ - u8 *fep_name; /* FEP filename */ - u8 *con_name; /* Concentrator filename FIXME*/ - int num; /* sequence number */ -}; - -/* - * Firmware - BIOS, FEP, and CONC filenames - */ -static struct firmware_info fw_info[] = { - { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", NULL, 0 }, - { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 1 }, - { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 2 }, - { "dgap/dgap.conf", "dgap/pcibios.bin", "dgap/pcifep.bin", NULL, 3 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 4 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 5 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 6 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 7 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 8 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 9 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 10 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 11 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 12 }, - { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 13 }, - { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", NULL, 14 }, - {NULL,} -}; - -/* - * Default transparent print information. - */ -static struct digi_t dgap_digi_init = { - .digi_flags = DIGI_COOK, /* Flags */ - .digi_maxcps = 100, /* Max CPS */ - .digi_maxchar = 50, /* Max chars in print queue */ - .digi_bufsize = 100, /* Printer buffer size */ - .digi_onlen = 4, /* size of printer on string */ - .digi_offlen = 4, /* size of printer off string */ - .digi_onstr = "\033[5i", /* ANSI printer on string ] */ - .digi_offstr = "\033[4i", /* ANSI printer off string ] */ - .digi_term = "ansi" /* default terminal type */ -}; - -/* - * Define a local default termios struct. All ports will be created - * with this termios initially. - * - * This defines a raw port at 9600 baud, 8 data bits, no parity, - * 1 stop bit. 
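The "raw port at 9600 baud, 8 data bits, no parity, 1 stop bit" default described above has the same shape as an ordinary POSIX raw-terminal setup. Purely as an illustration of that configuration in user-space termios terms (not the driver's ktermios or its DEFAULT_* flag macros):

#include <termios.h>

/* Illustration only: raw 9600 baud, 8N1, via the POSIX termios API. */
static int make_raw_9600_8n1(int fd)
{
	struct termios tio;

	if (tcgetattr(fd, &tio) < 0)
		return -1;

	cfmakeraw(&tio);	/* no echo, no canonical mode, no cooking */
	tio.c_cflag &= ~(CSIZE | PARENB | CSTOPB);
	tio.c_cflag |= CS8 | CREAD | CLOCAL;	/* 8 data bits, receiver on */
	cfsetispeed(&tio, B9600);
	cfsetospeed(&tio, B9600);

	return tcsetattr(fd, TCSANOW, &tio);
}
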
- */ - -static struct ktermios dgap_default_termios = { - .c_iflag = (DEFAULT_IFLAGS), /* iflags */ - .c_oflag = (DEFAULT_OFLAGS), /* oflags */ - .c_cflag = (DEFAULT_CFLAGS), /* cflags */ - .c_lflag = (DEFAULT_LFLAGS), /* lflags */ - .c_cc = INIT_C_CC, - .c_line = 0, -}; - -/* - * Our needed internal static variables from dgap_parse.c - */ -static struct cnode dgap_head; -#define MAXCWORD 200 -static char dgap_cword[MAXCWORD]; - -struct toklist { - int token; - char *string; -}; - -static struct toklist dgap_brdtype[] = { - { PCX, "Digi_AccelePort_C/X_PCI" }, - { PEPC, "Digi_AccelePort_EPC/X_PCI" }, - { PPCM, "Digi_AccelePort_Xem_PCI" }, - { APORT2_920P, "Digi_AccelePort_2r_920_PCI" }, - { APORT4_920P, "Digi_AccelePort_4r_920_PCI" }, - { APORT8_920P, "Digi_AccelePort_8r_920_PCI" }, - { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" }, - { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" }, - { 0, NULL } -}; - -static struct toklist dgap_tlist[] = { - { BEGIN, "config_begin" }, - { END, "config_end" }, - { BOARD, "board" }, - { PCIINFO, "pciinfo" }, - { LINE, "line" }, - { CONC, "conc" }, - { CONC, "concentrator" }, - { CX, "cx" }, - { CX, "ccon" }, - { EPC, "epccon" }, - { EPC, "epc" }, - { MOD, "module" }, - { ID, "id" }, - { STARTO, "start" }, - { SPEED, "speed" }, - { CABLE, "cable" }, - { CONNECT, "connect" }, - { METHOD, "method" }, - { STATUS, "status" }, - { CUSTOM, "Custom" }, - { BASIC, "Basic" }, - { MEM, "mem" }, - { MEM, "memory" }, - { PORTS, "ports" }, - { MODEM, "modem" }, - { NPORTS, "nports" }, - { TTYN, "ttyname" }, - { CU, "cuname" }, - { PRINT, "prname" }, - { CMAJOR, "major" }, - { ALTPIN, "altpin" }, - { USEINTR, "useintr" }, - { TTSIZ, "ttysize" }, - { CHSIZ, "chsize" }, - { BSSIZ, "boardsize" }, - { UNTSIZ, "schedsize" }, - { F2SIZ, "f2200size" }, - { VPSIZ, "vpixsize" }, - { 0, NULL } -}; - -/* - * get a word from the input stream, also keep track of current line number. - * words are separated by whitespace. - */ -static char *dgap_getword(char **in) -{ - char *ret_ptr = *in; - - char *ptr = strpbrk(*in, " \t\n"); - - /* If no word found, return null */ - if (!ptr) - return NULL; - - /* Mark new location for our buffer */ - *ptr = '\0'; - *in = ptr + 1; - - /* Eat any extra spaces/tabs/newlines that might be present */ - while (*in && **in && ((**in == ' ') || - (**in == '\t') || - (**in == '\n'))) { - **in = '\0'; - *in = *in + 1; - } - - return ret_ptr; -} - - -/* - * Get a token from the input file; return 0 if end of file is reached - */ -static int dgap_gettok(char **in) -{ - char *w; - struct toklist *t; - - if (strstr(dgap_cword, "board")) { - w = dgap_getword(in); - if (!w) - return 0; - snprintf(dgap_cword, MAXCWORD, "%s", w); - for (t = dgap_brdtype; t->token != 0; t++) { - if (!strcmp(w, t->string)) - return t->token; - } - } else { - while ((w = dgap_getword(in))) { - snprintf(dgap_cword, MAXCWORD, "%s", w); - for (t = dgap_tlist; t->token != 0; t++) { - if (!strcmp(w, t->string)) - return t->token; - } - } - } - - return 0; -} - -/* - * dgap_checknode: see if all the necessary info has been supplied for a node - * before creating the next node. 
- */ -static int dgap_checknode(struct cnode *p) -{ - switch (p->type) { - case LNODE: - if (p->u.line.v_speed == 0) { - pr_err("line speed not specified"); - return 1; - } - return 0; - - case CNODE: - if (p->u.conc.v_speed == 0) { - pr_err("concentrator line speed not specified"); - return 1; - } - if (p->u.conc.v_nport == 0) { - pr_err("number of ports on concentrator not specified"); - return 1; - } - if (p->u.conc.v_id == 0) { - pr_err("concentrator id letter not specified"); - return 1; - } - return 0; - - case MNODE: - if (p->u.module.v_nport == 0) { - pr_err("number of ports on EBI module not specified"); - return 1; - } - if (p->u.module.v_id == 0) { - pr_err("EBI module id letter not specified"); - return 1; - } - return 0; - } - return 0; -} - -/* - * Given a board pointer, returns whether we should use interrupts or not. - */ -static uint dgap_config_get_useintr(struct board_t *bd) -{ - struct cnode *p; - - if (!bd) - return 0; - - for (p = bd->bd_config; p; p = p->next) { - if (p->type == INTRNODE) { - /* - * check for pcxr types. - */ - return p->u.useintr; - } - } - - /* If not found, then don't turn on interrupts. */ - return 0; -} - -/* - * Given a board pointer, returns whether we turn on altpin or not. - */ -static uint dgap_config_get_altpin(struct board_t *bd) -{ - struct cnode *p; - - if (!bd) - return 0; - - for (p = bd->bd_config; p; p = p->next) { - if (p->type == ANODE) { - /* - * check for pcxr types. - */ - return p->u.altpin; - } - } - - /* If not found, then don't turn on interrupts. */ - return 0; -} - -/* - * Given a specific type of board, if found, detached link and - * returns the first occurrence in the list. - */ -static struct cnode *dgap_find_config(int type, int bus, int slot) -{ - struct cnode *p, *prev, *prev2, *found; - - p = &dgap_head; - - while (p->next) { - prev = p; - p = p->next; - - if (p->type != BNODE) - continue; - - if (p->u.board.type != type) - continue; - - if (p->u.board.v_pcibus && - p->u.board.pcibus != bus) - continue; - - if (p->u.board.v_pcislot && - p->u.board.pcislot != slot) - continue; - - found = p; - /* - * Keep walking thru the list till we - * find the next board. - */ - while (p->next) { - prev2 = p; - p = p->next; - - if (p->type != BNODE) - continue; - - /* - * Mark the end of our 1 board - * chain of configs. - */ - prev2->next = NULL; - - /* - * Link the "next" board to the - * previous board, effectively - * "unlinking" our board from - * the main config. - */ - prev->next = p; - - return found; - } - /* - * It must be the last board in the list. - */ - prev->next = NULL; - return found; - } - return NULL; -} - -/* - * Given a board pointer, walks the config link, counting up - * all ports user specified should be on the board. - * (This does NOT mean they are all actually present right now tho) - */ -static uint dgap_config_get_num_prts(struct board_t *bd) -{ - int count = 0; - struct cnode *p; - - if (!bd) - return 0; - - for (p = bd->bd_config; p; p = p->next) { - switch (p->type) { - case BNODE: - /* - * check for pcxr types. 
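/*
 * Illustrative sketch (not from the original dgap source): dgap_find_config()
 * earlier in this hunk cuts one board's run of config nodes (its BNODE up to,
 * but not including, the next BNODE) out of the global singly linked list and
 * hands that chain to the caller. A simplified equivalent of that splice,
 * with hypothetical types, looks like this.
 */
#include <stdio.h>
#include <stddef.h>

struct cfg_node {
        int is_board;                   /* stands in for p->type == BNODE */
        int board_id;
        struct cfg_node *next;
};

static struct cfg_node *splice_board(struct cfg_node *head, int board_id)
{
        struct cfg_node *prev, *p;

        for (prev = head, p = head->next; p; prev = p, p = p->next) {
                struct cfg_node *last;

                if (!p->is_board || p->board_id != board_id)
                        continue;

                /* walk to the node just before the next board (or the end) */
                last = p;
                while (last->next && !last->next->is_board)
                        last = last->next;

                prev->next = last->next;  /* close the gap in the main list */
                last->next = NULL;        /* terminate the detached chain   */
                return p;
        }
        return NULL;
}

int main(void)
{
        struct cfg_node n4 = { 1, 2, NULL };      /* second board          */
        struct cfg_node n3 = { 0, 0, &n4 };       /* line node for board 1 */
        struct cfg_node n2 = { 1, 1, &n3 };       /* first board           */
        struct cfg_node head = { 0, 0, &n2 };     /* list anchor           */
        struct cfg_node *found = splice_board(&head, 1);

        printf("found board %d, chain length %d, main list now starts at board %d\n",
               found->board_id, found->next ? 2 : 1, head.next->board_id);
        return 0;
}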
- */ - if (p->u.board.type > EPCFE) - count += p->u.board.nport; - break; - case CNODE: - count += p->u.conc.nport; - break; - case MNODE: - count += p->u.module.nport; - break; - } - } - return count; -} - -static char *dgap_create_config_string(struct board_t *bd, char *string) -{ - char *ptr = string; - struct cnode *p; - struct cnode *q; - int speed; - - if (!bd) { - *ptr = 0xff; - return string; - } - - for (p = bd->bd_config; p; p = p->next) { - switch (p->type) { - case LNODE: - *ptr = '\0'; - ptr++; - *ptr = p->u.line.speed; - ptr++; - break; - case CNODE: - /* - * Because the EPC/con concentrators can have EM modules - * hanging off of them, we have to walk ahead in the - * list and keep adding the number of ports on each EM - * to the config. UGH! - */ - speed = p->u.conc.speed; - q = p->next; - if (q && (q->type == MNODE)) { - *ptr = (p->u.conc.nport + 0x80); - ptr++; - p = q; - while (q->next && (q->next->type) == MNODE) { - *ptr = (q->u.module.nport + 0x80); - ptr++; - p = q; - q = q->next; - } - *ptr = q->u.module.nport; - ptr++; - } else { - *ptr = p->u.conc.nport; - ptr++; - } - - *ptr = speed; - ptr++; - break; - } - } - - *ptr = 0xff; - return string; -} - -/* - * Parse a configuration file read into memory as a string. - */ -static int dgap_parsefile(char **in) -{ - struct cnode *p, *brd, *line, *conc; - int rc; - char *s; - int linecnt = 0; - - p = &dgap_head; - brd = line = conc = NULL; - - /* perhaps we are adding to an existing list? */ - while (p->next) - p = p->next; - - /* file must start with a BEGIN */ - while ((rc = dgap_gettok(in)) != BEGIN) { - if (rc == 0) { - pr_err("unexpected EOF"); - return -1; - } - } - - for (; ;) { - int board_type = 0; - int conc_type = 0; - int module_type = 0; - - rc = dgap_gettok(in); - if (rc == 0) { - pr_err("unexpected EOF"); - return -1; - } - - switch (rc) { - case BEGIN: /* should only be 1 begin */ - pr_err("unexpected config_begin\n"); - return -1; - - case END: - return 0; - - case BOARD: /* board info */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - - p->type = BNODE; - p->u.board.status = kstrdup("No", GFP_KERNEL); - line = conc = NULL; - brd = p; - linecnt = -1; - - board_type = dgap_gettok(in); - if (board_type == 0) { - pr_err("board !!type not specified"); - return -1; - } - - p->u.board.type = board_type; - - break; - - case MEM: /* memory address */ - if (p->type != BNODE) { - pr_err("memory address only valid for boards"); - return -1; - } - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - kfree(p->u.board.addrstr); - p->u.board.addrstr = kstrdup(s, GFP_KERNEL); - if (kstrtoul(s, 0, &p->u.board.addr)) { - pr_err("bad number for memory address"); - return -1; - } - p->u.board.v_addr = 1; - break; - - case PCIINFO: /* pci information */ - if (p->type != BNODE) { - pr_err("memory address only valid for boards"); - return -1; - } - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - kfree(p->u.board.pcibusstr); - p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL); - if (kstrtoul(s, 0, &p->u.board.pcibus)) { - pr_err("bad number for pci bus"); - return -1; - } - p->u.board.v_pcibus = 1; - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - kfree(p->u.board.pcislotstr); - p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL); - if (kstrtoul(s, 0, &p->u.board.pcislot)) { - pr_err("bad number for pci slot"); - return -1; 
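/*
 * Illustrative sketch (not from the original dgap source): in
 * dgap_create_config_string() above, a concentrator with EM modules behind
 * it emits every port count except the last with 0x80 added, apparently as
 * a "more follows" marker, then the line speed; a single 0xff ends the whole
 * config string. A hypothetical userspace encoder for one such record:
 */
#include <stdio.h>
#include <stdint.h>

static size_t encode_conc_record(uint8_t *out, const uint8_t *nports,
                                 size_t count, uint8_t speed)
{
        size_t i, len = 0;

        for (i = 0; i < count; i++)
                out[len++] = (i + 1 < count) ? (uint8_t)(nports[i] + 0x80)
                                             : nports[i];
        out[len++] = speed;
        out[len++] = 0xff;      /* the terminator the driver appends once, at the end */
        return len;
}

int main(void)
{
        const uint8_t counts[] = { 16, 8, 8 };  /* concentrator plus two EM modules */
        uint8_t buf[8];
        size_t len = encode_conc_record(buf, counts, 3, 14), i;

        for (i = 0; i < len; i++)
                printf("%02x ", buf[i]);
        printf("\n");                           /* 90 88 08 0e ff */
        return 0;
}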
- } - p->u.board.v_pcislot = 1; - break; - - case METHOD: - if (p->type != BNODE) { - pr_err("install method only valid for boards"); - return -1; - } - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - kfree(p->u.board.method); - p->u.board.method = kstrdup(s, GFP_KERNEL); - p->u.board.v_method = 1; - break; - - case STATUS: - if (p->type != BNODE) { - pr_err("config status only valid for boards"); - return -1; - } - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - kfree(p->u.board.status); - p->u.board.status = kstrdup(s, GFP_KERNEL); - break; - - case NPORTS: /* number of ports */ - if (p->type == BNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.board.nport)) { - pr_err("bad number for number of ports"); - return -1; - } - p->u.board.v_nport = 1; - } else if (p->type == CNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.conc.nport)) { - pr_err("bad number for number of ports"); - return -1; - } - p->u.conc.v_nport = 1; - } else if (p->type == MNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.module.nport)) { - pr_err("bad number for number of ports"); - return -1; - } - p->u.module.v_nport = 1; - } else { - pr_err("nports only valid for concentrators or modules"); - return -1; - } - break; - - case ID: /* letter ID used in tty name */ - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - kfree(p->u.board.status); - p->u.board.status = kstrdup(s, GFP_KERNEL); - - if (p->type == CNODE) { - kfree(p->u.conc.id); - p->u.conc.id = kstrdup(s, GFP_KERNEL); - p->u.conc.v_id = 1; - } else if (p->type == MNODE) { - kfree(p->u.module.id); - p->u.module.id = kstrdup(s, GFP_KERNEL); - p->u.module.v_id = 1; - } else { - pr_err("id only valid for concentrators or modules"); - return -1; - } - break; - - case STARTO: /* start offset of ID */ - if (p->type == BNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.board.start)) { - pr_err("bad number for start of tty count"); - return -1; - } - p->u.board.v_start = 1; - } else if (p->type == CNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.conc.start)) { - pr_err("bad number for start of tty count"); - return -1; - } - p->u.conc.v_start = 1; - } else if (p->type == MNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.module.start)) { - pr_err("bad number for start of tty count"); - return -1; - } - p->u.module.v_start = 1; - } else { - pr_err("start only valid for concentrators or modules"); - return -1; - } - break; - - case TTYN: /* tty name prefix */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = TNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpeced end of file"); - return -1; - } - p->u.ttyname = kstrdup(s, GFP_KERNEL); - if (!p->u.ttyname) - return -1; - - break; - - case CU: /* cu name prefix */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = CUNODE; - - s = dgap_getword(in); - 
if (!s) { - pr_err("unexpeced end of file"); - return -1; - } - p->u.cuname = kstrdup(s, GFP_KERNEL); - if (!p->u.cuname) - return -1; - - break; - - case LINE: /* line information */ - if (dgap_checknode(p)) - return -1; - if (!brd) { - pr_err("must specify board before line info"); - return -1; - } - switch (brd->u.board.type) { - case PPCM: - pr_err("line not valid for PC/em"); - return -1; - } - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = LNODE; - conc = NULL; - line = p; - linecnt++; - break; - - case CONC: /* concentrator information */ - if (dgap_checknode(p)) - return -1; - if (!line) { - pr_err("must specify line info before concentrator"); - return -1; - } - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = CNODE; - conc = p; - - if (linecnt) - brd->u.board.conc2++; - else - brd->u.board.conc1++; - - conc_type = dgap_gettok(in); - if (conc_type == 0 || - (conc_type != CX && conc_type != EPC)) { - pr_err("failed to set a type of concentratros"); - return -1; - } - - p->u.conc.type = conc_type; - - break; - - case MOD: /* EBI module */ - if (dgap_checknode(p)) - return -1; - if (!brd) { - pr_err("must specify board info before EBI modules"); - return -1; - } - switch (brd->u.board.type) { - case PPCM: - linecnt = 0; - break; - default: - if (!conc) { - pr_err("must specify concentrator info before EBI module"); - return -1; - } - } - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = MNODE; - - if (linecnt) - brd->u.board.module2++; - else - brd->u.board.module1++; - - module_type = dgap_gettok(in); - if (module_type == 0 || - (module_type != PORTS && module_type != MODEM)) { - pr_err("failed to set a type of module"); - return -1; - } - - p->u.module.type = module_type; - - break; - - case CABLE: - if (p->type == LNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - kfree(p->u.line.cable); - p->u.line.cable = kstrdup(s, GFP_KERNEL); - p->u.line.v_cable = 1; - } - break; - - case SPEED: /* sync line speed indication */ - if (p->type == LNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.line.speed)) { - pr_err("bad number for line speed"); - return -1; - } - p->u.line.v_speed = 1; - } else if (p->type == CNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.conc.speed)) { - pr_err("bad number for line speed"); - return -1; - } - p->u.conc.v_speed = 1; - } else { - pr_err("speed valid only for lines or concentrators."); - return -1; - } - break; - - case CONNECT: - if (p->type == CNODE) { - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - kfree(p->u.conc.connect); - p->u.conc.connect = kstrdup(s, GFP_KERNEL); - p->u.conc.v_connect = 1; - } - break; - case PRINT: /* transparent print name prefix */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = PNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpeced end of file"); - return -1; - } - p->u.printname = kstrdup(s, GFP_KERNEL); - if (!p->u.printname) - return -1; - - break; - - case CMAJOR: /* major number */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), 
GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = JNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.majornumber)) { - pr_err("bad number for major number"); - return -1; - } - break; - - case ALTPIN: /* altpin setting */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = ANODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.altpin)) { - pr_err("bad number for altpin"); - return -1; - } - break; - - case USEINTR: /* enable interrupt setting */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = INTRNODE; - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.useintr)) { - pr_err("bad number for useintr"); - return -1; - } - break; - - case TTSIZ: /* size of tty structure */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = TSNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.ttysize)) { - pr_err("bad number for ttysize"); - return -1; - } - break; - - case CHSIZ: /* channel structure size */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = CSNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.chsize)) { - pr_err("bad number for chsize"); - return -1; - } - break; - - case BSSIZ: /* board structure size */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = BSNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.bssize)) { - pr_err("bad number for bssize"); - return -1; - } - break; - - case UNTSIZ: /* sched structure size */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = USNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.unsize)) { - pr_err("bad number for schedsize"); - return -1; - } - break; - - case F2SIZ: /* f2200 structure size */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = FSNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.f2size)) { - pr_err("bad number for f2200size"); - return -1; - } - break; - - case VPSIZ: /* vpix structure size */ - if (dgap_checknode(p)) - return -1; - - p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL); - if (!p->next) - return -ENOMEM; - - p = p->next; - p->type = VSNODE; - - s = dgap_getword(in); - if (!s) { - pr_err("unexpected end of file"); - return -1; - } - if (kstrtol(s, 0, &p->u.vpixsize)) { - pr_err("bad number for vpixsize"); - return -1; - } - break; - } - } -} - -static void dgap_cleanup_nodes(void) -{ - struct cnode 
*p; - - p = &dgap_head; - - while (p) { - struct cnode *tmp = p->next; - - if (p->type == NULLNODE) { - p = tmp; - continue; - } - - switch (p->type) { - case BNODE: - kfree(p->u.board.addrstr); - kfree(p->u.board.pcibusstr); - kfree(p->u.board.pcislotstr); - kfree(p->u.board.method); - break; - case CNODE: - kfree(p->u.conc.id); - kfree(p->u.conc.connect); - break; - case MNODE: - kfree(p->u.module.id); - break; - case TNODE: - kfree(p->u.ttyname); - break; - case CUNODE: - kfree(p->u.cuname); - break; - case LNODE: - kfree(p->u.line.cable); - break; - case PNODE: - kfree(p->u.printname); - break; - } - - kfree(p->u.board.status); - kfree(p); - p = tmp; - } -} - -/* - * Retrives the current custom baud rate from FEP memory, - * and returns it back to the user. - * Returns 0 on error. - */ -static uint dgap_get_custom_baud(struct channel_t *ch) -{ - u8 __iomem *vaddr; - ulong offset; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - - if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC) - return 0; - - if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS)) - return 0; - - vaddr = ch->ch_bd->re_map_membase; - - if (!vaddr) - return 0; - - /* - * Go get from fep mem, what the fep - * believes the custom baud rate is. - */ - offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28) - + LINE_SPEED; - - return readw(vaddr + offset); -} - -/* - * Remap PCI memory. - */ -static int dgap_remap(struct board_t *brd) -{ - if (!brd || brd->magic != DGAP_BOARD_MAGIC) - return -EIO; - - if (!request_mem_region(brd->membase, 0x200000, "dgap")) - return -ENOMEM; - - if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000, "dgap")) - goto err_req_mem; - - brd->re_map_membase = ioremap(brd->membase, 0x200000); - if (!brd->re_map_membase) - goto err_remap_mem; - - brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000); - if (!brd->re_map_port) - goto err_remap_port; - - return 0; - -err_remap_port: - iounmap(brd->re_map_membase); -err_remap_mem: - release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000); -err_req_mem: - release_mem_region(brd->membase, 0x200000); - - return -ENOMEM; -} - -static void dgap_unmap(struct board_t *brd) -{ - iounmap(brd->re_map_port); - iounmap(brd->re_map_membase); - release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000); - release_mem_region(brd->membase, 0x200000); -} - -/* - * dgap_parity_scan() - * - * Convert the FEP5 way of reporting parity errors and breaks into - * the Linux line discipline way. 
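/*
 * Illustrative sketch (not from the original dgap source):
 * dgap_get_custom_baud() above locates the per-channel speed word in FEP
 * memory as (segment << 4) + portnum * 0x28 + LINE_SPEED, i.e. the segment
 * register apparently holds a 16-byte-paragraph offset and each channel
 * appears to own a 0x28-byte slot. The constants below are hypothetical
 * stand-ins; the real ECS_SEG and LINE_SPEED values live in dgap.h.
 */
#include <stdio.h>

#define EX_LINE_SPEED 0x02u     /* hypothetical offset of the speed word in a slot */

static unsigned long custom_baud_offset(unsigned int ecs_seg_value,
                                        unsigned int portnum)
{
        return ((unsigned long)ecs_seg_value << 4)
                + portnum * 0x28 + EX_LINE_SPEED;
}

int main(void)
{
        unsigned int port;

        for (port = 0; port < 4; port++)
                printf("port %u -> FEP offset 0x%lx\n", port,
                       custom_baud_offset(0x0d00, port)); /* 0x0d00: example segment */
        return 0;
}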
- */ -static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, - unsigned char *fbuf, int *len) -{ - int l = *len; - int count = 0; - unsigned char *in, *cout, *fout; - unsigned char c; - - in = cbuf; - cout = cbuf; - fout = fbuf; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - while (l--) { - c = *in++; - switch (ch->pscan_state) { - default: - /* reset to sanity and fall through */ - ch->pscan_state = 0; - - case 0: - /* No FF seen yet */ - if (c == (unsigned char)'\377') - /* delete this character from stream */ - ch->pscan_state = 1; - else { - *cout++ = c; - *fout++ = TTY_NORMAL; - count += 1; - } - break; - - case 1: - /* first FF seen */ - if (c == (unsigned char)'\377') { - /* doubled ff, transform to single ff */ - *cout++ = c; - *fout++ = TTY_NORMAL; - count += 1; - ch->pscan_state = 0; - } else { - /* save value examination in next state */ - ch->pscan_savechar = c; - ch->pscan_state = 2; - } - break; - - case 2: - /* third character of ff sequence */ - - *cout++ = c; - - if (ch->pscan_savechar == 0x0) { - if (c == 0x0) { - ch->ch_err_break++; - *fout++ = TTY_BREAK; - } else { - ch->ch_err_parity++; - *fout++ = TTY_PARITY; - } - } - - count += 1; - ch->pscan_state = 0; - } - } - *len = count; -} - -/*======================================================================= - * - * dgap_input - Process received data. - * - * ch - Pointer to channel structure. - * - *=======================================================================*/ - -static void dgap_input(struct channel_t *ch) -{ - struct board_t *bd; - struct bs_t __iomem *bs; - struct tty_struct *tp; - struct tty_ldisc *ld; - uint rmask; - uint head; - uint tail; - int data_len; - ulong lock_flags; - ulong lock_flags2; - int flip_len; - int len; - int n; - u8 *buf; - u8 tmpchar; - int s; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - tp = ch->ch_tun.un_tty; - - bs = ch->ch_bs; - if (!bs) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - /* - * Figure the number of characters in the buffer. - * Exit immediately if none. - */ - - rmask = ch->ch_rsize - 1; - - head = readw(&bs->rx_head); - head &= rmask; - tail = readw(&bs->rx_tail); - tail &= rmask; - - data_len = (head - tail) & rmask; - - if (data_len == 0) { - writeb(1, &bs->idata); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return; - } - - /* - * If the device is not open, or CREAD is off, flush - * input data and return immediately. - */ - if ((bd->state != BOARD_READY) || !tp || - (tp->magic != TTY_MAGIC) || - !(ch->ch_tun.un_flags & UN_ISOPEN) || - !C_CREAD(tp) || - (ch->ch_tun.un_flags & UN_CLOSING)) { - writew(head, &bs->rx_tail); - writeb(1, &bs->idata); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return; - } - - /* - * If we are throttled, simply don't read any data. - */ - if (ch->ch_flags & CH_RXBLOCK) { - writeb(1, &bs->idata); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return; - } - - /* - * Ignore oruns. 
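/*
 * Illustrative sketch (not from the original dgap source): dgap_parity_scan()
 * above decodes the FEP5 in-band escape sequence:
 *   ff ff      -> one literal 0xff data byte
 *   ff 00 00   -> a break condition
 *   ff 00 <c>  -> byte <c> received with a parity error
 * The standalone state machine below mirrors that protocol; the corner case
 * where the middle byte is non-zero is simply passed through here.
 */
#include <stdio.h>

enum { FLAG_NORMAL, FLAG_BREAK, FLAG_PARITY };  /* stand-ins for the TTY_* flags */

static int parity_scan(const unsigned char *in, int len,
                       unsigned char *out, unsigned char *flags)
{
        int state = 0, count = 0, i;
        unsigned char saved = 0;

        for (i = 0; i < len; i++) {
                unsigned char c = in[i];

                switch (state) {
                case 0:                         /* no ff seen yet */
                        if (c == 0xff) {
                                state = 1;
                        } else {
                                out[count] = c;
                                flags[count++] = FLAG_NORMAL;
                        }
                        break;
                case 1:                         /* first ff seen */
                        if (c == 0xff) {        /* doubled ff: literal ff */
                                out[count] = c;
                                flags[count++] = FLAG_NORMAL;
                                state = 0;
                        } else {
                                saved = c;
                                state = 2;
                        }
                        break;
                case 2:                         /* third byte of the sequence */
                        out[count] = c;
                        flags[count++] = (saved != 0) ? FLAG_NORMAL :
                                         (c == 0) ? FLAG_BREAK : FLAG_PARITY;
                        state = 0;
                        break;
                }
        }
        return count;
}

int main(void)
{
        const unsigned char rx[] = { 'A', 0xff, 0xff, 0xff, 0x00, 'B', 'C' };
        unsigned char ch[sizeof(rx)], fl[sizeof(rx)];
        int n = parity_scan(rx, (int)sizeof(rx), ch, fl), i;

        for (i = 0; i < n; i++)
                printf("%02x flag=%d\n", ch[i], fl[i]);
        return 0;
}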
- */ - tmpchar = readb(&bs->orun); - if (tmpchar) { - ch->ch_err_overrun++; - writeb(0, &bs->orun); - } - - /* Decide how much data we can send into the tty layer */ - flip_len = TTY_FLIPBUF_SIZE; - - /* Chop down the length, if needed */ - len = min(data_len, flip_len); - len = min(len, (N_TTY_BUF_SIZE - 1)); - - ld = tty_ldisc_ref(tp); - -#ifdef TTY_DONT_FLIP - /* - * If the DONT_FLIP flag is on, don't flush our buffer, and act - * like the ld doesn't have any space to put the data right now. - */ - if (test_bit(TTY_DONT_FLIP, &tp->flags)) - len = 0; -#endif - - /* - * If we were unable to get a reference to the ld, - * don't flush our buffer, and act like the ld doesn't - * have any space to put the data right now. - */ - if (!ld) { - len = 0; - } else { - /* - * If ld doesn't have a pointer to a receive_buf function, - * flush the data, then act like the ld doesn't have any - * space to put the data right now. - */ - if (!ld->ops->receive_buf) { - writew(head, &bs->rx_tail); - len = 0; - } - } - - if (len <= 0) { - writeb(1, &bs->idata); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - if (ld) - tty_ldisc_deref(ld); - return; - } - - buf = ch->ch_bd->flipbuf; - n = len; - - /* - * n now contains the most amount of data we can copy, - * bounded either by our buffer size or the amount - * of data the card actually has pending... - */ - while (n) { - s = ((head >= tail) ? head : ch->ch_rsize) - tail; - s = min(s, n); - - if (s <= 0) - break; - - memcpy_fromio(buf, ch->ch_raddr + tail, s); - - tail += s; - buf += s; - - n -= s; - /* Flip queue if needed */ - tail &= rmask; - } - - writew(tail, &bs->rx_tail); - writeb(1, &bs->idata); - ch->ch_rxcount += len; - - /* - * If we are completely raw, we don't need to go through a lot - * of the tty layers that exist. - * In this case, we take the shortest and fastest route we - * can to relay the data to the user. - * - * On the other hand, if we are not raw, we need to go through - * the tty layer, which has its API more well defined. - */ - if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) { - dgap_parity_scan(ch, ch->ch_bd->flipbuf, - ch->ch_bd->flipflagbuf, &len); - - len = tty_buffer_request_room(tp->port, len); - tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf, - ch->ch_bd->flipflagbuf, len); - } else { - len = tty_buffer_request_room(tp->port, len); - tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len); - } - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - /* Tell the tty layer its okay to "eat" the data now */ - tty_flip_buffer_push(tp->port); - - if (ld) - tty_ldisc_deref(ld); -} - -static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch, - struct un_t *un, u32 mask) -{ - if (!(un->un_flags & mask)) - return; - - un->un_flags &= ~mask; - - if (!(un->un_flags & UN_ISOPEN)) - return; - - tty_wakeup(un->un_tty); - wake_up_interruptible(&un->un_flags_wait); -} - -/************************************************************************ - * Determines when CARRIER changes state and takes appropriate - * action. 
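/*
 * Illustrative sketch (not from the original dgap source): the copy loop
 * above drains the receive ring in at most two segments, first up to the
 * physical end of the buffer and then, if the data wrapped, a second run
 * from the start. ch_rsize is treated as a power of two, so occupancy is
 * simply (head - tail) & (rsize - 1). A userspace miniature:
 */
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8u            /* power of two, like ch_rsize */

static unsigned int ring_read(const char *ring, unsigned int head,
                              unsigned int *tail, char *dst, unsigned int n)
{
        unsigned int mask = RING_SIZE - 1;
        unsigned int avail = (head - *tail) & mask;
        unsigned int want = (n < avail) ? n : avail;
        unsigned int copied = 0;

        while (copied < want) {
                unsigned int t = *tail & mask;
                unsigned int seg = ((head > t) ? head : RING_SIZE) - t;

                if (seg > want - copied)
                        seg = want - copied;
                memcpy(dst + copied, ring + t, seg);
                *tail = (t + seg) & mask;
                copied += seg;
        }
        return copied;
}

int main(void)
{
        const char ring[RING_SIZE + 1] = "EFGHABCD";    /* wrapped: tail=4, head=2 */
        char out[RING_SIZE + 1] = { 0 };
        unsigned int tail = 4;
        unsigned int n = ring_read(ring, 2, &tail, out, RING_SIZE);

        printf("read %u bytes: %s\n", n, out);          /* read 6 bytes: ABCDEF */
        return 0;
}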
- ************************************************************************/ -static void dgap_carrier(struct channel_t *ch) -{ - struct board_t *bd; - - int virt_carrier = 0; - int phys_carrier = 0; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - /* Make sure altpin is always set correctly */ - if (ch->ch_digi.digi_flags & DIGI_ALTPIN) { - ch->ch_dsr = DM_CD; - ch->ch_cd = DM_DSR; - } else { - ch->ch_dsr = DM_DSR; - ch->ch_cd = DM_CD; - } - - if (ch->ch_mistat & D_CD(ch)) - phys_carrier = 1; - - if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) - virt_carrier = 1; - - if (ch->ch_c_cflag & CLOCAL) - virt_carrier = 1; - - /* - * Test for a VIRTUAL carrier transition to HIGH. - */ - if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) { - /* - * When carrier rises, wake any threads waiting - * for carrier in the open routine. - */ - - if (waitqueue_active(&(ch->ch_flags_wait))) - wake_up_interruptible(&ch->ch_flags_wait); - } - - /* - * Test for a PHYSICAL carrier transition to HIGH. - */ - if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) { - /* - * When carrier rises, wake any threads waiting - * for carrier in the open routine. - */ - - if (waitqueue_active(&(ch->ch_flags_wait))) - wake_up_interruptible(&ch->ch_flags_wait); - } - - /* - * Test for a PHYSICAL transition to low, so long as we aren't - * currently ignoring physical transitions (which is what "virtual - * carrier" indicates). - * - * The transition of the virtual carrier to low really doesn't - * matter... it really only means "ignore carrier state", not - * "make pretend that carrier is there". - */ - if ((virt_carrier == 0) && - ((ch->ch_flags & CH_CD) != 0) && - (phys_carrier == 0)) { - /* - * When carrier drops: - * - * Drop carrier on all open units. - * - * Flush queues, waking up any task waiting in the - * line discipline. - * - * Send a hangup to the control terminal. - * - * Enable all select calls. - */ - if (waitqueue_active(&(ch->ch_flags_wait))) - wake_up_interruptible(&ch->ch_flags_wait); - - if (ch->ch_tun.un_open_count > 0) - tty_hangup(ch->ch_tun.un_tty); - - if (ch->ch_pun.un_open_count > 0) - tty_hangup(ch->ch_pun.un_tty); - } - - /* - * Make sure that our cached values reflect the current reality. - */ - if (virt_carrier == 1) - ch->ch_flags |= CH_FCAR; - else - ch->ch_flags &= ~CH_FCAR; - - if (phys_carrier == 1) - ch->ch_flags |= CH_CD; - else - ch->ch_flags &= ~CH_CD; -} - -/*======================================================================= - * - * dgap_event - FEP to host event processing routine. - * - * bd - Board of current event. - * - *=======================================================================*/ -static int dgap_event(struct board_t *bd) -{ - struct channel_t *ch; - ulong lock_flags; - ulong lock_flags2; - struct bs_t __iomem *bs; - u8 __iomem *event; - u8 __iomem *vaddr; - struct ev_t __iomem *eaddr; - uint head; - uint tail; - int port; - int reason; - int modem; - - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return -EIO; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - - vaddr = bd->re_map_membase; - - if (!vaddr) { - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return -EIO; - } - - eaddr = (struct ev_t __iomem *)(vaddr + EVBUF); - - /* Get our head and tail */ - head = readw(&eaddr->ev_head); - tail = readw(&eaddr->ev_tail); - - /* - * Forget it if pointers out of range. 
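/*
 * Illustrative sketch (not from the original dgap source): boiled down,
 * dgap_carrier() above hangs a port up only when no "virtual" carrier
 * (DIGI_FORCEDCD or CLOCAL) is in force and physical DCD falls while the
 * cached state said it was up. A condensed decision helper:
 */
#include <stdio.h>

static int should_hangup(int *cd_was_up, int phys_cd, int virt_cd)
{
        int hangup = !virt_cd && *cd_was_up && !phys_cd;

        *cd_was_up = phys_cd;   /* cache current reality, as the driver does */
        return hangup;
}

int main(void)
{
        int cd_was_up = 1;

        printf("DCD drop with CLOCAL set:   hangup=%d\n",
               should_hangup(&cd_was_up, 0, 1));
        cd_was_up = 1;
        printf("DCD drop with CLOCAL clear: hangup=%d\n",
               should_hangup(&cd_was_up, 0, 0));
        return 0;
}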
- */ - - if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART || - (head | tail) & 03) { - /* Let go of board lock */ - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return -EIO; - } - - /* - * Loop to process all the events in the buffer. - */ - while (tail != head) { - /* - * Get interrupt information. - */ - - event = bd->re_map_membase + tail + EVSTART; - - port = ioread8(event); - reason = ioread8(event + 1); - modem = ioread8(event + 2); - ioread8(event + 3); - - /* - * Make sure the interrupt is valid. - */ - if (port >= bd->nasync) - goto next; - - if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA))) - goto next; - - ch = bd->channels[port]; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - goto next; - - /* - * If we have made it here, the event was valid. - * Lock down the channel. - */ - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - bs = ch->ch_bs; - - if (!bs) { - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - goto next; - } - - /* - * Process received data. - */ - if (reason & IFDATA) { - /* - * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT! - * input could send some data to ld, which in turn - * could do a callback to one of our other functions. - */ - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - dgap_input(ch); - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - if (ch->ch_flags & CH_RACTIVE) - ch->ch_flags |= CH_RENABLE; - else - writeb(1, &bs->idata); - - if (ch->ch_flags & CH_RWAIT) { - ch->ch_flags &= ~CH_RWAIT; - - wake_up_interruptible - (&ch->ch_tun.un_flags_wait); - } - } - - /* - * Process Modem change signals. - */ - if (reason & IFMODEM) { - ch->ch_mistat = modem; - dgap_carrier(ch); - } - - /* - * Process break. - */ - if (reason & IFBREAK) { - if (ch->ch_tun.un_tty) { - /* A break has been indicated */ - ch->ch_err_break++; - tty_buffer_request_room - (ch->ch_tun.un_tty->port, 1); - tty_insert_flip_char(ch->ch_tun.un_tty->port, - 0, TTY_BREAK); - tty_flip_buffer_push(ch->ch_tun.un_tty->port); - } - } - - /* - * Process Transmit low. - */ - if (reason & IFTLW) { - dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW); - dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW); - if (ch->ch_flags & CH_WLOW) { - ch->ch_flags &= ~CH_WLOW; - wake_up_interruptible(&ch->ch_flags_wait); - } - } - - /* - * Process Transmit empty. - */ - if (reason & IFTEM) { - dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY); - dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY); - if (ch->ch_flags & CH_WEMPTY) { - ch->ch_flags &= ~CH_WEMPTY; - wake_up_interruptible(&ch->ch_flags_wait); - } - } - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - -next: - tail = (tail + 4) & (EVMAX - EVSTART - 4); - } - - writew(tail, &eaddr->ev_tail); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; -} - -/* - * Our board poller function. - */ -static void dgap_poll_tasklet(unsigned long data) -{ - struct board_t *bd = (struct board_t *)data; - ulong lock_flags; - char __iomem *vaddr; - u16 head, tail; - - if (!bd || (bd->magic != DGAP_BOARD_MAGIC)) - return; - - if (bd->inhibit_poller) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - - vaddr = bd->re_map_membase; - - /* - * If board is ready, parse deeper to see if there is anything to do. 
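/*
 * Illustrative sketch (not from the original dgap source): each FEP event
 * consumed above is a fixed 4-byte record, and head/tail stay 4-byte
 * aligned, which is why (head | tail) & 03 is treated as fatal. The tail
 * advance masks with (EVMAX - EVSTART - 4), which only wraps correctly for
 * a power-of-two ring; the stand-in constants below assume that and are
 * otherwise hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define EV_RING_BYTES 0x200u            /* hypothetical (EVMAX - EVSTART) */
#define EX_IFDATA  0x01u                /* hypothetical reason bits */
#define EX_IFMODEM 0x02u

struct fep_event {
        uint8_t port;                   /* channel the event is for         */
        uint8_t reason;                 /* IFDATA / IFMODEM / IFBREAK / ... */
        uint8_t modem;                  /* modem signal snapshot            */
        uint8_t spare;                  /* read and discarded by the driver */
};

static void drain_events(const uint8_t *ring, uint16_t head, uint16_t *tail)
{
        while (*tail != head) {
                const struct fep_event *ev =
                        (const struct fep_event *)(ring + *tail);

                printf("port %u reason %#x modem %#x\n",
                       ev->port, ev->reason, ev->modem);
                *tail = (*tail + 4) & (EV_RING_BYTES - 4);
        }
}

int main(void)
{
        uint8_t ring[EV_RING_BYTES] = { 0 };
        uint16_t tail = 0;

        ring[0] = 0; ring[1] = EX_IFDATA;                   /* data on port 0       */
        ring[4] = 3; ring[5] = EX_IFMODEM; ring[6] = 0x10;  /* modem change, port 3 */
        drain_events(ring, 8, &tail);
        return 0;
}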
- */ - if (bd->state == BOARD_READY) { - struct ev_t __iomem *eaddr; - - if (!bd->re_map_membase) { - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return; - } - if (!bd->re_map_port) { - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return; - } - - if (!bd->nasync) - goto out; - - eaddr = (struct ev_t __iomem *)(vaddr + EVBUF); - - /* Get our head and tail */ - head = readw(&eaddr->ev_head); - tail = readw(&eaddr->ev_tail); - - /* - * If there is an event pending. Go service it. - */ - if (head != tail) { - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - dgap_event(bd); - spin_lock_irqsave(&bd->bd_lock, lock_flags); - } - -out: - /* - * If board is doing interrupts, ACK the interrupt. - */ - if (bd->intr_running) - readb(bd->re_map_port + 2); - - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return; - } - - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); -} - -/* - * dgap_found_board() - * - * A board has been found, init it. - */ -static struct board_t *dgap_found_board(struct pci_dev *pdev, int id, - int boardnum) -{ - struct board_t *brd; - unsigned int pci_irq; - int i; - int ret; - - /* get the board structure and prep it */ - brd = kzalloc(sizeof(struct board_t), GFP_KERNEL); - if (!brd) - return ERR_PTR(-ENOMEM); - - /* store the info for the board we've found */ - brd->magic = DGAP_BOARD_MAGIC; - brd->boardnum = boardnum; - brd->vendor = dgap_pci_tbl[id].vendor; - brd->device = dgap_pci_tbl[id].device; - brd->pdev = pdev; - brd->pci_bus = pdev->bus->number; - brd->pci_slot = PCI_SLOT(pdev->devfn); - brd->name = dgap_ids[id].name; - brd->maxports = dgap_ids[id].maxports; - brd->type = dgap_ids[id].config_type; - brd->dpatype = dgap_ids[id].dpatype; - brd->dpastatus = BD_NOFEP; - init_waitqueue_head(&brd->state_wait); - - spin_lock_init(&brd->bd_lock); - - brd->inhibit_poller = FALSE; - brd->wait_for_bios = 0; - brd->wait_for_fep = 0; - - for (i = 0; i < MAXPORTS; i++) - brd->channels[i] = NULL; - - /* store which card & revision we have */ - pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor); - pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice); - pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev); - - pci_irq = pdev->irq; - brd->irq = pci_irq; - - /* get the PCI Base Address Registers */ - - /* Xr Jupiter and EPC use BAR 2 */ - if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) { - brd->membase = pci_resource_start(pdev, 2); - brd->membase_end = pci_resource_end(pdev, 2); - } - /* Everyone else uses BAR 0 */ - else { - brd->membase = pci_resource_start(pdev, 0); - brd->membase_end = pci_resource_end(pdev, 0); - } - - if (!brd->membase) { - ret = -ENODEV; - goto free_brd; - } - - if (brd->membase & 1) - brd->membase &= ~3; - else - brd->membase &= ~15; - - /* - * On the PCI boards, there is no IO space allocated - * The I/O registers will be in the first 3 bytes of the - * upper 2MB of the 4MB memory space. The board memory - * will be mapped into the low 2MB of the 4MB memory space - */ - brd->port = brd->membase + PCI_IO_OFFSET; - brd->port_end = brd->port + PCI_IO_SIZE_DGAP; - - /* - * Special initialization for non-PLX boards - */ - if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) { - unsigned short cmd; - - pci_write_config_byte(pdev, 0x40, 0); - pci_write_config_byte(pdev, 0x46, 0); - - /* Limit burst length to 2 doubleword transactions */ - pci_write_config_byte(pdev, 0x42, 1); - - /* - * Enable IO and mem if not already done. - * This was needed for support on Itanium. 
- */ - pci_read_config_word(pdev, PCI_COMMAND, &cmd); - cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY); - pci_write_config_word(pdev, PCI_COMMAND, cmd); - } - - /* init our poll helper tasklet */ - tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet, - (unsigned long)brd); - - ret = dgap_remap(brd); - if (ret) - goto free_brd; - - pr_info("dgap: board %d: %s (rev %d), irq %ld\n", - boardnum, brd->name, brd->rev, brd->irq); - - return brd; - -free_brd: - kfree(brd); - - return ERR_PTR(ret); -} - -/* - * dgap_intr() - * - * Driver interrupt handler. - */ -static irqreturn_t dgap_intr(int irq, void *voidbrd) -{ - struct board_t *brd = voidbrd; - - if (!brd) - return IRQ_NONE; - - /* - * Check to make sure its for us. - */ - if (brd->magic != DGAP_BOARD_MAGIC) - return IRQ_NONE; - - brd->intr_count++; - - /* - * Schedule tasklet to run at a better time. - */ - tasklet_schedule(&brd->helper_tasklet); - return IRQ_HANDLED; -} - -/***************************************************************************** -* -* Function: -* -* dgap_poll_handler -* -* Author: -* -* Scott H Kilau -* -* Parameters: -* -* dummy -- ignored -* -* Return Values: -* -* none -* -* Description: -* -* As each timer expires, it determines (a) whether the "transmit" -* waiter needs to be woken up, and (b) whether the poller needs to -* be rescheduled. -* -******************************************************************************/ - -static void dgap_poll_handler(ulong dummy) -{ - unsigned int i; - struct board_t *brd; - unsigned long lock_flags; - ulong new_time; - - dgap_poll_counter++; - - /* - * Do not start the board state machine until - * driver tells us its up and running, and has - * everything it needs. - */ - if (dgap_driver_state != DRIVER_READY) - goto schedule_poller; - - /* - * If we have just 1 board, or the system is not SMP, - * then use the typical old style poller. - * Otherwise, use our new tasklet based poller, which should - * speed things up for multiple boards. - */ - if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) { - for (i = 0; i < dgap_numboards; i++) { - brd = dgap_board[i]; - - if (brd->state == BOARD_FAILED) - continue; - if (!brd->intr_running) - /* Call the real board poller directly */ - dgap_poll_tasklet((unsigned long)brd); - } - } else { - /* - * Go thru each board, kicking off a - * tasklet for each if needed - */ - for (i = 0; i < dgap_numboards; i++) { - brd = dgap_board[i]; - - /* - * Attempt to grab the board lock. - * - * If we can't get it, no big deal, the next poll - * will get it. Basically, I just really don't want - * to spin in here, because I want to kick off my - * tasklets as fast as I can, and then get out the - * poller. - */ - if (!spin_trylock(&brd->bd_lock)) - continue; - - /* - * If board is in a failed state, don't bother - * scheduling a tasklet - */ - if (brd->state == BOARD_FAILED) { - spin_unlock(&brd->bd_lock); - continue; - } - - /* Schedule a poll helper task */ - if (!brd->intr_running) - tasklet_schedule(&brd->helper_tasklet); - - /* - * Can't do DGAP_UNLOCK here, as we don't have - * lock_flags because we did a trylock above. - */ - spin_unlock(&brd->bd_lock); - } - } - -schedule_poller: - - /* - * Schedule ourself back at the nominal wakeup interval. 
- */ - spin_lock_irqsave(&dgap_poll_lock, lock_flags); - dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick); - - new_time = dgap_poll_time - jiffies; - - if ((ulong)new_time >= 2 * dgap_poll_tick) { - dgap_poll_time = - jiffies + dgap_jiffies_from_ms(dgap_poll_tick); - } - - dgap_poll_timer.function = dgap_poll_handler; - dgap_poll_timer.data = 0; - dgap_poll_timer.expires = dgap_poll_time; - spin_unlock_irqrestore(&dgap_poll_lock, lock_flags); - - if (!dgap_poll_stop) - add_timer(&dgap_poll_timer); -} - -/*======================================================================= - * - * dgap_cmdb - Sends a 2 byte command to the FEP. - * - * ch - Pointer to channel structure. - * cmd - Command to be sent. - * byte1 - Integer containing first byte to be sent. - * byte2 - Integer containing second byte to be sent. - * ncmds - Wait until ncmds or fewer cmds are left - * in the cmd buffer before returning. - * - *=======================================================================*/ -static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1, - u8 byte2, uint ncmds) -{ - char __iomem *vaddr; - struct __iomem cm_t *cm_addr; - uint count; - uint n; - u16 head; - u16 tail; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - /* - * Check if board is still alive. - */ - if (ch->ch_bd->state == BOARD_FAILED) - return; - - /* - * Make sure the pointers are in range before - * writing to the FEP memory. - */ - vaddr = ch->ch_bd->re_map_membase; - - if (!vaddr) - return; - - cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF); - head = readw(&cm_addr->cm_head); - - /* - * Forget it if pointers out of range. - */ - if (head >= (CMDMAX - CMDSTART) || (head & 03)) { - ch->ch_bd->state = BOARD_FAILED; - return; - } - - /* - * Put the data in the circular command buffer. - */ - writeb(cmd, (vaddr + head + CMDSTART + 0)); - writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1)); - writeb(byte1, (vaddr + head + CMDSTART + 2)); - writeb(byte2, (vaddr + head + CMDSTART + 3)); - - head = (head + 4) & (CMDMAX - CMDSTART - 4); - - writew(head, &cm_addr->cm_head); - - /* - * Wait if necessary before updating the head - * pointer to limit the number of outstanding - * commands to the FEP. If the time spent waiting - * is outlandish, declare the FEP dead. - */ - for (count = dgap_count ;;) { - head = readw(&cm_addr->cm_head); - tail = readw(&cm_addr->cm_tail); - - n = (head - tail) & (CMDMAX - CMDSTART - 4); - - if (n <= ncmds * sizeof(struct cm_t)) - break; - - if (--count == 0) { - ch->ch_bd->state = BOARD_FAILED; - return; - } - udelay(10); - } -} - -/*======================================================================= - * - * dgap_cmdw - Sends a 1 word command to the FEP. - * - * ch - Pointer to channel structure. - * cmd - Command to be sent. - * word - Integer containing word to be sent. - * ncmds - Wait until ncmds or fewer cmds are left - * in the cmd buffer before returning. - * - *=======================================================================*/ -static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds) -{ - char __iomem *vaddr; - struct __iomem cm_t *cm_addr; - uint count; - uint n; - u16 head; - u16 tail; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - /* - * Check if board is still alive. - */ - if (ch->ch_bd->state == BOARD_FAILED) - return; - - /* - * Make sure the pointers are in range before - * writing to the FEP memory. 
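/*
 * Illustrative sketch (not from the original dgap source): dgap_cmdb() above
 * queues fixed 4-byte commands (opcode, channel, two data bytes) and then
 * spins until the FEP has drained the ring down to the caller's ncmds limit,
 * a simple form of back-pressure on the mailbox. The ring size below is a
 * hypothetical stand-in for (CMDMAX - CMDSTART).
 */
#include <stdio.h>
#include <stdint.h>

#define CMD_RING_BYTES 0x200u           /* hypothetical, power of two */

static uint16_t cmd_put(uint8_t *ring, uint16_t head,
                        uint8_t cmd, uint8_t port, uint8_t b1, uint8_t b2)
{
        ring[head + 0] = cmd;
        ring[head + 1] = port;
        ring[head + 2] = b1;
        ring[head + 3] = b2;
        return (head + 4) & (CMD_RING_BYTES - 4);
}

static unsigned int cmd_bytes_outstanding(uint16_t head, uint16_t tail)
{
        /* the same distance the wait loops above compare against the
         * caller's limit before letting more commands be queued */
        return (head - tail) & (CMD_RING_BYTES - 4);
}

int main(void)
{
        uint8_t ring[CMD_RING_BYTES] = { 0 };
        uint16_t head = 0, tail = 0;

        head = cmd_put(ring, head, 0x01, 0, 0x11, 0x22);
        head = cmd_put(ring, head, 0x02, 0, 0x33, 0x44);
        printf("%u bytes (%u commands) queued\n",
               cmd_bytes_outstanding(head, tail),
               cmd_bytes_outstanding(head, tail) / 4);
        return 0;
}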
- */ - vaddr = ch->ch_bd->re_map_membase; - if (!vaddr) - return; - - cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF); - head = readw(&cm_addr->cm_head); - - /* - * Forget it if pointers out of range. - */ - if (head >= (CMDMAX - CMDSTART) || (head & 03)) { - ch->ch_bd->state = BOARD_FAILED; - return; - } - - /* - * Put the data in the circular command buffer. - */ - writeb(cmd, (vaddr + head + CMDSTART + 0)); - writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1)); - writew((u16)word, (vaddr + head + CMDSTART + 2)); - - head = (head + 4) & (CMDMAX - CMDSTART - 4); - - writew(head, &cm_addr->cm_head); - - /* - * Wait if necessary before updating the head - * pointer to limit the number of outstanding - * commands to the FEP. If the time spent waiting - * is outlandish, declare the FEP dead. - */ - for (count = dgap_count ;;) { - head = readw(&cm_addr->cm_head); - tail = readw(&cm_addr->cm_tail); - - n = (head - tail) & (CMDMAX - CMDSTART - 4); - - if (n <= ncmds * sizeof(struct cm_t)) - break; - - if (--count == 0) { - ch->ch_bd->state = BOARD_FAILED; - return; - } - udelay(10); - } -} - -/*======================================================================= - * - * dgap_cmdw_ext - Sends a extended word command to the FEP. - * - * ch - Pointer to channel structure. - * cmd - Command to be sent. - * word - Integer containing word to be sent. - * ncmds - Wait until ncmds or fewer cmds are left - * in the cmd buffer before returning. - * - *=======================================================================*/ -static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds) -{ - char __iomem *vaddr; - struct __iomem cm_t *cm_addr; - uint count; - uint n; - u16 head; - u16 tail; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - /* - * Check if board is still alive. - */ - if (ch->ch_bd->state == BOARD_FAILED) - return; - - /* - * Make sure the pointers are in range before - * writing to the FEP memory. - */ - vaddr = ch->ch_bd->re_map_membase; - if (!vaddr) - return; - - cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF); - head = readw(&cm_addr->cm_head); - - /* - * Forget it if pointers out of range. - */ - if (head >= (CMDMAX - CMDSTART) || (head & 03)) { - ch->ch_bd->state = BOARD_FAILED; - return; - } - - /* - * Put the data in the circular command buffer. - */ - - /* Write an FF to tell the FEP that we want an extended command */ - writeb((u8)0xff, (vaddr + head + CMDSTART + 0)); - - writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1)); - writew((u16)cmd, (vaddr + head + CMDSTART + 2)); - - /* - * If the second part of the command won't fit, - * put it at the beginning of the circular buffer. - */ - if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03))) - writew((u16)word, (vaddr + CMDSTART)); - else - writew((u16)word, (vaddr + head + CMDSTART + 4)); - - head = (head + 8) & (CMDMAX - CMDSTART - 4); - - writew(head, &cm_addr->cm_head); - - /* - * Wait if necessary before updating the head - * pointer to limit the number of outstanding - * commands to the FEP. If the time spent waiting - * is outlandish, declare the FEP dead. - */ - for (count = dgap_count ;;) { - head = readw(&cm_addr->cm_head); - tail = readw(&cm_addr->cm_tail); - - n = (head - tail) & (CMDMAX - CMDSTART - 4); - - if (n <= ncmds * sizeof(struct cm_t)) - break; - - if (--count == 0) { - ch->ch_bd->state = BOARD_FAILED; - return; - } - udelay(10); - } -} - -/*======================================================================= - * - * dgap_wmove - Write data to FEP buffer. 
- * - * ch - Pointer to channel structure. - * buf - Pointer to characters to be moved. - * cnt - Number of characters to move. - * - *=======================================================================*/ -static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt) -{ - int n; - char __iomem *taddr; - struct bs_t __iomem *bs; - u16 head; - - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - /* - * Check parameters. - */ - bs = ch->ch_bs; - head = readw(&bs->tx_head); - - /* - * If pointers are out of range, just return. - */ - if ((cnt > ch->ch_tsize) || - (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize) - return; - - /* - * If the write wraps over the top of the circular buffer, - * move the portion up to the wrap point, and reset the - * pointers to the bottom. - */ - n = ch->ch_tstart + ch->ch_tsize - head; - - if (cnt >= n) { - cnt -= n; - taddr = ch->ch_taddr + head; - memcpy_toio(taddr, buf, n); - head = ch->ch_tstart; - buf += n; - } - - /* - * Move rest of data. - */ - taddr = ch->ch_taddr + head; - n = cnt; - memcpy_toio(taddr, buf, n); - head += cnt; - - writew(head, &bs->tx_head); -} - -/* - * Calls the firmware to reset this channel. - */ -static void dgap_firmware_reset_port(struct channel_t *ch) -{ - dgap_cmdb(ch, CHRESET, 0, 0, 0); - - /* - * Now that the channel is reset, we need to make sure - * all the current settings get reapplied to the port - * in the firmware. - * - * So we will set the driver's cache of firmware - * settings all to 0, and then call param. - */ - ch->ch_fepiflag = 0; - ch->ch_fepcflag = 0; - ch->ch_fepoflag = 0; - ch->ch_fepstartc = 0; - ch->ch_fepstopc = 0; - ch->ch_fepastartc = 0; - ch->ch_fepastopc = 0; - ch->ch_mostat = 0; - ch->ch_hflow = 0; -} - -/*======================================================================= - * - * dgap_param - Set Digi parameters. - * - * struct tty_struct * - TTY for port. - * - *=======================================================================*/ -static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type) -{ - u16 head; - u16 cflag; - u16 iflag; - u8 mval; - u8 hflow; - - /* - * If baud rate is zero, flush queues, and set mval to drop DTR. - */ - if ((ch->ch_c_cflag & (CBAUD)) == 0) { - /* flush rx */ - head = readw(&ch->ch_bs->rx_head); - writew(head, &ch->ch_bs->rx_tail); - - /* flush tx */ - head = readw(&ch->ch_bs->tx_head); - writew(head, &ch->ch_bs->tx_tail); - - ch->ch_flags |= (CH_BAUD0); - - /* Drop RTS and DTR */ - ch->ch_mval &= ~(D_RTS(ch) | D_DTR(ch)); - mval = D_DTR(ch) | D_RTS(ch); - ch->ch_baud_info = 0; - - } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) { - /* - * Tell the fep to do the command - */ - - dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0); - - /* - * Now go get from fep mem, what the fep - * believes the custom baud rate is. - */ - ch->ch_custom_speed = dgap_get_custom_baud(ch); - ch->ch_baud_info = ch->ch_custom_speed; - - /* Handle transition from B0 */ - if (ch->ch_flags & CH_BAUD0) { - ch->ch_flags &= ~(CH_BAUD0); - ch->ch_mval |= (D_RTS(ch) | D_DTR(ch)); - } - mval = D_DTR(ch) | D_RTS(ch); - - } else { - /* - * Set baud rate, character size, and parity. 
- */ - - - int iindex = 0; - int jindex = 0; - int baud = 0; - - ulong bauds[4][16] = { - { /* slowbaud */ - 0, 50, 75, 110, - 134, 150, 200, 300, - 600, 1200, 1800, 2400, - 4800, 9600, 19200, 38400 }, - { /* slowbaud & CBAUDEX */ - 0, 57600, 115200, 230400, - 460800, 150, 200, 921600, - 600, 1200, 1800, 2400, - 4800, 9600, 19200, 38400 }, - { /* fastbaud */ - 0, 57600, 76800, 115200, - 14400, 57600, 230400, 76800, - 115200, 230400, 28800, 460800, - 921600, 9600, 19200, 38400 }, - { /* fastbaud & CBAUDEX */ - 0, 57600, 115200, 230400, - 460800, 150, 200, 921600, - 600, 1200, 1800, 2400, - 4800, 9600, 19200, 38400 } - }; - - /* - * Only use the TXPrint baud rate if the - * terminal unit is NOT open - */ - if (!(ch->ch_tun.un_flags & UN_ISOPEN) && - un_type == DGAP_PRINT) - baud = C_BAUD(ch->ch_pun.un_tty) & 0xff; - else - baud = C_BAUD(ch->ch_tun.un_tty) & 0xff; - - if (ch->ch_c_cflag & CBAUDEX) - iindex = 1; - - if (ch->ch_digi.digi_flags & DIGI_FAST) - iindex += 2; - - jindex = baud; - - if ((iindex >= 0) && (iindex < 4) && - (jindex >= 0) && (jindex < 16)) - baud = bauds[iindex][jindex]; - else - baud = 0; - - if (baud == 0) - baud = 9600; - - ch->ch_baud_info = baud; - - /* - * CBAUD has bit position 0x1000 set these days to - * indicate Linux baud rate remap. - * We use a different bit assignment for high speed. - * Clear this bit out while grabbing the parts of - * "cflag" we want. - */ - cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | - CSTOPB | CSIZE); - - /* - * HUPCL bit is used by FEP to indicate fast baud - * table is to be used. - */ - if ((ch->ch_digi.digi_flags & DIGI_FAST) || - (ch->ch_c_cflag & CBAUDEX)) - cflag |= HUPCL; - - if ((ch->ch_c_cflag & CBAUDEX) && - !(ch->ch_digi.digi_flags & DIGI_FAST)) { - /* - * The below code is trying to guarantee that only - * baud rates 115200, 230400, 460800, 921600 are - * remapped. We use exclusive or because the various - * baud rates share common bit positions and therefore - * can't be tested for easily. - */ - tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX; - int baudpart = 0; - - /* - * Map high speed requests to index - * into FEP's baud table - */ - switch (tcflag) { - case B57600: - baudpart = 1; - break; -#ifdef B76800 - case B76800: - baudpart = 2; - break; -#endif - case B115200: - baudpart = 3; - break; - case B230400: - baudpart = 9; - break; - case B460800: - baudpart = 11; - break; -#ifdef B921600 - case B921600: - baudpart = 12; - break; -#endif - default: - baudpart = 0; - } - - if (baudpart) - cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart; - } - - cflag &= 0xffff; - - if (cflag != ch->ch_fepcflag) { - ch->ch_fepcflag = (u16)(cflag & 0xffff); - - /* - * Okay to have channel and board - * locks held calling this - */ - dgap_cmdw(ch, SCFLAG, (u16)cflag, 0); - } - - /* Handle transition from B0 */ - if (ch->ch_flags & CH_BAUD0) { - ch->ch_flags &= ~(CH_BAUD0); - ch->ch_mval |= (D_RTS(ch) | D_DTR(ch)); - } - mval = D_DTR(ch) | D_RTS(ch); - } - - /* - * Get input flags. 
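/*
 * Illustrative sketch (not from the original dgap source): the 4x16 lookup
 * above picks its row from two mode bits (CBAUDEX in c_cflag, DIGI_FAST in
 * digi_flags) and its column from the low byte of the requested Bxxx
 * constant, falling back to 9600 when the slot is 0. The table values are
 * copied from the removed code; the sample low-byte values in main() are the
 * usual Linux encodings (B9600 = 13, B50 = 1, and B115200 has CBAUDEX set
 * with a low byte of 2 on most architectures).
 */
#include <stdio.h>

static const unsigned long bauds[4][16] = {
        { 0, 50, 75, 110, 134, 150, 200, 300,
          600, 1200, 1800, 2400, 4800, 9600, 19200, 38400 },            /* slow          */
        { 0, 57600, 115200, 230400, 460800, 150, 200, 921600,
          600, 1200, 1800, 2400, 4800, 9600, 19200, 38400 },            /* slow, CBAUDEX */
        { 0, 57600, 76800, 115200, 14400, 57600, 230400, 76800,
          115200, 230400, 28800, 460800, 921600, 9600, 19200, 38400 },  /* fast          */
        { 0, 57600, 115200, 230400, 460800, 150, 200, 921600,
          600, 1200, 1800, 2400, 4800, 9600, 19200, 38400 },            /* fast, CBAUDEX */
};

static unsigned long lookup_baud(int cbaudex, int digi_fast, unsigned int low_byte)
{
        int row = (cbaudex ? 1 : 0) + (digi_fast ? 2 : 0);
        unsigned long baud = (low_byte < 16) ? bauds[row][low_byte] : 0;

        return baud ? baud : 9600;      /* the driver's fallback */
}

int main(void)
{
        printf("B115200, plain card: %lu\n", lookup_baud(1, 0, 2));
        printf("B9600,   plain card: %lu\n", lookup_baud(0, 0, 13));
        printf("B50 with DIGI_FAST:  %lu\n", lookup_baud(0, 1, 1));
        return 0;
}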
- */ - iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | - INPCK | ISTRIP | IXON | IXANY | IXOFF); - - if ((ch->ch_startc == _POSIX_VDISABLE) || - (ch->ch_stopc == _POSIX_VDISABLE)) { - iflag &= ~(IXON | IXOFF); - ch->ch_c_iflag &= ~(IXON | IXOFF); - } - - /* - * Only the IBM Xr card can switch between - * 232 and 422 modes on the fly - */ - if (bd->device == PCI_DEV_XR_IBM_DID) { - if (ch->ch_digi.digi_flags & DIGI_422) - dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0); - else - dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0); - } - - if (ch->ch_digi.digi_flags & DIGI_ALTPIN) - iflag |= IALTPIN; - - if (iflag != ch->ch_fepiflag) { - ch->ch_fepiflag = iflag; - - /* Okay to have channel and board locks held calling this */ - dgap_cmdw(ch, SIFLAG, (u16)ch->ch_fepiflag, 0); - } - - /* - * Select hardware handshaking. - */ - hflow = 0; - - if (ch->ch_c_cflag & CRTSCTS) - hflow |= (D_RTS(ch) | D_CTS(ch)); - if (ch->ch_digi.digi_flags & RTSPACE) - hflow |= D_RTS(ch); - if (ch->ch_digi.digi_flags & DTRPACE) - hflow |= D_DTR(ch); - if (ch->ch_digi.digi_flags & CTSPACE) - hflow |= D_CTS(ch); - if (ch->ch_digi.digi_flags & DSRPACE) - hflow |= D_DSR(ch); - if (ch->ch_digi.digi_flags & DCDPACE) - hflow |= D_CD(ch); - - if (hflow != ch->ch_hflow) { - ch->ch_hflow = hflow; - - /* Okay to have channel and board locks held calling this */ - dgap_cmdb(ch, SHFLOW, (u8)hflow, 0xff, 0); - } - - /* - * Set RTS and/or DTR Toggle if needed, - * but only if product is FEP5+ based. - */ - if (bd->bd_flags & BD_FEP5PLUS) { - u16 hflow2 = 0; - - if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) - hflow2 |= (D_RTS(ch)); - if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) - hflow2 |= (D_DTR(ch)); - - dgap_cmdw_ext(ch, 0xff03, hflow2, 0); - } - - /* - * Set modem control lines. - */ - - mval ^= ch->ch_mforce & (mval ^ ch->ch_mval); - - if (ch->ch_mostat ^ mval) { - ch->ch_mostat = mval; - - /* Okay to have channel and board locks held calling this */ - dgap_cmdb(ch, SMODEM, (u8)mval, D_RTS(ch) | D_DTR(ch), 0); - } - - /* - * Read modem signals, and then call carrier function. - */ - ch->ch_mistat = readb(&ch->ch_bs->m_stat); - dgap_carrier(ch); - - /* - * Set the start and stop characters. - */ - if (ch->ch_startc != ch->ch_fepstartc || - ch->ch_stopc != ch->ch_fepstopc) { - ch->ch_fepstartc = ch->ch_startc; - ch->ch_fepstopc = ch->ch_stopc; - - /* Okay to have channel and board locks held calling this */ - dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0); - } - - /* - * Set the Auxiliary start and stop characters. - */ - if (ch->ch_astartc != ch->ch_fepastartc || - ch->ch_astopc != ch->ch_fepastopc) { - ch->ch_fepastartc = ch->ch_astartc; - ch->ch_fepastopc = ch->ch_astopc; - - /* Okay to have channel and board locks held calling this */ - dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0); - } - - return 0; -} - -/* - * dgap_block_til_ready() - * - * Wait for DCD, if needed. - */ -static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, - struct channel_t *ch) -{ - int retval = 0; - struct un_t *un; - ulong lock_flags; - uint old_flags; - int sleep_on_un_flags; - - if (!tty || tty->magic != TTY_MAGIC || !file || !ch || - ch->magic != DGAP_CHANNEL_MAGIC) - return -EIO; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return -EIO; - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - - ch->ch_wopen++; - - /* Loop forever */ - while (1) { - sleep_on_un_flags = 0; - - /* - * If board has failed somehow during our sleep, - * bail with error. 
- */ - if (ch->ch_bd->state == BOARD_FAILED) { - retval = -EIO; - break; - } - - /* If tty was hung up, break out of loop and set error. */ - if (tty_hung_up_p(file)) { - retval = -EAGAIN; - break; - } - - /* - * If either unit is in the middle of the fragile part of close, - * we just cannot touch the channel safely. - * Go back to sleep, knowing that when the channel can be - * touched safely, the close routine will signal the - * ch_wait_flags to wake us back up. - */ - if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & - UN_CLOSING)) { - /* - * Our conditions to leave cleanly and happily: - * 1) NONBLOCKING on the tty is set. - * 2) CLOCAL is set. - * 3) DCD (fake or real) is active. - */ - - if (file->f_flags & O_NONBLOCK) - break; - - if (tty->flags & (1 << TTY_IO_ERROR)) - break; - - if (ch->ch_flags & CH_CD) - break; - - if (ch->ch_flags & CH_FCAR) - break; - } else { - sleep_on_un_flags = 1; - } - - /* - * If there is a signal pending, the user probably - * interrupted (ctrl-c) us. - * Leave loop with error set. - */ - if (signal_pending(current)) { - retval = -ERESTARTSYS; - break; - } - - /* - * Store the flags before we let go of channel lock - */ - if (sleep_on_un_flags) - old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags; - else - old_flags = ch->ch_flags; - - /* - * Let go of channel lock before calling schedule. - * Our poller will get any FEP events and wake us up when DCD - * eventually goes active. - */ - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - /* - * Wait for something in the flags to change - * from the current value. - */ - if (sleep_on_un_flags) { - retval = wait_event_interruptible(un->un_flags_wait, - (old_flags != (ch->ch_tun.un_flags | - ch->ch_pun.un_flags))); - } else { - retval = wait_event_interruptible(ch->ch_flags_wait, - (old_flags != ch->ch_flags)); - } - - /* - * We got woken up for some reason. - * Before looping around, grab our channel lock. - */ - spin_lock_irqsave(&ch->ch_lock, lock_flags); - } - - ch->ch_wopen--; - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - return retval; -} - -/* - * dgap_tty_flush_buffer() - * - * Flush Tx buffer (make in == out) - */ -static void dgap_tty_flush_buffer(struct tty_struct *tty) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - u16 head; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - ch->ch_flags &= ~CH_STOP; - head = readw(&ch->ch_bs->tx_head); - dgap_cmdw(ch, FLUSHTX, (u16)head, 0); - dgap_cmdw(ch, RESUMETX, 0, 0); - if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) { - ch->ch_tun.un_flags &= ~(UN_LOW | UN_EMPTY); - wake_up_interruptible(&ch->ch_tun.un_flags_wait); - } - if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) { - ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY); - wake_up_interruptible(&ch->ch_pun.un_flags_wait); - } - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - tty_wakeup(tty); -} - -/* - * dgap_tty_hangup() - * - * Hangup the port. Like a close, but don't wait for output to drain. 
- */ -static void dgap_tty_hangup(struct tty_struct *tty) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - /* flush the transmit queues */ - dgap_tty_flush_buffer(tty); -} - -/* - * dgap_tty_chars_in_buffer() - * - * Return number of characters that have not been transmitted yet. - * - * This routine is used by the line discipline to determine if there - * is data waiting to be transmitted/drained/flushed or not. - */ -static int dgap_tty_chars_in_buffer(struct tty_struct *tty) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - struct bs_t __iomem *bs; - u8 tbusy; - uint chars; - u16 thead, ttail, tmask, chead, ctail; - ulong lock_flags = 0; - ulong lock_flags2 = 0; - - if (!tty) - return 0; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - - bs = ch->ch_bs; - if (!bs) - return 0; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - tmask = (ch->ch_tsize - 1); - - /* Get Transmit queue pointers */ - thead = readw(&bs->tx_head) & tmask; - ttail = readw(&bs->tx_tail) & tmask; - - /* Get tbusy flag */ - tbusy = readb(&bs->tbusy); - - /* Get Command queue pointers */ - chead = readw(&ch->ch_cm->cm_head); - ctail = readw(&ch->ch_cm->cm_tail); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - /* - * The only way we know for sure if there is no pending - * data left to be transferred, is if: - * 1) Transmit head and tail are equal (empty). - * 2) Command queue head and tail are equal (empty). - * 3) The "TBUSY" flag is 0. (Transmitter not busy). - */ - - if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) { - chars = 0; - } else { - if (thead >= ttail) - chars = thead - ttail; - else - chars = thead - ttail + ch->ch_tsize; - /* - * Fudge factor here. - * If chars is zero, we know that the command queue had - * something in it or tbusy was set. Because we cannot - * be sure if there is still some data to be transmitted, - * lets lie, and tell ld we have 1 byte left. - */ - if (chars == 0) { - /* - * If TBUSY is still set, and our tx buffers are empty, - * force the firmware to send me another wakeup after - * TBUSY has been cleared. 
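The drain test in dgap_tty_chars_in_buffer() above comes down to circular-buffer arithmetic plus one deliberate lie: when head equals tail but the command queue or the TBUSY flag says the transmitter is still working, report a single byte so the line discipline keeps waiting. A standalone model of that calculation, assuming (as the mask arithmetic in the driver does) that the buffer size is a power of two:

#include <stdio.h>
#include <stdbool.h>

/* Pending bytes in a power-of-two circular transmit buffer, with the
 * driver's "pretend one byte remains" fudge when the hardware is busy. */
static unsigned int chars_pending(unsigned int head, unsigned int tail,
                                  unsigned int size, bool tx_busy,
                                  bool cmd_queue_empty)
{
    unsigned int mask = size - 1;
    unsigned int chars;

    head &= mask;
    tail &= mask;

    if (head == tail && !tx_busy && cmd_queue_empty)
        return 0;

    chars = (head - tail) & mask;
    if (chars == 0)
        chars = 1;      /* data still draining somewhere downstream */
    return chars;
}

int main(void)
{
    printf("%u\n", chars_pending(10, 4, 1024, false, true));   /* 6 */
    printf("%u\n", chars_pending(4, 4, 1024, true, true));     /* 1 */
    return 0;
}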
- */ - if (tbusy != 0) { - spin_lock_irqsave(&ch->ch_lock, lock_flags); - un->un_flags |= UN_EMPTY; - writeb(1, &bs->iempty); - spin_unlock_irqrestore(&ch->ch_lock, - lock_flags); - } - chars = 1; - } - } - - return chars; -} - -static int dgap_wait_for_drain(struct tty_struct *tty) -{ - struct channel_t *ch; - struct un_t *un; - struct bs_t __iomem *bs; - int ret = 0; - uint count = 1; - ulong lock_flags = 0; - - if (!tty || tty->magic != TTY_MAGIC) - return -EIO; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return -EIO; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return -EIO; - - bs = ch->ch_bs; - if (!bs) - return -EIO; - - /* Loop until data is drained */ - while (count != 0) { - count = dgap_tty_chars_in_buffer(tty); - - if (count == 0) - break; - - /* Set flag waiting for drain */ - spin_lock_irqsave(&ch->ch_lock, lock_flags); - un->un_flags |= UN_EMPTY; - writeb(1, &bs->iempty); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - /* Go to sleep till we get woken up */ - ret = wait_event_interruptible(un->un_flags_wait, - ((un->un_flags & UN_EMPTY) == 0)); - /* If ret is non-zero, user ctrl-c'ed us */ - if (ret) - break; - } - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - un->un_flags &= ~(UN_EMPTY); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - return ret; -} - -/* - * dgap_maxcps_room - * - * Reduces bytes_available to the max number of characters - * that can be sent currently given the maxcps value, and - * returns the new bytes_available. This only affects printer - * output. - */ -static int dgap_maxcps_room(struct channel_t *ch, struct un_t *un, - int bytes_available) -{ - /* - * If its not the Transparent print device, return - * the full data amount. - */ - if (un->un_type != DGAP_PRINT) - return bytes_available; - - if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) { - int cps_limit = 0; - unsigned long current_time = jiffies; - unsigned long buffer_time = current_time + - (HZ * ch->ch_digi.digi_bufsize) / - ch->ch_digi.digi_maxcps; - - if (ch->ch_cpstime < current_time) { - /* buffer is empty */ - ch->ch_cpstime = current_time; /* reset ch_cpstime */ - cps_limit = ch->ch_digi.digi_bufsize; - } else if (ch->ch_cpstime < buffer_time) { - /* still room in the buffer */ - cps_limit = ((buffer_time - ch->ch_cpstime) * - ch->ch_digi.digi_maxcps) / HZ; - } else { - /* no room in the buffer */ - cps_limit = 0; - } - - bytes_available = min(cps_limit, bytes_available); - } - - return bytes_available; -} - -static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event) -{ - struct channel_t *ch; - struct bs_t __iomem *bs; - - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - bs = ch->ch_bs; - if (!bs) - return; - - if ((event & UN_LOW) != 0) { - if ((un->un_flags & UN_LOW) == 0) { - un->un_flags |= UN_LOW; - writeb(1, &bs->ilow); - } - } - if ((event & UN_LOW) != 0) { - if ((un->un_flags & UN_EMPTY) == 0) { - un->un_flags |= UN_EMPTY; - writeb(1, &bs->iempty); - } - } -} - -/* - * dgap_tty_write_room() - * - * Return space available in Tx buffer - */ -static int dgap_tty_write_room(struct tty_struct *tty) -{ - struct channel_t *ch; - struct un_t *un; - struct bs_t __iomem *bs; - u16 head, tail, tmask; - int ret; - ulong lock_flags = 0; - - if (!tty) - return 0; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - 
return 0; - - bs = ch->ch_bs; - if (!bs) - return 0; - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - - tmask = ch->ch_tsize - 1; - head = readw(&bs->tx_head) & tmask; - tail = readw(&bs->tx_tail) & tmask; - - ret = tail - head - 1; - if (ret < 0) - ret += ch->ch_tsize; - - /* Limit printer to maxcps */ - ret = dgap_maxcps_room(ch, un, ret); - - /* - * If we are printer device, leave space for - * possibly both the on and off strings. - */ - if (un->un_type == DGAP_PRINT) { - if (!(ch->ch_flags & CH_PRON)) - ret -= ch->ch_digi.digi_onlen; - ret -= ch->ch_digi.digi_offlen; - } else { - if (ch->ch_flags & CH_PRON) - ret -= ch->ch_digi.digi_offlen; - } - - if (ret < 0) - ret = 0; - - /* - * Schedule FEP to wake us up if needed. - * - * TODO: This might be overkill... - * Do we really need to schedule callbacks from the FEP - * in every case? Can we get smarter based on ret? - */ - dgap_set_firmware_event(un, UN_LOW | UN_EMPTY); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - return ret; -} - -/* - * dgap_tty_write() - * - * Take data from the user or kernel and send it out to the FEP. - * In here exists all the Transparent Print magic as well. - */ -static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, - int count) -{ - struct channel_t *ch; - struct un_t *un; - struct bs_t __iomem *bs; - char __iomem *vaddr; - u16 head, tail, tmask, remain; - int bufcount, n; - ulong lock_flags; - - if (!tty) - return 0; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - - bs = ch->ch_bs; - if (!bs) - return 0; - - if (!count) - return 0; - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - - /* Get our space available for the channel from the board */ - tmask = ch->ch_tsize - 1; - head = readw(&(bs->tx_head)) & tmask; - tail = readw(&(bs->tx_tail)) & tmask; - - bufcount = tail - head - 1; - if (bufcount < 0) - bufcount += ch->ch_tsize; - - /* - * Limit printer output to maxcps overall, with bursts allowed - * up to bufsize characters. - */ - bufcount = dgap_maxcps_room(ch, un, bufcount); - - /* - * Take minimum of what the user wants to send, and the - * space available in the FEP buffer. - */ - count = min(count, bufcount); - - /* - * Bail if no space left. - */ - if (count <= 0) { - dgap_set_firmware_event(un, UN_LOW | UN_EMPTY); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - return 0; - } - - /* - * Output the printer ON string, if we are in terminal mode, but - * need to be in printer mode. - */ - if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) { - dgap_wmove(ch, ch->ch_digi.digi_onstr, - (int)ch->ch_digi.digi_onlen); - head = readw(&bs->tx_head) & tmask; - ch->ch_flags |= CH_PRON; - } - - /* - * On the other hand, output the printer OFF string, if we are - * currently in printer mode, but need to output to the terminal. - */ - if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) { - dgap_wmove(ch, ch->ch_digi.digi_offstr, - (int)ch->ch_digi.digi_offlen); - head = readw(&bs->tx_head) & tmask; - ch->ch_flags &= ~CH_PRON; - } - - n = count; - - /* - * If the write wraps over the top of the circular buffer, - * move the portion up to the wrap point, and reset the - * pointers to the bottom. 
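The comment above describes the one subtle step in the transmit path: a write that crosses the top of the circular transmit window is split into two copies, one up to the wrap point and one restarting at the bottom. A standalone model of that split, using memcpy into an ordinary zero-based ring instead of memcpy_toio into tstart-relative board memory:

#include <stdio.h>
#include <string.h>

#define TX_SIZE 8                       /* power of two, like ch_tsize */

/* Copy len bytes into a circular buffer at position head, splitting the
 * copy at the wrap point, and return the new head position. */
static unsigned int tx_put(char *ring, unsigned int head,
                           const char *src, unsigned int len)
{
    unsigned int remain = TX_SIZE - head;   /* room before the wrap */

    if (len >= remain) {
        memcpy(ring + head, src, remain);   /* fill up to the top... */
        src += remain;
        len -= remain;
        head = 0;                           /* ...and restart at the bottom */
    }
    if (len) {
        memcpy(ring + head, src, len);
        head += len;
    }
    return head & (TX_SIZE - 1);
}

int main(void)
{
    char ring[TX_SIZE] = { 0 };
    unsigned int head = 6;

    head = tx_put(ring, head, "abcd", 4);   /* "ab" lands at 6..7, "cd" at 0..1 */
    printf("head=%u ring[0..1]=%c%c ring[6..7]=%c%c\n",
           head, ring[0], ring[1], ring[6], ring[7]);
    return 0;
}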
- */ - remain = ch->ch_tstart + ch->ch_tsize - head; - - if (n >= remain) { - n -= remain; - vaddr = ch->ch_taddr + head; - - memcpy_toio(vaddr, (u8 *)buf, remain); - - head = ch->ch_tstart; - buf += remain; - } - - if (n > 0) { - /* - * Move rest of data. - */ - vaddr = ch->ch_taddr + head; - remain = n; - - memcpy_toio(vaddr, (u8 *)buf, remain); - head += remain; - } - - if (count) { - ch->ch_txcount += count; - head &= tmask; - writew(head, &bs->tx_head); - } - - dgap_set_firmware_event(un, UN_LOW | UN_EMPTY); - - /* - * If this is the print device, and the - * printer is still on, we need to turn it - * off before going idle. If the buffer is - * non-empty, wait until it goes empty. - * Otherwise turn it off right now. - */ - if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) { - tail = readw(&bs->tx_tail) & tmask; - - if (tail != head) { - un->un_flags |= UN_EMPTY; - writeb(1, &bs->iempty); - } else { - dgap_wmove(ch, ch->ch_digi.digi_offstr, - (int)ch->ch_digi.digi_offlen); - head = readw(&bs->tx_head) & tmask; - ch->ch_flags &= ~CH_PRON; - } - } - - /* Update printer buffer empty time. */ - if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0) - && (ch->ch_digi.digi_bufsize > 0)) { - ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps; - } - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - return count; -} - -/* - * dgap_tty_put_char() - * - * Put a character into ch->ch_buf - * - * - used by the line discipline for OPOST processing - */ -static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c) -{ - /* - * Simply call tty_write. - */ - dgap_tty_write(tty, &c, 1); - return 1; -} - -/* - * Return modem signals to ld. - */ -static int dgap_tty_tiocmget(struct tty_struct *tty) -{ - struct channel_t *ch; - struct un_t *un; - int result; - u8 mstat; - ulong lock_flags; - - if (!tty || tty->magic != TTY_MAGIC) - return -EIO; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return -EIO; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return -EIO; - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - - mstat = readb(&ch->ch_bs->m_stat); - /* Append any outbound signals that might be pending... */ - mstat |= ch->ch_mostat; - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - result = 0; - - if (mstat & D_DTR(ch)) - result |= TIOCM_DTR; - if (mstat & D_RTS(ch)) - result |= TIOCM_RTS; - if (mstat & D_CTS(ch)) - result |= TIOCM_CTS; - if (mstat & D_DSR(ch)) - result |= TIOCM_DSR; - if (mstat & D_RI(ch)) - result |= TIOCM_RI; - if (mstat & D_CD(ch)) - result |= TIOCM_CD; - - return result; -} - -/* - * dgap_tty_tiocmset() - * - * Set modem signals, called by ld. 
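The transparent-print path above is also rate limited: dgap_maxcps_room() treats ch_cpstime as a horizon marking how far into the future the already-queued printer output extends, admits new characters only while that horizon stays within one buffer's worth of time, and the write path pushes the horizon out after each accepted chunk. The same bookkeeping in plain C, with an abstract tick counter standing in for jiffies and illustrative values for HZ and the limits:

#include <stdio.h>

#define HZ 100                          /* ticks per second (assumption) */

struct cps_limiter {
    unsigned long horizon;              /* like ch_cpstime */
    unsigned int maxcps;                /* characters per second allowed */
    unsigned int bufsize;               /* printer buffer, in characters */
};

/* How many characters may be queued right now? */
static unsigned int cps_room(struct cps_limiter *l, unsigned long now)
{
    unsigned long full = now + ((unsigned long)HZ * l->bufsize) / l->maxcps;

    if (l->horizon < now) {             /* queue already drained */
        l->horizon = now;
        return l->bufsize;
    }
    if (l->horizon < full)              /* partially full */
        return (unsigned int)((full - l->horizon) * l->maxcps / HZ);
    return 0;                           /* completely full */
}

/* After actually queueing n characters, push the horizon out. */
static void cps_commit(struct cps_limiter *l, unsigned int n)
{
    l->horizon += (unsigned long)HZ * n / l->maxcps;
}

int main(void)
{
    struct cps_limiter l = { .horizon = 0, .maxcps = 100, .bufsize = 50 };
    unsigned int room = cps_room(&l, 1000);

    cps_commit(&l, room > 20 ? 20 : room);
    printf("room=%u horizon=%lu\n", room, l.horizon);
    return 0;
}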
- */ -static int dgap_tty_tiocmset(struct tty_struct *tty, - unsigned int set, unsigned int clear) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return -EIO; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return -EIO; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return -EIO; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return -EIO; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - if (set & TIOCM_RTS) { - ch->ch_mforce |= D_RTS(ch); - ch->ch_mval |= D_RTS(ch); - } - - if (set & TIOCM_DTR) { - ch->ch_mforce |= D_DTR(ch); - ch->ch_mval |= D_DTR(ch); - } - - if (clear & TIOCM_RTS) { - ch->ch_mforce |= D_RTS(ch); - ch->ch_mval &= ~(D_RTS(ch)); - } - - if (clear & TIOCM_DTR) { - ch->ch_mforce |= D_DTR(ch); - ch->ch_mval &= ~(D_DTR(ch)); - } - - dgap_param(ch, bd, un->un_type); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; -} - -/* - * dgap_tty_send_break() - * - * Send a Break, called by ld. - */ -static int dgap_tty_send_break(struct tty_struct *tty, int msec) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return -EIO; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return -EIO; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return -EIO; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return -EIO; - - switch (msec) { - case -1: - msec = 0xFFFF; - break; - case 0: - msec = 1; - break; - default: - msec /= 10; - break; - } - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); -#if 0 - dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0); -#endif - dgap_cmdw(ch, SBREAK, (u16)msec, 0); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; -} - -/* - * dgap_tty_wait_until_sent() - * - * wait until data has been transmitted, called by ld. - */ -static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout) -{ - dgap_wait_for_drain(tty); -} - -/* - * dgap_send_xchar() - * - * send a high priority character, called by ld. - */ -static void dgap_tty_send_xchar(struct tty_struct *tty, char c) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - /* - * This is technically what we should do. - * However, the NIST tests specifically want - * to see each XON or XOFF character that it - * sends, so lets just send each character - * by hand... - */ -#if 0 - if (c == STOP_CHAR(tty)) - dgap_cmdw(ch, RPAUSE, 0, 0); - else if (c == START_CHAR(tty)) - dgap_cmdw(ch, RRESUME, 0, 0); - else - dgap_wmove(ch, &c, 1); -#else - dgap_wmove(ch, &c, 1); -#endif - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); -} - -/* - * Return modem signals to ld. 
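dgap_tty_tiocmget() above and dgap_get_modem_info() just below both translate the board's modem-status byte into the standard TIOCM_* bits one line at a time. A compact userspace model of that translation; the board-side bit positions here are placeholders, since the real ones come from the per-channel D_*() macros:

#include <stdio.h>

/* Stand-in board-side bit positions (assumption; the driver derives
 * these per channel). */
#define B_DTR 0x01
#define B_RTS 0x02
#define B_CTS 0x04
#define B_DSR 0x08
#define B_RI  0x10
#define B_CD  0x20

/* Standard Linux values for the TIOCM_* modem bits. */
#define TIOCM_DTR 0x002
#define TIOCM_RTS 0x004
#define TIOCM_CTS 0x020
#define TIOCM_CAR 0x040
#define TIOCM_RI  0x080
#define TIOCM_DSR 0x100

static int mstat_to_tiocm(unsigned char mstat)
{
    int result = 0;

    if (mstat & B_DTR) result |= TIOCM_DTR;
    if (mstat & B_RTS) result |= TIOCM_RTS;
    if (mstat & B_CTS) result |= TIOCM_CTS;
    if (mstat & B_DSR) result |= TIOCM_DSR;
    if (mstat & B_RI)  result |= TIOCM_RI;
    if (mstat & B_CD)  result |= TIOCM_CAR;
    return result;
}

int main(void)
{
    printf("0x%x\n", mstat_to_tiocm(B_DTR | B_CD));
    return 0;
}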
- */ -static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value) -{ - int result; - u8 mstat; - ulong lock_flags; - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - - mstat = readb(&ch->ch_bs->m_stat); - /* Append any outbound signals that might be pending... */ - mstat |= ch->ch_mostat; - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - result = 0; - - if (mstat & D_DTR(ch)) - result |= TIOCM_DTR; - if (mstat & D_RTS(ch)) - result |= TIOCM_RTS; - if (mstat & D_CTS(ch)) - result |= TIOCM_CTS; - if (mstat & D_DSR(ch)) - result |= TIOCM_DSR; - if (mstat & D_RI(ch)) - result |= TIOCM_RI; - if (mstat & D_CD(ch)) - result |= TIOCM_CD; - - return put_user(result, value); -} - -/* - * dgap_set_modem_info() - * - * Set modem signals, called by ld. - */ -static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd, - struct un_t *un, unsigned int command, - unsigned int __user *value) -{ - int ret; - unsigned int arg; - ulong lock_flags; - ulong lock_flags2; - - ret = get_user(arg, value); - if (ret) - return ret; - - switch (command) { - case TIOCMBIS: - if (arg & TIOCM_RTS) { - ch->ch_mforce |= D_RTS(ch); - ch->ch_mval |= D_RTS(ch); - } - - if (arg & TIOCM_DTR) { - ch->ch_mforce |= D_DTR(ch); - ch->ch_mval |= D_DTR(ch); - } - - break; - - case TIOCMBIC: - if (arg & TIOCM_RTS) { - ch->ch_mforce |= D_RTS(ch); - ch->ch_mval &= ~(D_RTS(ch)); - } - - if (arg & TIOCM_DTR) { - ch->ch_mforce |= D_DTR(ch); - ch->ch_mval &= ~(D_DTR(ch)); - } - - break; - - case TIOCMSET: - ch->ch_mforce = D_DTR(ch) | D_RTS(ch); - - if (arg & TIOCM_RTS) - ch->ch_mval |= D_RTS(ch); - else - ch->ch_mval &= ~(D_RTS(ch)); - - if (arg & TIOCM_DTR) - ch->ch_mval |= (D_DTR(ch)); - else - ch->ch_mval &= ~(D_DTR(ch)); - - break; - - default: - return -EINVAL; - } - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - dgap_param(ch, bd, un->un_type); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; -} - -/* - * dgap_tty_digigeta() - * - * Ioctl to get the information for ditty. - * - * - * - */ -static int dgap_tty_digigeta(struct channel_t *ch, - struct digi_t __user *retinfo) -{ - struct digi_t tmp; - ulong lock_flags; - - if (!retinfo) - return -EFAULT; - - memset(&tmp, 0, sizeof(tmp)); - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - memcpy(&tmp, &ch->ch_digi, sizeof(tmp)); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) - return -EFAULT; - - return 0; -} - -/* - * dgap_tty_digiseta() - * - * Ioctl to set the information for ditty. 
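dgap_set_modem_info() above keeps two per-channel masks: ch_mforce records which modem lines the user has explicitly taken control of, and ch_mval records the value those lines should carry. TIOCMBIS and TIOCMBIC touch individual bits, while TIOCMSET rewrites both DTR and RTS outright. A small standalone model of that bookkeeping, with placeholder bit values:

#include <stdio.h>

#define M_RTS 0x1          /* placeholder bit values (assumption) */
#define M_DTR 0x2

struct modem_state {
    unsigned int force;    /* lines explicitly controlled by the user */
    unsigned int val;      /* requested value of those lines */
};

enum op { OP_BIS, OP_BIC, OP_SET };

static void apply(struct modem_state *m, enum op op, unsigned int arg)
{
    switch (op) {
    case OP_BIS:                       /* set the named bits */
        m->force |= arg;
        m->val |= arg;
        break;
    case OP_BIC:                       /* clear the named bits */
        m->force |= arg;
        m->val &= ~arg;
        break;
    case OP_SET:                       /* rewrite both lines from arg */
        m->force = M_RTS | M_DTR;
        m->val = arg & (M_RTS | M_DTR);
        break;
    }
}

int main(void)
{
    struct modem_state m = { 0, 0 };

    apply(&m, OP_BIS, M_RTS);
    apply(&m, OP_BIC, M_DTR);
    printf("force=0x%x val=0x%x\n", m.force, m.val);
    return 0;
}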
- * - * - * - */ -static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd, - struct un_t *un, struct digi_t __user *new_info) -{ - struct digi_t new_digi; - ulong lock_flags = 0; - unsigned long lock_flags2; - - if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t))) - return -EFAULT; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t)); - - if (ch->ch_digi.digi_maxcps < 1) - ch->ch_digi.digi_maxcps = 1; - - if (ch->ch_digi.digi_maxcps > 10000) - ch->ch_digi.digi_maxcps = 10000; - - if (ch->ch_digi.digi_bufsize < 10) - ch->ch_digi.digi_bufsize = 10; - - if (ch->ch_digi.digi_maxchar < 1) - ch->ch_digi.digi_maxchar = 1; - - if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize) - ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize; - - if (ch->ch_digi.digi_onlen > DIGI_PLEN) - ch->ch_digi.digi_onlen = DIGI_PLEN; - - if (ch->ch_digi.digi_offlen > DIGI_PLEN) - ch->ch_digi.digi_offlen = DIGI_PLEN; - - dgap_param(ch, bd, un->un_type); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; -} - -/* - * dgap_tty_digigetedelay() - * - * Ioctl to get the current edelay setting. - * - * - * - */ -static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo) -{ - struct channel_t *ch; - struct un_t *un; - int tmp; - ulong lock_flags; - - if (!retinfo) - return -EFAULT; - - if (!tty || tty->magic != TTY_MAGIC) - return -EFAULT; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return -EFAULT; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return -EFAULT; - - memset(&tmp, 0, sizeof(tmp)); - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - tmp = readw(&ch->ch_bs->edelay); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) - return -EFAULT; - - return 0; -} - -/* - * dgap_tty_digisetedelay() - * - * Ioctl to set the EDELAY setting - * - */ -static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd, - struct un_t *un, int __user *new_info) -{ - int new_digi; - ulong lock_flags; - ulong lock_flags2; - - if (copy_from_user(&new_digi, new_info, sizeof(int))) - return -EFAULT; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - writew((u16)new_digi, &ch->ch_bs->edelay); - - dgap_param(ch, bd, un->un_type); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; -} - -/* - * dgap_tty_digigetcustombaud() - * - * Ioctl to get the current custom baud rate setting. 
- */ -static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un, - int __user *retinfo) -{ - int tmp; - ulong lock_flags; - - if (!retinfo) - return -EFAULT; - - memset(&tmp, 0, sizeof(tmp)); - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - tmp = dgap_get_custom_baud(ch); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) - return -EFAULT; - - return 0; -} - -/* - * dgap_tty_digisetcustombaud() - * - * Ioctl to set the custom baud rate setting - */ -static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd, - struct un_t *un, int __user *new_info) -{ - uint new_rate; - ulong lock_flags; - ulong lock_flags2; - - if (copy_from_user(&new_rate, new_info, sizeof(unsigned int))) - return -EFAULT; - - if (bd->bd_flags & BD_FEP5PLUS) { - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - ch->ch_custom_speed = new_rate; - - dgap_param(ch, bd, un->un_type); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - } - - return 0; -} - -/* - * dgap_set_termios() - */ -static void dgap_tty_set_termios(struct tty_struct *tty, - struct ktermios *old_termios) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - unsigned long lock_flags; - unsigned long lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - ch->ch_c_cflag = tty->termios.c_cflag; - ch->ch_c_iflag = tty->termios.c_iflag; - ch->ch_c_oflag = tty->termios.c_oflag; - ch->ch_c_lflag = tty->termios.c_lflag; - ch->ch_startc = tty->termios.c_cc[VSTART]; - ch->ch_stopc = tty->termios.c_cc[VSTOP]; - - dgap_carrier(ch); - dgap_param(ch, bd, un->un_type); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); -} - -static void dgap_tty_throttle(struct tty_struct *tty) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - ch->ch_flags |= (CH_RXBLOCK); -#if 1 - dgap_cmdw(ch, RPAUSE, 0, 0); -#endif - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); -} - -static void dgap_tty_unthrottle(struct tty_struct *tty) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - ch->ch_flags &= ~(CH_RXBLOCK); - -#if 1 - dgap_cmdw(ch, RRESUME, 0, 0); -#endif - - 
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); -} - -static struct board_t *find_board_by_major(unsigned int major) -{ - unsigned int i; - - for (i = 0; i < MAXBOARDS; i++) { - struct board_t *brd = dgap_board[i]; - - if (!brd) - return NULL; - if (major == brd->serial_driver->major || - major == brd->print_driver->major) - return brd; - } - - return NULL; -} - -/************************************************************************ - * - * TTY Entry points and helper functions - * - ************************************************************************/ - -/* - * dgap_tty_open() - * - */ -static int dgap_tty_open(struct tty_struct *tty, struct file *file) -{ - struct board_t *brd; - struct channel_t *ch; - struct un_t *un; - struct bs_t __iomem *bs; - uint major; - uint minor; - int rc; - ulong lock_flags; - ulong lock_flags2; - u16 head; - - major = MAJOR(tty_devnum(tty)); - minor = MINOR(tty_devnum(tty)); - - brd = find_board_by_major(major); - if (!brd) - return -EIO; - - /* - * If board is not yet up to a state of READY, go to - * sleep waiting for it to happen or they cancel the open. - */ - rc = wait_event_interruptible(brd->state_wait, - (brd->state & BOARD_READY)); - - if (rc) - return rc; - - spin_lock_irqsave(&brd->bd_lock, lock_flags); - - /* The wait above should guarantee this cannot happen */ - if (brd->state != BOARD_READY) { - spin_unlock_irqrestore(&brd->bd_lock, lock_flags); - return -EIO; - } - - /* If opened device is greater than our number of ports, bail. */ - if (MINOR(tty_devnum(tty)) > brd->nasync) { - spin_unlock_irqrestore(&brd->bd_lock, lock_flags); - return -EIO; - } - - ch = brd->channels[minor]; - if (!ch) { - spin_unlock_irqrestore(&brd->bd_lock, lock_flags); - return -EIO; - } - - /* Grab channel lock */ - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - /* Figure out our type */ - if (major == brd->serial_driver->major) { - un = &brd->channels[minor]->ch_tun; - un->un_type = DGAP_SERIAL; - } else if (major == brd->print_driver->major) { - un = &brd->channels[minor]->ch_pun; - un->un_type = DGAP_PRINT; - } else { - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&brd->bd_lock, lock_flags); - return -EIO; - } - - /* Store our unit into driver_data, so we always have it available. */ - tty->driver_data = un; - - /* - * Error if channel info pointer is NULL. - */ - bs = ch->ch_bs; - if (!bs) { - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&brd->bd_lock, lock_flags); - return -EIO; - } - - /* - * Initialize tty's - */ - if (!(un->un_flags & UN_ISOPEN)) { - /* Store important variables. */ - un->un_tty = tty; - - /* Maybe do something here to the TTY struct as well? */ - } - - /* - * Initialize if neither terminal or printer is open. - */ - if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) { - ch->ch_mforce = 0; - ch->ch_mval = 0; - - /* - * Flush input queue. - */ - head = readw(&bs->rx_head); - writew(head, &bs->rx_tail); - - ch->ch_flags = 0; - ch->pscan_state = 0; - ch->pscan_savechar = 0; - - ch->ch_c_cflag = tty->termios.c_cflag; - ch->ch_c_iflag = tty->termios.c_iflag; - ch->ch_c_oflag = tty->termios.c_oflag; - ch->ch_c_lflag = tty->termios.c_lflag; - ch->ch_startc = tty->termios.c_cc[VSTART]; - ch->ch_stopc = tty->termios.c_cc[VSTOP]; - - /* TODO: flush our TTY struct here? 
*/ - } - - dgap_carrier(ch); - /* - * Run param in case we changed anything - */ - dgap_param(ch, brd, un->un_type); - - /* - * follow protocol for opening port - */ - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&brd->bd_lock, lock_flags); - - rc = dgap_block_til_ready(tty, file, ch); - - if (!un->un_tty) - return -ENODEV; - - /* No going back now, increment our unit and channel counters */ - spin_lock_irqsave(&ch->ch_lock, lock_flags); - ch->ch_open_count++; - un->un_open_count++; - un->un_flags |= (UN_ISOPEN); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - return rc; -} - -/* - * dgap_tty_close() - * - */ -static void dgap_tty_close(struct tty_struct *tty, struct file *file) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - - /* - * Determine if this is the last close or not - and if we agree about - * which type of close it is with the Line Discipline - */ - if ((tty->count == 1) && (un->un_open_count != 1)) { - /* - * Uh, oh. tty->count is 1, which means that the tty - * structure will be freed. un_open_count should always - * be one in these conditions. If it's greater than - * one, we've got real problems, since it means the - * serial port won't be shutdown. - */ - un->un_open_count = 1; - } - - if (--un->un_open_count < 0) - un->un_open_count = 0; - - ch->ch_open_count--; - - if (ch->ch_open_count && un->un_open_count) { - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - return; - } - - /* OK, its the last close on the unit */ - - un->un_flags |= UN_CLOSING; - - tty->closing = 1; - - /* - * Only officially close channel if count is 0 and - * DIGI_PRINTER bit is not set. - */ - if ((ch->ch_open_count == 0) && - !(ch->ch_digi.digi_flags & DIGI_PRINTER)) { - ch->ch_flags &= ~(CH_RXBLOCK); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); - - /* wait for output to drain */ - /* This will also return if we take an interrupt */ - - dgap_wait_for_drain(tty); - - dgap_tty_flush_buffer(tty); - tty_ldisc_flush(tty); - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - - tty->closing = 0; - - /* - * If we have HUPCL set, lower DTR and RTS - */ - if (ch->ch_c_cflag & HUPCL) { - ch->ch_mostat &= ~(D_RTS(ch) | D_DTR(ch)); - dgap_cmdb(ch, SMODEM, 0, D_DTR(ch) | D_RTS(ch), 0); - - /* - * Go to sleep to ensure RTS/DTR - * have been dropped for modems to see it. - */ - spin_unlock_irqrestore(&ch->ch_lock, - lock_flags); - - /* .25 second delay for dropping RTS/DTR */ - schedule_timeout_interruptible(msecs_to_jiffies(250)); - - spin_lock_irqsave(&ch->ch_lock, lock_flags); - } - - ch->pscan_state = 0; - ch->pscan_savechar = 0; - ch->ch_baud_info = 0; - } - - /* - * turn off print device when closing print device. 
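The close path above implements HUPCL by clearing the cached RTS/DTR state, telling the firmware to drop the lines, and then sleeping for a quarter second with the channel lock released so an attached modem actually sees the lines fall. A kernel-style sketch of that sequence; the channel struct, bit names and drop_lines() helper are placeholders rather than the driver's own:

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

struct fake_chan {
    spinlock_t lock;
    unsigned int mostat;
};
#define FAKE_RTS 0x1
#define FAKE_DTR 0x2

/* Placeholder for the firmware command that deasserts DTR and RTS. */
static void drop_lines(struct fake_chan *ch) { }

static void hupcl_drop_and_wait(struct fake_chan *ch)
{
    unsigned long flags;

    spin_lock_irqsave(&ch->lock, flags);
    ch->mostat &= ~(FAKE_RTS | FAKE_DTR);   /* remember the lines are down */
    drop_lines(ch);                         /* issue the hardware command */
    spin_unlock_irqrestore(&ch->lock, flags);

    /* Sleep with the lock released so the modem can observe the drop. */
    schedule_timeout_interruptible(msecs_to_jiffies(250));
}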
- */ - if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) { - dgap_wmove(ch, ch->ch_digi.digi_offstr, - (int)ch->ch_digi.digi_offlen); - ch->ch_flags &= ~CH_PRON; - } - - un->un_tty = NULL; - un->un_flags &= ~(UN_ISOPEN | UN_CLOSING); - tty->driver_data = NULL; - - wake_up_interruptible(&ch->ch_flags_wait); - wake_up_interruptible(&un->un_flags_wait); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags); -} - -static void dgap_tty_start(struct tty_struct *tty) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - dgap_cmdw(ch, RESUMETX, 0, 0); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); -} - -static void dgap_tty_stop(struct tty_struct *tty) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - dgap_cmdw(ch, PAUSETX, 0, 0); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); -} - -/* - * dgap_tty_flush_chars() - * - * Flush the cook buffer - * - * Note to self, and any other poor souls who venture here: - * - * flush in this case DOES NOT mean dispose of the data. - * instead, it means "stop buffering and send it if you - * haven't already." Just guess how I figured that out... 
SRW 2-Jun-98 - * - * It is also always called in interrupt context - JAR 8-Sept-99 - */ -static void dgap_tty_flush_chars(struct tty_struct *tty) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - ulong lock_flags; - ulong lock_flags2; - - if (!tty || tty->magic != TTY_MAGIC) - return; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - /* TODO: Do something here */ - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); -} - -/***************************************************************************** - * - * The IOCTL function and all of its helpers - * - *****************************************************************************/ - -/* - * dgap_tty_ioctl() - * - * The usual assortment of ioctl's - */ -static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, - unsigned long arg) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - int rc; - u16 head; - ulong lock_flags = 0; - ulong lock_flags2 = 0; - void __user *uarg = (void __user *)arg; - - if (!tty || tty->magic != TTY_MAGIC) - return -ENODEV; - - un = tty->driver_data; - if (!un || un->magic != DGAP_UNIT_MAGIC) - return -ENODEV; - - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return -ENODEV; - - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return -ENODEV; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - if (un->un_open_count <= 0) { - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return -EIO; - } - - switch (cmd) { - /* Here are all the standard ioctl's that we MUST implement */ - case TCSBRK: - /* - * TCSBRK is SVID version: non-zero arg --> no break - * this behaviour is exploited by tcdrain(). - * - * According to POSIX.1 spec (7.2.2.1.2) breaks should be - * between 0.25 and 0.5 seconds so we'll ask for something - * in the middle: 0.375 seconds. - */ - rc = tty_check_change(tty); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - if (rc) - return rc; - - rc = dgap_wait_for_drain(tty); - - if (rc) - return -EINTR; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP)) - dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; - - case TCSBRKP: - /* support for POSIX tcsendbreak() - - * According to POSIX.1 spec (7.2.2.1.2) breaks should be - * between 0.25 and 0.5 seconds so we'll ask for something - * in the middle: 0.375 seconds. 
- */ - rc = tty_check_change(tty); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - if (rc) - return rc; - - rc = dgap_wait_for_drain(tty); - if (rc) - return -EINTR; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; - - case TIOCSBRK: - /* - * FEP5 doesn't support turning on a break unconditionally. - * The FEP5 device will stop sending a break automatically - * after the specified time value that was sent when turning on - * the break. - */ - rc = tty_check_change(tty); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - if (rc) - return rc; - - rc = dgap_wait_for_drain(tty); - if (rc) - return -EINTR; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - - dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0); - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; - - case TIOCCBRK: - /* - * FEP5 doesn't support turning off a break unconditionally. - * The FEP5 device will stop sending a break automatically - * after the specified time value that was sent when turning on - * the break. - */ - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return 0; - - case TIOCGSOFTCAR: - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return put_user(C_CLOCAL(tty) ? 1 : 0, - (unsigned long __user *)arg); - - case TIOCSSOFTCAR: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - rc = get_user(arg, (unsigned long __user *)arg); - if (rc) - return rc; - - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | - (arg ? CLOCAL : 0)); - dgap_param(ch, bd, un->un_type); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return 0; - - case TIOCMGET: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return dgap_get_modem_info(ch, uarg); - - case TIOCMBIS: - case TIOCMBIC: - case TIOCMSET: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return dgap_set_modem_info(ch, bd, un, cmd, uarg); - - /* - * Here are any additional ioctl's that we want to implement - */ - - case TCFLSH: - /* - * The linux tty driver doesn't have a flush - * input routine for the driver, assuming all backed - * up data is in the line disc. buffers. However, - * we all know that's not the case. Here, we - * act on the ioctl, but then lie and say we didn't - * so the line discipline will process the flush - * also. 
- */ - rc = tty_check_change(tty); - if (rc) { - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return rc; - } - - if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) { - if (!(un->un_type == DGAP_PRINT)) { - head = readw(&ch->ch_bs->rx_head); - writew(head, &ch->ch_bs->rx_tail); - writeb(0, &ch->ch_bs->orun); - } - } - - if ((arg != TCOFLUSH) && (arg != TCIOFLUSH)) { - /* pretend we didn't recognize this IOCTL */ - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return -ENOIOCTLCMD; - } - - ch->ch_flags &= ~CH_STOP; - head = readw(&ch->ch_bs->tx_head); - dgap_cmdw(ch, FLUSHTX, (u16)head, 0); - dgap_cmdw(ch, RESUMETX, 0, 0); - if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) { - ch->ch_tun.un_flags &= ~(UN_LOW | UN_EMPTY); - wake_up_interruptible(&ch->ch_tun.un_flags_wait); - } - if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) { - ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY); - wake_up_interruptible(&ch->ch_pun.un_flags_wait); - } - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - tty_wakeup(tty); - - /* pretend we didn't recognize this IOCTL */ - return -ENOIOCTLCMD; - - case TCSETSF: - case TCSETSW: - /* - * The linux tty driver doesn't have a flush - * input routine for the driver, assuming all backed - * up data is in the line disc. buffers. However, - * we all know that's not the case. Here, we - * act on the ioctl, but then lie and say we didn't - * so the line discipline will process the flush - * also. - */ - if (cmd == TCSETSF) { - /* flush rx */ - ch->ch_flags &= ~CH_STOP; - head = readw(&ch->ch_bs->rx_head); - writew(head, &ch->ch_bs->rx_tail); - } - - /* now wait for all the output to drain */ - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - rc = dgap_wait_for_drain(tty); - if (rc) - return -EINTR; - - /* pretend we didn't recognize this */ - return -ENOIOCTLCMD; - - case TCSETAW: - - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - rc = dgap_wait_for_drain(tty); - if (rc) - return -EINTR; - - /* pretend we didn't recognize this */ - return -ENOIOCTLCMD; - - case TCXONC: - /* - * The Linux Line Discipline (LD) would do this for us if we - * let it, but we have the special firmware options to do this - * the "right way" regardless of hardware or software flow - * control so we'll do it outselves instead of letting the LD - * do it. 
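Several ioctl cases above rely on the same trick: do the hardware-side work the driver cares about, then return -ENOIOCTLCMD anyway so the tty core and the line discipline still run their own handling of the same command. Reduced to its skeleton (kernel-style, with the board-side flush stubbed out as a placeholder):

#include <linux/tty.h>
#include <linux/errno.h>

/* Placeholder for the real board-side flush work. */
static void flush_board_buffers(struct tty_struct *tty, unsigned long arg) { }

static int example_ioctl(struct tty_struct *tty, unsigned int cmd,
                         unsigned long arg)
{
    switch (cmd) {
    case TCFLSH:
        flush_board_buffers(tty, arg);
        /*
         * Lie to the caller: the core sees -ENOIOCTLCMD and lets the
         * line discipline perform its own flush as well.
         */
        return -ENOIOCTLCMD;
    default:
        return -ENOIOCTLCMD;
    }
}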
- */ - rc = tty_check_change(tty); - if (rc) { - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return rc; - } - - switch (arg) { - case TCOON: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - dgap_tty_start(tty); - return 0; - case TCOOFF: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - dgap_tty_stop(tty); - return 0; - case TCION: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - /* Make the ld do it */ - return -ENOIOCTLCMD; - case TCIOFF: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - /* Make the ld do it */ - return -ENOIOCTLCMD; - default: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return -EINVAL; - } - - case DIGI_GETA: - /* get information for ditty */ - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return dgap_tty_digigeta(ch, uarg); - - case DIGI_SETAW: - case DIGI_SETAF: - - /* set information for ditty */ - if (cmd == (DIGI_SETAW)) { - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - rc = dgap_wait_for_drain(tty); - if (rc) - return -EINTR; - spin_lock_irqsave(&bd->bd_lock, lock_flags); - spin_lock_irqsave(&ch->ch_lock, lock_flags2); - } else - tty_ldisc_flush(tty); - /* fall thru */ - - case DIGI_SETA: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return dgap_tty_digiseta(ch, bd, un, uarg); - - case DIGI_GEDELAY: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return dgap_tty_digigetedelay(tty, uarg); - - case DIGI_SEDELAY: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return dgap_tty_digisetedelay(ch, bd, un, uarg); - - case DIGI_GETCUSTOMBAUD: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return dgap_tty_digigetcustombaud(ch, un, uarg); - - case DIGI_SETCUSTOMBAUD: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return dgap_tty_digisetcustombaud(ch, bd, un, uarg); - - case DIGI_RESET_PORT: - dgap_firmware_reset_port(ch); - dgap_param(ch, bd, un->un_type); - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - return 0; - - default: - spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); - spin_unlock_irqrestore(&bd->bd_lock, lock_flags); - - return -ENOIOCTLCMD; - } -} - -static const struct tty_operations dgap_tty_ops = { - .open = dgap_tty_open, - .close = dgap_tty_close, - .write = dgap_tty_write, - .write_room = dgap_tty_write_room, - .flush_buffer = dgap_tty_flush_buffer, - .chars_in_buffer = dgap_tty_chars_in_buffer, - .flush_chars = dgap_tty_flush_chars, - .ioctl = dgap_tty_ioctl, - .set_termios = dgap_tty_set_termios, - .stop = dgap_tty_stop, - .start = dgap_tty_start, - .throttle = dgap_tty_throttle, - .unthrottle = dgap_tty_unthrottle, - .hangup = dgap_tty_hangup, - .put_char = dgap_tty_put_char, - .tiocmget = dgap_tty_tiocmget, - .tiocmset = dgap_tty_tiocmset, - .break_ctl = dgap_tty_send_break, - .wait_until_sent = dgap_tty_wait_until_sent, - .send_xchar = 
dgap_tty_send_xchar -}; - -/************************************************************************ - * - * TTY Initialization/Cleanup Functions - * - ************************************************************************/ - -/* - * dgap_tty_register() - * - * Init the tty subsystem for this board. - */ -static int dgap_tty_register(struct board_t *brd) -{ - int rc; - - brd->serial_driver = tty_alloc_driver(MAXPORTS, - TTY_DRIVER_REAL_RAW | - TTY_DRIVER_DYNAMIC_DEV | - TTY_DRIVER_HARDWARE_BREAK); - if (IS_ERR(brd->serial_driver)) - return PTR_ERR(brd->serial_driver); - - snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_", - brd->boardnum); - brd->serial_driver->name = brd->serial_name; - brd->serial_driver->name_base = 0; - brd->serial_driver->major = 0; - brd->serial_driver->minor_start = 0; - brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL; - brd->serial_driver->subtype = SERIAL_TYPE_NORMAL; - brd->serial_driver->init_termios = dgap_default_termios; - brd->serial_driver->driver_name = DRVSTR; - - /* - * Entry points for driver. Called by the kernel from - * tty_io.c and n_tty.c. - */ - tty_set_operations(brd->serial_driver, &dgap_tty_ops); - - /* - * If we're doing transparent print, we have to do all of the above - * again, separately so we don't get the LD confused about what major - * we are when we get into the dgap_tty_open() routine. - */ - brd->print_driver = tty_alloc_driver(MAXPORTS, - TTY_DRIVER_REAL_RAW | - TTY_DRIVER_DYNAMIC_DEV | - TTY_DRIVER_HARDWARE_BREAK); - if (IS_ERR(brd->print_driver)) { - rc = PTR_ERR(brd->print_driver); - goto free_serial_drv; - } - - snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_", - brd->boardnum); - brd->print_driver->name = brd->print_name; - brd->print_driver->name_base = 0; - brd->print_driver->major = 0; - brd->print_driver->minor_start = 0; - brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL; - brd->print_driver->subtype = SERIAL_TYPE_NORMAL; - brd->print_driver->init_termios = dgap_default_termios; - brd->print_driver->driver_name = DRVSTR; - - /* - * Entry points for driver. Called by the kernel from - * tty_io.c and n_tty.c. - */ - tty_set_operations(brd->print_driver, &dgap_tty_ops); - - /* Register tty devices */ - rc = tty_register_driver(brd->serial_driver); - if (rc < 0) - goto free_print_drv; - - /* Register Transparent Print devices */ - rc = tty_register_driver(brd->print_driver); - if (rc < 0) - goto unregister_serial_drv; - - return 0; - -unregister_serial_drv: - tty_unregister_driver(brd->serial_driver); -free_print_drv: - put_tty_driver(brd->print_driver); -free_serial_drv: - put_tty_driver(brd->serial_driver); - - return rc; -} - -static void dgap_tty_unregister(struct board_t *brd) -{ - tty_unregister_driver(brd->print_driver); - tty_unregister_driver(brd->serial_driver); - put_tty_driver(brd->print_driver); - put_tty_driver(brd->serial_driver); -} - -static int dgap_alloc_flipbuf(struct board_t *brd) -{ - /* - * allocate flip buffer for board. 
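dgap_tty_register() above acquires two tty drivers and registers both, unwinding in reverse order through goto labels when a later step fails, and dgap_alloc_flipbuf(), whose body follows just below, applies the same shape to its two flip buffers. The general pattern, shown with plain allocations in standalone C:

#include <stdlib.h>

struct two_buffers {
    void *a;
    void *b;
};

/* Acquire two resources; on failure release whatever was already
 * obtained, in reverse order, via goto labels. */
static int get_two(struct two_buffers *t, size_t len)
{
    int rc = -1;

    t->a = malloc(len);
    if (!t->a)
        goto out;

    t->b = malloc(len);
    if (!t->b)
        goto free_a;

    return 0;

free_a:
    free(t->a);
    t->a = NULL;
out:
    return rc;
}

int main(void)
{
    struct two_buffers t = { NULL, NULL };

    if (get_two(&t, 64))
        return 1;
    free(t.b);
    free(t.a);
    return 0;
}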
- */ - brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL); - if (!brd->flipbuf) - return -ENOMEM; - - brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL); - if (!brd->flipflagbuf) { - kfree(brd->flipbuf); - return -ENOMEM; - } - - return 0; -} - -static void dgap_free_flipbuf(struct board_t *brd) -{ - kfree(brd->flipbuf); - kfree(brd->flipflagbuf); -} - -static struct board_t *dgap_verify_board(struct device *p) -{ - struct board_t *bd; - - if (!p) - return NULL; - - bd = dev_get_drvdata(p); - if (!bd || bd->magic != DGAP_BOARD_MAGIC || bd->state != BOARD_READY) - return NULL; - - return bd; -} - -static ssize_t dgap_ports_state_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, - "%d %s\n", bd->channels[i]->ch_portnum, - bd->channels[i]->ch_open_count ? "Open" : "Closed"); - } - return count; -} -static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL); - -static ssize_t dgap_ports_baud_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) { - count += snprintf(buf + count, PAGE_SIZE - count, "%d %d\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_baud_info); - } - return count; -} -static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL); - -static ssize_t dgap_ports_msignals_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) { - if (bd->channels[i]->ch_open_count) - count += snprintf(buf + count, PAGE_SIZE - count, - "%d %s %s %s %s %s %s\n", - bd->channels[i]->ch_portnum, - (bd->channels[i]->ch_mostat & - UART_MCR_RTS) ? "RTS" : "", - (bd->channels[i]->ch_mistat & - UART_MSR_CTS) ? "CTS" : "", - (bd->channels[i]->ch_mostat & - UART_MCR_DTR) ? "DTR" : "", - (bd->channels[i]->ch_mistat & - UART_MSR_DSR) ? "DSR" : "", - (bd->channels[i]->ch_mistat & - UART_MSR_DCD) ? "DCD" : "", - (bd->channels[i]->ch_mistat & - UART_MSR_RI) ? 
"RI" : ""); - else - count += snprintf(buf + count, PAGE_SIZE - count, - "%d\n", bd->channels[i]->ch_portnum); - } - return count; -} -static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL); - -static ssize_t dgap_ports_iflag_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_c_iflag); - return count; -} -static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL); - -static ssize_t dgap_ports_cflag_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_c_cflag); - return count; -} -static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL); - -static ssize_t dgap_ports_oflag_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_c_oflag); - return count; -} -static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL); - -static ssize_t dgap_ports_lflag_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_c_lflag); - return count; -} -static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL); - -static ssize_t dgap_ports_digi_flag_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) - count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_digi.digi_flags); - return count; -} -static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL); - -static ssize_t dgap_ports_rxcount_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) - count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_rxcount); - return count; -} -static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL); - -static ssize_t dgap_ports_txcount_show(struct device *p, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - int count = 0; - unsigned int i; - - bd = dgap_verify_board(p); - if (!bd) - return 0; - - for (i = 0; i < bd->nasync; i++) - count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n", - bd->channels[i]->ch_portnum, - bd->channels[i]->ch_txcount); - return count; -} -static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL); - 
-static ssize_t dgap_tty_state_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ? - "Open" : "Closed"); -} -static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL); - -static ssize_t dgap_tty_baud_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info); -} -static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL); - -static ssize_t dgap_tty_msignals_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - if (ch->ch_open_count) { - return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n", - (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "", - (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "", - (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "", - (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "", - (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "", - (ch->ch_mistat & UART_MSR_RI) ? 
"RI" : ""); - } - return 0; -} -static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL); - -static ssize_t dgap_tty_iflag_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag); -} -static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL); - -static ssize_t dgap_tty_cflag_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag); -} -static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL); - -static ssize_t dgap_tty_oflag_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag); -} -static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL); - -static ssize_t dgap_tty_lflag_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag); -} -static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL); - -static ssize_t dgap_tty_digi_flag_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags); -} -static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL); - -static ssize_t dgap_tty_rxcount_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state 
!= BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount); -} -static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL); - -static ssize_t dgap_tty_txcount_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount); -} -static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL); - -static ssize_t dgap_tty_name_show(struct device *d, - struct device_attribute *attr, - char *buf) -{ - struct board_t *bd; - struct channel_t *ch; - struct un_t *un; - int cn; - int bn; - struct cnode *cptr; - int found = FALSE; - int ncount = 0; - int starto = 0; - int i; - - if (!d) - return 0; - un = dev_get_drvdata(d); - if (!un || un->magic != DGAP_UNIT_MAGIC) - return 0; - ch = un->un_ch; - if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) - return 0; - bd = ch->ch_bd; - if (!bd || bd->magic != DGAP_BOARD_MAGIC) - return 0; - if (bd->state != BOARD_READY) - return 0; - - bn = bd->boardnum; - cn = ch->ch_portnum; - - for (cptr = bd->bd_config; cptr; cptr = cptr->next) { - if ((cptr->type == BNODE) && - ((cptr->u.board.type == APORT2_920P) || - (cptr->u.board.type == APORT4_920P) || - (cptr->u.board.type == APORT8_920P) || - (cptr->u.board.type == PAPORT4) || - (cptr->u.board.type == PAPORT8))) { - found = TRUE; - if (cptr->u.board.v_start) - starto = cptr->u.board.start; - else - starto = 1; - } - - if (cptr->type == TNODE && found == TRUE) { - char *ptr1; - - if (strstr(cptr->u.ttyname, "tty")) { - ptr1 = cptr->u.ttyname; - ptr1 += 3; - } else - ptr1 = cptr->u.ttyname; - - for (i = 0; i < dgap_config_get_num_prts(bd); i++) { - if (cn != i) - continue; - - return snprintf(buf, PAGE_SIZE, "%s%s%02d\n", - (un->un_type == DGAP_PRINT) ? - "pr" : "tty", - ptr1, i + starto); - } - } - - if (cptr->type == CNODE) { - for (i = 0; i < cptr->u.conc.nport; i++) { - if (cn != (i + ncount)) - continue; - - return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n", - (un->un_type == DGAP_PRINT) ? - "pr" : "tty", - cptr->u.conc.id, - i + (cptr->u.conc.v_start ? - cptr->u.conc.start : 1)); - } - - ncount += cptr->u.conc.nport; - } - - if (cptr->type == MNODE) { - for (i = 0; i < cptr->u.module.nport; i++) { - if (cn != (i + ncount)) - continue; - - return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n", - (un->un_type == DGAP_PRINT) ? - "pr" : "tty", - cptr->u.module.id, - i + (cptr->u.module.v_start ? - cptr->u.module.start : 1)); - } - - ncount += cptr->u.module.nport; - } - } - - return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n", - (un->un_type == DGAP_PRINT) ? 
"pr" : "tty", bn, cn); -} -static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL); - -static struct attribute *dgap_sysfs_tty_entries[] = { - &dev_attr_state.attr, - &dev_attr_baud.attr, - &dev_attr_msignals.attr, - &dev_attr_iflag.attr, - &dev_attr_cflag.attr, - &dev_attr_oflag.attr, - &dev_attr_lflag.attr, - &dev_attr_digi_flag.attr, - &dev_attr_rxcount.attr, - &dev_attr_txcount.attr, - &dev_attr_custom_name.attr, - NULL -}; - - -/* this function creates the sys files that will export each signal status - * to sysfs each value will be put in a separate filename - */ -static void dgap_create_ports_sysfiles(struct board_t *bd) -{ - dev_set_drvdata(&bd->pdev->dev, bd); - device_create_file(&bd->pdev->dev, &dev_attr_ports_state); - device_create_file(&bd->pdev->dev, &dev_attr_ports_baud); - device_create_file(&bd->pdev->dev, &dev_attr_ports_msignals); - device_create_file(&bd->pdev->dev, &dev_attr_ports_iflag); - device_create_file(&bd->pdev->dev, &dev_attr_ports_cflag); - device_create_file(&bd->pdev->dev, &dev_attr_ports_oflag); - device_create_file(&bd->pdev->dev, &dev_attr_ports_lflag); - device_create_file(&bd->pdev->dev, &dev_attr_ports_digi_flag); - device_create_file(&bd->pdev->dev, &dev_attr_ports_rxcount); - device_create_file(&bd->pdev->dev, &dev_attr_ports_txcount); -} - -/* removes all the sys files created for that port */ -static void dgap_remove_ports_sysfiles(struct board_t *bd) -{ - device_remove_file(&bd->pdev->dev, &dev_attr_ports_state); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_baud); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_msignals); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_iflag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_cflag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_oflag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_lflag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_digi_flag); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_rxcount); - device_remove_file(&bd->pdev->dev, &dev_attr_ports_txcount); -} - -/* - * Copies the BIOS code from the user to the board, - * and starts the BIOS running. - */ -static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len) -{ - u8 __iomem *addr; - uint offset; - unsigned int i; - - if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase) - return; - - addr = brd->re_map_membase; - - /* - * clear POST area - */ - for (i = 0; i < 16; i++) - writeb(0, addr + POSTAREA + i); - - /* - * Download bios - */ - offset = 0x1000; - memcpy_toio(addr + offset, ubios, len); - - writel(0x0bf00401, addr); - writel(0, (addr + 4)); - - /* Clear the reset, and change states. */ - writeb(FEPCLR, brd->re_map_port); -} - -/* - * Checks to see if the BIOS completed running on the card. - */ -static int dgap_test_bios(struct board_t *brd) -{ - u8 __iomem *addr; - u16 word; - u16 err1; - u16 err2; - - if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase) - return -EINVAL; - - addr = brd->re_map_membase; - word = readw(addr + POSTAREA); - - /* - * It can take 5-6 seconds for a board to - * pass the bios self test and post results. - * Give it 10 seconds. - */ - brd->wait_for_bios = 0; - while (brd->wait_for_bios < 1000) { - /* Check to see if BIOS thinks board is good. (GD). 
*/ - if (word == *(u16 *)"GD") - return 0; - msleep_interruptible(10); - brd->wait_for_bios++; - word = readw(addr + POSTAREA); - } - - /* Gave up on board after too long of time taken */ - err1 = readw(addr + SEQUENCE); - err2 = readw(addr + ERROR); - dev_warn(&brd->pdev->dev, "%s failed diagnostics. Error #(%x,%x).\n", - brd->name, err1, err2); - brd->state = BOARD_FAILED; - brd->dpastatus = BD_NOBIOS; - - return -EIO; -} - -/* - * Copies the FEP code from the user to the board, - * and starts the FEP running. - */ -static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len) -{ - u8 __iomem *addr; - uint offset; - - if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase) - return; - - addr = brd->re_map_membase; - - /* - * Download FEP - */ - offset = 0x1000; - memcpy_toio(addr + offset, ufep, len); - - /* - * If board is a concentrator product, we need to give - * it its config string describing how the concentrators look. - */ - if ((brd->type == PCX) || (brd->type == PEPC)) { - u8 string[100]; - u8 __iomem *config; - u8 *xconfig; - unsigned int i = 0; - - xconfig = dgap_create_config_string(brd, string); - - /* Write string to board memory */ - config = addr + CONFIG; - for (; i < CONFIGSIZE; i++, config++, xconfig++) { - writeb(*xconfig, config); - if ((*xconfig & 0xff) == 0xff) - break; - } - } - - writel(0xbfc01004, (addr + 0xc34)); - writel(0x3, (addr + 0xc30)); -} - -/* - * Waits for the FEP to report thats its ready for us to use. - */ -static int dgap_test_fep(struct board_t *brd) -{ - u8 __iomem *addr; - u16 word; - u16 err1; - u16 err2; - - if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase) - return -EINVAL; - - addr = brd->re_map_membase; - word = readw(addr + FEPSTAT); - - /* - * It can take 2-3 seconds for the FEP to - * be up and running. Give it 5 secs. - */ - brd->wait_for_fep = 0; - while (brd->wait_for_fep < 500) { - /* Check to see if FEP is up and running now. */ - if (word == *(u16 *)"OS") { - /* - * Check to see if the board can support FEP5+ commands. - */ - word = readw(addr + FEP5_PLUS); - if (word == *(u16 *)"5A") - brd->bd_flags |= BD_FEP5PLUS; - - return 0; - } - msleep_interruptible(10); - brd->wait_for_fep++; - word = readw(addr + FEPSTAT); - } - - /* Gave up on board after too long of time taken */ - err1 = readw(addr + SEQUENCE); - err2 = readw(addr + ERROR); - dev_warn(&brd->pdev->dev, - "FEPOS for %s not functioning. Error #(%x,%x).\n", - brd->name, err1, err2); - brd->state = BOARD_FAILED; - brd->dpastatus = BD_NOFEP; - - return -EIO; -} - -/* - * Physically forces the FEP5 card to reset itself. - */ -static void dgap_do_reset_board(struct board_t *brd) -{ - u8 check; - u32 check1; - u32 check2; - unsigned int i; - - if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || - !brd->re_map_membase || !brd->re_map_port) - return; - - /* FEPRST does not vary among supported boards */ - writeb(FEPRST, brd->re_map_port); - - for (i = 0; i <= 1000; i++) { - check = readb(brd->re_map_port) & 0xe; - if (check == FEPRST) - break; - udelay(10); - } - if (i > 1000) { - dev_warn(&brd->pdev->dev, - "dgap: Board not resetting... Failing board.\n"); - brd->state = BOARD_FAILED; - brd->dpastatus = BD_NOFEP; - return; - } - - /* - * Make sure there really is memory out there. 
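[Editorial note: dgap_test_bios() and dgap_test_fep() above share one pattern: poll a 16-bit word in mapped board memory until it holds a two-character ASCII magic ("GD" once the BIOS POST passes, "OS" once the FEP is up), sleeping 10 ms between reads and giving up after a fixed number of tries. The helper below is a minimal, self-contained sketch of that pattern; the name wait_for_magic() and its parameters are illustrative and were not part of the removed driver. It assumes a little-endian host, which is what the original *(u16 *)"GD" comparison effectively relied on.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Illustrative helper, not driver code: poll a 16-bit status word at
 * base + offset until it equals the two-character magic ("GD", "OS").
 * Returns 0 on success, -ETIMEDOUT after max_polls tries 10 ms apart. */
static int wait_for_magic(u8 __iomem *base, unsigned long offset,
                          const char magic[2], unsigned int max_polls)
{
        u16 want = magic[0] | (magic[1] << 8);  /* little-endian pair */
        unsigned int i;

        for (i = 0; i < max_polls; i++) {
                if (readw(base + offset) == want)
                        return 0;
                msleep_interruptible(10);       /* same 10 ms step as above */
        }
        return -ETIMEDOUT;
}

With such a helper, the BIOS wait above would correspond to wait_for_magic(brd->re_map_membase, POSTAREA, "GD", 1000), and the FEP wait to wait_for_magic(brd->re_map_membase, FEPSTAT, "OS", 500).]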
- */ - writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM)); - writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM)); - check1 = readl(brd->re_map_membase + LOWMEM); - check2 = readl(brd->re_map_membase + HIGHMEM); - - if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) { - dev_warn(&brd->pdev->dev, - "No memory at %p for board.\n", - brd->re_map_membase); - brd->state = BOARD_FAILED; - brd->dpastatus = BD_NOFEP; - return; - } -} - -#ifdef DIGI_CONCENTRATORS_SUPPORTED -/* - * Sends a concentrator image into the FEP5 board. - */ -static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len) -{ - char __iomem *vaddr; - u16 offset; - struct downld_t *to_dp; - - if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase) - return; - - vaddr = brd->re_map_membase; - - offset = readw((u16 *)(vaddr + DOWNREQ)); - to_dp = (struct downld_t *)(vaddr + (int)offset); - memcpy_toio(to_dp, uaddr, len); - - /* Tell card we have data for it */ - writew(0, vaddr + (DOWNREQ)); - - brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS; -} -#endif - -#define EXPANSION_ROM_SIZE (64 * 1024) -#define FEP5_ROM_MAGIC (0xFEFFFFFF) - -static void dgap_get_vpd(struct board_t *brd) -{ - u32 magic; - u32 base_offset; - u16 rom_offset; - u16 vpd_offset; - u16 image_length; - u16 i; - u8 byte1; - u8 byte2; - - /* - * Poke the magic number at the PCI Rom Address location. - * If VPD is supported, the value read from that address - * will be non-zero. - */ - magic = FEP5_ROM_MAGIC; - pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic); - pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic); - - /* VPD not supported, bail */ - if (!magic) - return; - - /* - * To get to the OTPROM memory, we have to send the boards base - * address or'ed with 1 into the PCI Rom Address location. - */ - magic = brd->membase | 0x01; - pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic); - pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic); - - byte1 = readb(brd->re_map_membase); - byte2 = readb(brd->re_map_membase + 1); - - /* - * If the board correctly swapped to the OTPROM memory, - * the first 2 bytes (header) should be 0x55, 0xAA - */ - if (byte1 == 0x55 && byte2 == 0xAA) { - base_offset = 0; - - /* - * We have to run through all the OTPROM memory looking - * for the VPD offset. - */ - while (base_offset <= EXPANSION_ROM_SIZE) { - /* - * Lots of magic numbers here. - * - * The VPD offset is located inside the ROM Data - * Structure. - * - * We also have to remember the length of each - * ROM Data Structure, so we can "hop" to the next - * entry if the VPD isn't in the current - * ROM Data Structure. - */ - rom_offset = readw(brd->re_map_membase + - base_offset + 0x18); - image_length = readw(brd->re_map_membase + - rom_offset + 0x10) * 512; - vpd_offset = readw(brd->re_map_membase + - rom_offset + 0x08); - - /* Found the VPD entry */ - if (vpd_offset) - break; - - /* We didn't find a VPD entry, go to next ROM entry. */ - base_offset += image_length; - - byte1 = readb(brd->re_map_membase + base_offset); - byte2 = readb(brd->re_map_membase + base_offset + 1); - - /* - * If the new ROM offset doesn't have 0x55, 0xAA - * as its header, we have run out of ROM. - */ - if (byte1 != 0x55 || byte2 != 0xAA) - break; - } - - /* - * If we have a VPD offset, then mark the board - * as having a valid VPD, and copy VPDSIZE (512) bytes of - * that VPD to the buffer we have in our board structure. 
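[Editorial note: the VPD hunt above leans on the standard PCI expansion ROM layout: every image begins with the 0x55 0xAA signature, carries a pointer to its PCI data structure at offset 0x18, and that structure holds the image length in 512-byte units at offset 0x10 and a VPD pointer at offset 0x08, so the code hops image by image until one advertises VPD. The sketch below condenses that walk; find_vpd_offset() is an illustrative name, not a function from the removed driver.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative walk of PCI expansion ROM images mapped at "rom":
 * return the first non-zero VPD pointer, or 0 if none is found. */
static u16 find_vpd_offset(u8 __iomem *rom, u32 rom_size)
{
        u32 base = 0;

        while (base < rom_size) {
                u16 pcids, vpd;
                u32 len;

                /* Each image must start with the 0x55, 0xAA header. */
                if (readb(rom + base) != 0x55 || readb(rom + base + 1) != 0xAA)
                        break;

                pcids = readw(rom + base + 0x18);        /* PCI data struct ptr */
                len   = readw(rom + pcids + 0x10) * 512; /* image length, bytes */
                vpd   = readw(rom + pcids + 0x08);       /* VPD ptr, 0 = none   */

                if (vpd)
                        return vpd;
                if (!len)                                /* malformed image     */
                        break;
                base += len;                             /* hop to next image   */
        }
        return 0;
}]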
- */ - if (vpd_offset) { - brd->bd_flags |= BD_HAS_VPD; - for (i = 0; i < VPDSIZE; i++) { - brd->vpd[i] = readb(brd->re_map_membase + - vpd_offset + i); - } - } - } - - /* - * We MUST poke the magic number at the PCI Rom Address location again. - * This makes the card report the regular board memory back to us, - * rather than the OTPROM memory. - */ - magic = FEP5_ROM_MAGIC; - pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic); -} - - -static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART); -} -static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL); - - -static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards); -} -static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL); - - -static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS); -} -static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL); - - -static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter); -} -static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL); - -static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick); -} - -static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp, - const char *buf, size_t count) -{ - if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1) - return -EINVAL; - return count; -} -static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show, - dgap_driver_pollrate_store); - - -static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver) -{ - int rc = 0; - struct device_driver *driverfs = &dgap_driver->driver; - - rc |= driver_create_file(driverfs, &driver_attr_version); - rc |= driver_create_file(driverfs, &driver_attr_boards); - rc |= driver_create_file(driverfs, &driver_attr_maxboards); - rc |= driver_create_file(driverfs, &driver_attr_pollrate); - rc |= driver_create_file(driverfs, &driver_attr_pollcounter); - - return rc; -} - -static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver) -{ - struct device_driver *driverfs = &dgap_driver->driver; - - driver_remove_file(driverfs, &driver_attr_version); - driver_remove_file(driverfs, &driver_attr_boards); - driver_remove_file(driverfs, &driver_attr_maxboards); - driver_remove_file(driverfs, &driver_attr_pollrate); - driver_remove_file(driverfs, &driver_attr_pollcounter); -} - -static struct attribute_group dgap_tty_attribute_group = { - .name = NULL, - .attrs = dgap_sysfs_tty_entries, -}; - -static void dgap_create_tty_sysfs(struct un_t *un, struct device *c) -{ - int ret; - - ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group); - if (ret) - return; - - dev_set_drvdata(c, un); -} - -static void dgap_remove_tty_sysfs(struct device *c) -{ - sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group); -} - -/* - * Create pr and tty device entries - */ -static int dgap_tty_register_ports(struct board_t *brd) -{ - struct channel_t *ch; - int i; - int ret; - - brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports), - GFP_KERNEL); - if (!brd->serial_ports) - return -ENOMEM; - - brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports), - GFP_KERNEL); - if (!brd->printer_ports) { - ret = -ENOMEM; - goto 
free_serial_ports; - } - - for (i = 0; i < brd->nasync; i++) { - tty_port_init(&brd->serial_ports[i]); - tty_port_init(&brd->printer_ports[i]); - } - - ch = brd->channels[0]; - for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) { - struct device *classp; - - classp = tty_port_register_device(&brd->serial_ports[i], - brd->serial_driver, - i, NULL); - - if (IS_ERR(classp)) { - ret = PTR_ERR(classp); - goto unregister_ttys; - } - - dgap_create_tty_sysfs(&ch->ch_tun, classp); - ch->ch_tun.un_sysfs = classp; - - classp = tty_port_register_device(&brd->printer_ports[i], - brd->print_driver, - i, NULL); - - if (IS_ERR(classp)) { - ret = PTR_ERR(classp); - goto unregister_ttys; - } - - dgap_create_tty_sysfs(&ch->ch_pun, classp); - ch->ch_pun.un_sysfs = classp; - } - dgap_create_ports_sysfiles(brd); - - return 0; - -unregister_ttys: - while (i >= 0) { - ch = brd->channels[i]; - if (ch->ch_tun.un_sysfs) { - dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs); - tty_unregister_device(brd->serial_driver, i); - } - - if (ch->ch_pun.un_sysfs) { - dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs); - tty_unregister_device(brd->print_driver, i); - } - i--; - } - - for (i = 0; i < brd->nasync; i++) { - tty_port_destroy(&brd->serial_ports[i]); - tty_port_destroy(&brd->printer_ports[i]); - } - - kfree(brd->printer_ports); - brd->printer_ports = NULL; - -free_serial_ports: - kfree(brd->serial_ports); - brd->serial_ports = NULL; - - return ret; -} - -/* - * dgap_cleanup_tty() - * - * Uninitialize the TTY portion of this driver. Free all memory and - * resources. - */ -static void dgap_cleanup_tty(struct board_t *brd) -{ - struct device *dev; - unsigned int i; - - for (i = 0; i < brd->nasync; i++) { - tty_port_destroy(&brd->serial_ports[i]); - dev = brd->channels[i]->ch_tun.un_sysfs; - dgap_remove_tty_sysfs(dev); - tty_unregister_device(brd->serial_driver, i); - } - tty_unregister_driver(brd->serial_driver); - put_tty_driver(brd->serial_driver); - kfree(brd->serial_ports); - - for (i = 0; i < brd->nasync; i++) { - tty_port_destroy(&brd->printer_ports[i]); - dev = brd->channels[i]->ch_pun.un_sysfs; - dgap_remove_tty_sysfs(dev); - tty_unregister_device(brd->print_driver, i); - } - tty_unregister_driver(brd->print_driver); - put_tty_driver(brd->print_driver); - kfree(brd->printer_ports); -} - -static int dgap_request_irq(struct board_t *brd) -{ - int rc; - - if (!brd || brd->magic != DGAP_BOARD_MAGIC) - return -ENODEV; - - /* - * Set up our interrupt handler if we are set to do interrupts. - */ - if (dgap_config_get_useintr(brd) && brd->irq) { - rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd); - - if (!rc) - brd->intr_used = 1; - } - return 0; -} - -static void dgap_free_irq(struct board_t *brd) -{ - if (brd->intr_used && brd->irq) - free_irq(brd->irq, brd); -} - -static int dgap_firmware_load(struct pci_dev *pdev, int card_type, - struct board_t *brd) -{ - const struct firmware *fw; - char *tmp_ptr; - int ret; - char *dgap_config_buf; - - dgap_get_vpd(brd); - dgap_do_reset_board(brd); - - if (fw_info[card_type].conf_name) { - ret = request_firmware(&fw, fw_info[card_type].conf_name, - &pdev->dev); - if (ret) { - dev_err(&pdev->dev, "config file %s not found\n", - fw_info[card_type].conf_name); - return ret; - } - - dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL); - if (!dgap_config_buf) { - release_firmware(fw); - return -ENOMEM; - } - - memcpy(dgap_config_buf, fw->data, fw->size); - release_firmware(fw); - - /* - * preserve dgap_config_buf - * as dgap_parsefile would - * otherwise alter it. 
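[Editorial note: the comment above exists because dgap_parsefile() advances the pointer it is handed while it consumes the configuration text; the driver therefore keeps the allocation in dgap_config_buf and gives the parser a scratch cursor (tmp_ptr), so the later kfree() still receives the original pointer. The fragment below is a self-contained illustration of that idiom using a hypothetical parser, not code from the driver.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical parser that consumes its input by advancing *pp. */
static int toy_parse(char **pp)
{
        while (**pp)
                (*pp)++;
        return 0;
}

static int load_and_parse(const u8 *data, size_t size)
{
        char *buf, *cursor;
        int ret;

        buf = kzalloc(size + 1, GFP_KERNEL);    /* NUL-terminated copy */
        if (!buf)
                return -ENOMEM;
        memcpy(buf, data, size);

        cursor = buf;           /* the parser moves cursor, never buf */
        ret = toy_parse(&cursor);

        kfree(buf);             /* buf still points at the allocation */
        return ret;
}]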
- */ - tmp_ptr = dgap_config_buf; - - if (dgap_parsefile(&tmp_ptr) != 0) { - kfree(dgap_config_buf); - return -EINVAL; - } - kfree(dgap_config_buf); - } - - /* - * Match this board to a config the user created for us. - */ - brd->bd_config = - dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot); - - /* - * Because the 4 port Xr products share the same PCI ID - * as the 8 port Xr products, if we receive a NULL config - * back, and this is a PAPORT8 board, retry with a - * PAPORT4 attempt as well. - */ - if (brd->type == PAPORT8 && !brd->bd_config) - brd->bd_config = - dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot); - - if (!brd->bd_config) { - dev_err(&pdev->dev, "No valid configuration found\n"); - return -EINVAL; - } - - if (fw_info[card_type].bios_name) { - ret = request_firmware(&fw, fw_info[card_type].bios_name, - &pdev->dev); - if (ret) { - dev_err(&pdev->dev, "bios file %s not found\n", - fw_info[card_type].bios_name); - return ret; - } - dgap_do_bios_load(brd, fw->data, fw->size); - release_firmware(fw); - - /* Wait for BIOS to test board... */ - ret = dgap_test_bios(brd); - if (ret) - return ret; - } - - if (fw_info[card_type].fep_name) { - ret = request_firmware(&fw, fw_info[card_type].fep_name, - &pdev->dev); - if (ret) { - dev_err(&pdev->dev, "dgap: fep file %s not found\n", - fw_info[card_type].fep_name); - return ret; - } - dgap_do_fep_load(brd, fw->data, fw->size); - release_firmware(fw); - - /* Wait for FEP to load on board... */ - ret = dgap_test_fep(brd); - if (ret) - return ret; - } - -#ifdef DIGI_CONCENTRATORS_SUPPORTED - /* - * If this is a CX or EPCX, we need to see if the firmware - * is requesting a concentrator image from us. - */ - if ((bd->type == PCX) || (bd->type == PEPC)) { - chk_addr = (u16 *)(vaddr + DOWNREQ); - /* Nonzero if FEP is requesting concentrator image. */ - check = readw(chk_addr); - vaddr = brd->re_map_membase; - } - - if (fw_info[card_type].con_name && check && vaddr) { - ret = request_firmware(&fw, fw_info[card_type].con_name, - &pdev->dev); - if (ret) { - dev_err(&pdev->dev, "conc file %s not found\n", - fw_info[card_type].con_name); - return ret; - } - /* Put concentrator firmware loading code here */ - offset = readw((u16 *)(vaddr + DOWNREQ)); - memcpy_toio(offset, fw->data, fw->size); - - dgap_do_conc_load(brd, (char *)fw->data, fw->size) - release_firmware(fw); - } -#endif - - return 0; -} - -/* - * dgap_tty_init() - * - * Init the tty subsystem. Called once per board after board has been - * downloaded and init'ed. - */ -static int dgap_tty_init(struct board_t *brd) -{ - int i; - int tlw; - uint true_count; - u8 __iomem *vaddr; - u8 modem; - struct channel_t *ch; - struct bs_t __iomem *bs; - struct cm_t __iomem *cm; - int ret; - - /* - * Initialize board structure elements. 
- */ - - vaddr = brd->re_map_membase; - true_count = readw((vaddr + NCHAN)); - - brd->nasync = dgap_config_get_num_prts(brd); - - if (!brd->nasync) - brd->nasync = brd->maxports; - - if (brd->nasync > brd->maxports) - brd->nasync = brd->maxports; - - if (true_count != brd->nasync) { - dev_warn(&brd->pdev->dev, - "%s configured for %d ports, has %d ports.\n", - brd->name, brd->nasync, true_count); - - if ((brd->type == PPCM) && - (true_count == 64 || true_count == 0)) { - dev_warn(&brd->pdev->dev, - "Please make SURE the EBI cable running from the card\n"); - dev_warn(&brd->pdev->dev, - "to each EM module is plugged into EBI IN!\n"); - } - - brd->nasync = true_count; - - /* If no ports, don't bother going any further */ - if (!brd->nasync) { - brd->state = BOARD_FAILED; - brd->dpastatus = BD_NOFEP; - return -EIO; - } - } - - /* - * Allocate channel memory that might not have been allocated - * when the driver was first loaded. - */ - for (i = 0; i < brd->nasync; i++) { - brd->channels[i] = - kzalloc(sizeof(struct channel_t), GFP_KERNEL); - if (!brd->channels[i]) { - ret = -ENOMEM; - goto free_chan; - } - } - - ch = brd->channels[0]; - vaddr = brd->re_map_membase; - - bs = (struct bs_t __iomem *)((ulong)vaddr + CHANBUF); - cm = (struct cm_t __iomem *)((ulong)vaddr + CMDBUF); - - brd->bd_bs = bs; - - /* Set up channel variables */ - for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) { - spin_lock_init(&ch->ch_lock); - - /* Store all our magic numbers */ - ch->magic = DGAP_CHANNEL_MAGIC; - ch->ch_tun.magic = DGAP_UNIT_MAGIC; - ch->ch_tun.un_type = DGAP_SERIAL; - ch->ch_tun.un_ch = ch; - ch->ch_tun.un_dev = i; - - ch->ch_pun.magic = DGAP_UNIT_MAGIC; - ch->ch_pun.un_type = DGAP_PRINT; - ch->ch_pun.un_ch = ch; - ch->ch_pun.un_dev = i; - - ch->ch_vaddr = vaddr; - ch->ch_bs = bs; - ch->ch_cm = cm; - ch->ch_bd = brd; - ch->ch_portnum = i; - ch->ch_digi = dgap_digi_init; - - /* - * Set up digi dsr and dcd bits based on altpin flag. - */ - if (dgap_config_get_altpin(brd)) { - ch->ch_dsr = DM_CD; - ch->ch_cd = DM_DSR; - ch->ch_digi.digi_flags |= DIGI_ALTPIN; - } else { - ch->ch_cd = DM_CD; - ch->ch_dsr = DM_DSR; - } - - ch->ch_taddr = vaddr + (ioread16(&ch->ch_bs->tx_seg) << 4); - ch->ch_raddr = vaddr + (ioread16(&ch->ch_bs->rx_seg) << 4); - ch->ch_tx_win = 0; - ch->ch_rx_win = 0; - ch->ch_tsize = readw(&ch->ch_bs->tx_max) + 1; - ch->ch_rsize = readw(&ch->ch_bs->rx_max) + 1; - ch->ch_tstart = 0; - ch->ch_rstart = 0; - - /* - * Set queue water marks, interrupt mask, - * and general tty parameters. - */ - tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) : - ch->ch_tsize / 2; - ch->ch_tlw = tlw; - - dgap_cmdw(ch, STLOW, tlw, 0); - - dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0); - - dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0); - - ch->ch_mistat = readb(&ch->ch_bs->m_stat); - - init_waitqueue_head(&ch->ch_flags_wait); - init_waitqueue_head(&ch->ch_tun.un_flags_wait); - init_waitqueue_head(&ch->ch_pun.un_flags_wait); - - /* Turn on all modem interrupts for now */ - modem = (DM_CD | DM_DSR | DM_CTS | DM_RI); - writeb(modem, &ch->ch_bs->m_int); - - /* - * Set edelay to 0 if interrupts are turned on, - * otherwise set edelay to the usual 100. - */ - if (brd->intr_used) - writew(0, &ch->ch_bs->edelay); - else - writew(100, &ch->ch_bs->edelay); - - writeb(1, &ch->ch_bs->idata); - } - - return 0; - -free_chan: - while (--i >= 0) { - kfree(brd->channels[i]); - brd->channels[i] = NULL; - } - return ret; -} - -/* - * dgap_tty_free() - * - * Free the channles which are allocated in dgap_tty_init(). 
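[Editorial note: two bits of arithmetic in the channel loop above deserve a worked example. The FEP publishes each channel's transmit and receive buffers as real-mode style segments, so tx_seg/rx_seg are shifted left by four to get byte offsets into board memory, and the transmit low-water mark is 5/8 of the buffer when the buffer is at least 2000 bytes, otherwise half of it. The values below are made up purely to show the math.

/* Worked example of the segment and watermark math (illustrative values). */
#include <stdio.h>

int main(void)
{
        unsigned int tx_seg = 0x2000;           /* segment from the BS struct */
        unsigned int tx_max = 0x0fff;           /* buffer size minus one      */
        unsigned int tsize  = tx_max + 1;       /* 4096-byte transmit buffer  */
        unsigned int offset = tx_seg << 4;      /* 0x20000: segment -> bytes  */
        unsigned int tlw    = tsize >= 2000 ? (tsize * 5) / 8 : tsize / 2;

        printf("tx buffer at 0x%x, %u bytes, low water mark %u\n",
               offset, tsize, tlw);             /* 0x20000, 4096, 2560 */
        return 0;
}]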
- */ -static void dgap_tty_free(struct board_t *brd) -{ - int i; - - for (i = 0; i < brd->nasync; i++) - kfree(brd->channels[i]); -} - -static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - int rc; - struct board_t *brd; - - if (dgap_numboards >= MAXBOARDS) - return -EPERM; - - rc = pci_enable_device(pdev); - if (rc) - return -EIO; - - brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards); - if (IS_ERR(brd)) - return PTR_ERR(brd); - - rc = dgap_firmware_load(pdev, ent->driver_data, brd); - if (rc) - goto cleanup_brd; - - rc = dgap_alloc_flipbuf(brd); - if (rc) - goto cleanup_brd; - - rc = dgap_tty_register(brd); - if (rc) - goto free_flipbuf; - - rc = dgap_request_irq(brd); - if (rc) - goto unregister_tty; - - /* - * Do tty device initialization. - */ - rc = dgap_tty_init(brd); - if (rc < 0) - goto free_irq; - - rc = dgap_tty_register_ports(brd); - if (rc) - goto tty_free; - - brd->state = BOARD_READY; - brd->dpastatus = BD_RUNNING; - - dgap_board[dgap_numboards++] = brd; - - return 0; - -tty_free: - dgap_tty_free(brd); -free_irq: - dgap_free_irq(brd); -unregister_tty: - dgap_tty_unregister(brd); -free_flipbuf: - dgap_free_flipbuf(brd); -cleanup_brd: - dgap_cleanup_nodes(); - dgap_unmap(brd); - kfree(brd); - - return rc; -} - -/* - * dgap_cleanup_board() - * - * Free all the memory associated with a board - */ -static void dgap_cleanup_board(struct board_t *brd) -{ - unsigned int i; - - if (!brd || brd->magic != DGAP_BOARD_MAGIC) - return; - - dgap_free_irq(brd); - - tasklet_kill(&brd->helper_tasklet); - - dgap_unmap(brd); - - /* Free all allocated channels structs */ - for (i = 0; i < MAXPORTS ; i++) - kfree(brd->channels[i]); - - kfree(brd->flipbuf); - kfree(brd->flipflagbuf); - - dgap_board[brd->boardnum] = NULL; - - kfree(brd); -} - -static void dgap_stop(bool removesys, struct pci_driver *drv) -{ - unsigned long lock_flags; - - spin_lock_irqsave(&dgap_poll_lock, lock_flags); - dgap_poll_stop = 1; - spin_unlock_irqrestore(&dgap_poll_lock, lock_flags); - - del_timer_sync(&dgap_poll_timer); - if (removesys) - dgap_remove_driver_sysfiles(drv); - - device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0)); - class_destroy(dgap_class); - unregister_chrdev(DIGI_DGAP_MAJOR, "dgap"); -} - -static void dgap_remove_one(struct pci_dev *dev) -{ - unsigned int i; - struct pci_driver *drv = to_pci_driver(dev->dev.driver); - - dgap_stop(true, drv); - for (i = 0; i < dgap_numboards; ++i) { - dgap_remove_ports_sysfiles(dgap_board[i]); - dgap_cleanup_tty(dgap_board[i]); - dgap_cleanup_board(dgap_board[i]); - } - - dgap_cleanup_nodes(); -} - -static struct pci_driver dgap_driver = { - .name = "dgap", - .probe = dgap_init_one, - .id_table = dgap_pci_tbl, - .remove = dgap_remove_one, -}; - -/* - * Start of driver. - */ -static int dgap_start(void) -{ - int rc; - unsigned long flags; - struct device *device; - - dgap_numboards = 0; - - pr_info("For the tools package please visit http://www.digi.com\n"); - - /* - * Register our base character device into the kernel. 
- */ - - /* - * Register management/dpa devices - */ - rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops); - if (rc < 0) - return rc; - - dgap_class = class_create(THIS_MODULE, "dgap_mgmt"); - if (IS_ERR(dgap_class)) { - rc = PTR_ERR(dgap_class); - goto failed_class; - } - - device = device_create(dgap_class, NULL, - MKDEV(DIGI_DGAP_MAJOR, 0), - NULL, "dgap_mgmt"); - if (IS_ERR(device)) { - rc = PTR_ERR(device); - goto failed_device; - } - - /* Start the poller */ - spin_lock_irqsave(&dgap_poll_lock, flags); - setup_timer(&dgap_poll_timer, dgap_poll_handler, 0); - dgap_poll_timer.data = 0; - dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick); - dgap_poll_timer.expires = dgap_poll_time; - spin_unlock_irqrestore(&dgap_poll_lock, flags); - - add_timer(&dgap_poll_timer); - - return rc; - -failed_device: - class_destroy(dgap_class); -failed_class: - unregister_chrdev(DIGI_DGAP_MAJOR, "dgap"); - return rc; -} - -/************************************************************************ - * - * Driver load/unload functions - * - ************************************************************************/ - -/* - * init_module() - * - * Module load. This is where it all starts. - */ -static int dgap_init_module(void) -{ - int rc; - - pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART); - - rc = dgap_start(); - if (rc) - return rc; - - rc = pci_register_driver(&dgap_driver); - if (rc) { - dgap_stop(false, NULL); - return rc; - } - - rc = dgap_create_driver_sysfiles(&dgap_driver); - if (rc) - goto err_unregister; - - dgap_driver_state = DRIVER_READY; - - return 0; - -err_unregister: - pci_unregister_driver(&dgap_driver); - return rc; -} - -/* - * dgap_cleanup_module() - * - * Module unload. This is where it all ends. - */ -static void dgap_cleanup_module(void) -{ - if (dgap_numboards) - pci_unregister_driver(&dgap_driver); -} - -module_init(dgap_init_module); -module_exit(dgap_cleanup_module); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Digi International, http://www.digi.com"); -MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line"); -MODULE_SUPPORTED_DEVICE("dgap"); diff --git a/drivers/staging/dgap/dgap.h b/drivers/staging/dgap/dgap.h deleted file mode 100644 index c84dbf2a0684..000000000000 --- a/drivers/staging/dgap/dgap.h +++ /dev/null @@ -1,1229 +0,0 @@ -/* - * Copyright 2003 Digi International (www.digi.com) - * Scott H Kilau <Scott_Kilau at digi dot com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the - * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - * PURPOSE. See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!! 
- * - ************************************************************************* - * - * Driver includes - * - *************************************************************************/ - -#ifndef __DGAP_DRIVER_H -#define __DGAP_DRIVER_H - -#include <linux/types.h> /* To pick up the varions Linux types */ -#include <linux/tty.h> /* To pick up the various tty structs/defines */ -#include <linux/interrupt.h> /* For irqreturn_t type */ - -#ifndef TRUE -# define TRUE 1 -#endif - -#ifndef FALSE -# define FALSE 0 -#endif - -#if !defined(TTY_FLIPBUF_SIZE) -# define TTY_FLIPBUF_SIZE 512 -#endif - -/************************************************************************* - * - * Driver defines - * - *************************************************************************/ - -/* - * Driver identification - */ -#define DG_NAME "dgap-1.3-16" -#define DG_PART "40002347_C" -#define DRVSTR "dgap" - -/* - * defines from dgap_pci.h - */ -#define PCIMAX 32 /* maximum number of PCI boards */ - -#define DIGI_VID 0x114F - -#define PCI_DEV_EPC_DID 0x0002 -#define PCI_DEV_XEM_DID 0x0004 -#define PCI_DEV_XR_DID 0x0005 -#define PCI_DEV_CX_DID 0x0006 -#define PCI_DEV_XRJ_DID 0x0009 /* PLX-based Xr adapter */ -#define PCI_DEV_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */ -#define PCI_DEV_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */ -#define PCI_DEV_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */ -#define PCI_DEV_XR_422_DID 0x0012 /* Xr-422 */ -#define PCI_DEV_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */ -#define PCI_DEV_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */ -#define PCI_DEV_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */ -#define PCI_DEV_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */ -#define PCI_DEV_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */ -#define PCI_DEV_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */ -#define PCI_DEV_XEM_HP_DID 0x0059 /* HP Xem PCI */ - -#define PCI_DEV_XEM_NAME "AccelePort XEM" -#define PCI_DEV_CX_NAME "AccelePort CX" -#define PCI_DEV_XR_NAME "AccelePort Xr" -#define PCI_DEV_XRJ_NAME "AccelePort Xr (PLX)" -#define PCI_DEV_XR_SAIP_NAME "AccelePort Xr (SAIP)" -#define PCI_DEV_920_2_NAME "AccelePort Xr920 2 port" -#define PCI_DEV_920_4_NAME "AccelePort Xr920 4 port" -#define PCI_DEV_920_8_NAME "AccelePort Xr920 8 port" -#define PCI_DEV_XR_422_NAME "AccelePort Xr 422" -#define PCI_DEV_EPCJ_NAME "AccelePort EPC (PLX)" -#define PCI_DEV_XR_BULL_NAME "AccelePort Xr (BULL)" -#define PCI_DEV_XR_IBM_NAME "AccelePort Xr (IBM)" -#define PCI_DEV_CX_IBM_NAME "AccelePort CX (IBM)" -#define PCI_DEV_920_8_HP_NAME "AccelePort Xr920 8 port (HP)" -#define PCI_DEV_XEM_HP_NAME "AccelePort XEM (HP)" - -/* - * On the PCI boards, there is no IO space allocated - * The I/O registers will be in the first 3 bytes of the - * upper 2MB of the 4MB memory space. The board memory - * will be mapped into the low 2MB of the 4MB memory space - */ - -/* Potential location of PCI Bios from E0000 to FFFFF*/ -#define PCI_BIOS_SIZE 0x00020000 - -/* Size of Memory and I/O for PCI (4MB) */ -#define PCI_RAM_SIZE 0x00400000 - -/* Size of Memory (2MB) */ -#define PCI_MEM_SIZE 0x00200000 - -/* Max PCI Window Size (2MB) */ -#define PCI_WIN_SIZE 0x00200000 - -#define PCI_WIN_SHIFT 21 /* 21 bits max */ - -/* Offset of I/0 in Memory (2MB) */ -#define PCI_IO_OFFSET 0x00200000 - -/* Size of IO (2MB) */ -#define PCI_IO_SIZE_DGAP 0x00200000 - -/* Number of boards we support at once. 
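[Editorial note: the memory-map constants above encode one simple split: the 4 MB PCI region is half board memory (the low 2 MB) and half I/O registers (the upper 2 MB beginning at PCI_IO_OFFSET), with PCI_WIN_SHIFT being log2 of that 2 MB window. The tiny program below restates those relationships with the same values, purely as a sanity check; PCI_IO_SIZE here stands in for the header's PCI_IO_SIZE_DGAP.

/* Sanity check of the PCI window constants above (illustrative only). */
#include <assert.h>

#define PCI_RAM_SIZE    0x00400000u     /* 4 MB total                   */
#define PCI_MEM_SIZE    0x00200000u     /* low 2 MB: board memory       */
#define PCI_IO_OFFSET   0x00200000u     /* I/O begins above the memory  */
#define PCI_IO_SIZE     0x00200000u     /* upper 2 MB: I/O registers    */
#define PCI_WIN_SIZE    0x00200000u
#define PCI_WIN_SHIFT   21

int main(void)
{
        assert(PCI_IO_OFFSET == PCI_MEM_SIZE);
        assert(PCI_MEM_SIZE + PCI_IO_SIZE == PCI_RAM_SIZE);
        assert((1u << PCI_WIN_SHIFT) == PCI_WIN_SIZE);
        return 0;
}]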
*/ -#define MAXBOARDS 32 -#define MAXPORTS 224 -#define MAXTTYNAMELEN 200 - -/* Our 3 magic numbers for our board, channel and unit structs */ -#define DGAP_BOARD_MAGIC 0x5c6df104 -#define DGAP_CHANNEL_MAGIC 0x6c6df104 -#define DGAP_UNIT_MAGIC 0x7c6df104 - -/* Serial port types */ -#define DGAP_SERIAL 0 -#define DGAP_PRINT 1 - -#define SERIAL_TYPE_NORMAL 1 - -/* 4 extra for alignment play space */ -#define WRITEBUFLEN ((4096) + 4) -#define MYFLIPLEN N_TTY_BUF_SIZE - -#define SBREAK_TIME 0x25 -#define U2BSIZE 0x400 - -#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000) - -/* - * Our major for the mgmt devices. - * - * We can use 22, because Digi was allocated 22 and 23 for the epca driver. - * 22 has now become obsolete now that the "cu" devices have - * been removed from 2.6. - * Also, this *IS* the epca driver, just PCI only now. - */ -#ifndef DIGI_DGAP_MAJOR -# define DIGI_DGAP_MAJOR 22 -#endif - -/* - * The parameters we use to define the periods of the moving averages. - */ -#define MA_PERIOD (HZ / 10) -#define SMA_DUR (1 * HZ) -#define EMA_DUR (1 * HZ) -#define SMA_NPERIODS (SMA_DUR / MA_PERIOD) -#define EMA_NPERIODS (EMA_DUR / MA_PERIOD) - -/* - * Define a local default termios struct. All ports will be created - * with this termios initially. This is the same structure that is defined - * as the default in tty_io.c with the same settings overridden as in serial.c - * - * In short, this should match the internal serial ports' defaults. - */ -#define DEFAULT_IFLAGS (ICRNL | IXON) -#define DEFAULT_OFLAGS (OPOST | ONLCR) -#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL) -#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \ - ECHOCTL | ECHOKE | IEXTEN) - -#ifndef _POSIX_VDISABLE -#define _POSIX_VDISABLE ('\0') -#endif - -#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */ -#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */ - -#define VPDSIZE (512) - -/************************************************************************ - * FEP memory offsets - ************************************************************************/ -#define START 0x0004L /* Execution start address */ - -#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */ -#define CMDSTART 0x0400L /* Start of command buffer */ -#define CMDMAX 0x0800L /* End of command buffer */ - -#define EVBUF 0x0d18L /* Event (ev_t) structure */ -#define EVSTART 0x0800L /* Start of event buffer */ -#define EVMAX 0x0c00L /* End of event buffer */ -#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' is here */ -#define ECS_SEG 0x0E44 /* Segment of the extended */ - /* channel structure */ -#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line */ - /* speed if the fep has extended */ - /* capabilities */ - -/* BIOS MAGIC SPOTS */ -#define ERROR 0x0C14L /* BIOS error code */ -#define SEQUENCE 0x0C12L /* BIOS sequence indicator */ -#define POSTAREA 0x0C00L /* POST complete message area */ - -/* FEP MAGIC SPOTS */ -#define FEPSTAT POSTAREA /* OS here when FEP comes up */ -#define NCHAN 0x0C02L /* number of ports FEP sees */ -#define PANIC 0x0C10L /* PANIC area for FEP */ -#define KMEMEM 0x0C30L /* Memory for KME use */ -#define CONFIG 0x0CD0L /* Concentrator configuration info */ -#define CONFIGSIZE 0x0030 /* configuration info size */ -#define DOWNREQ 0x0D00 /* Download request buffer pointer */ - -#define CHANBUF 0x1000L /* Async channel (bs_t) structs */ -#define FEPOSSIZE 0x1FFF /* 8K FEPOS */ - -#define XEMPORTS 0xC02 /* - * Offset in board memory where FEP5 stores - * how many ports it has detected. 
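[Editorial note: the DEFAULT_* termios flags defined just above are the settings every dgap port started with. The removed driver applies them during tty driver setup in an earlier hunk that is not visible here, so the fragment below is only a generic sketch of how defaults like these are normally copied into a tty_driver's init_termios; it is not the driver's original registration code.

#include <linux/tty.h>
#include <linux/tty_driver.h>

/* Generic sketch: seed a tty_driver's default termios from the
 * DEFAULT_* values above (illustrative, not the removed driver's code). */
static void seed_default_termios(struct tty_driver *drv)
{
        drv->init_termios.c_iflag  = DEFAULT_IFLAGS;    /* ICRNL | IXON         */
        drv->init_termios.c_oflag  = DEFAULT_OFLAGS;    /* OPOST | ONLCR        */
        drv->init_termios.c_cflag  = DEFAULT_CFLAGS;    /* 9600 baud, 8 bits    */
        drv->init_termios.c_lflag  = DEFAULT_LFLAGS;    /* canonical + echo     */
        drv->init_termios.c_ispeed = 9600;
        drv->init_termios.c_ospeed = 9600;
}]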
- * NOTE: FEP5 reports 64 ports when the user - * has the cable in EBI OUT instead of EBI IN. - */ - -#define FEPCLR 0x00 -#define FEPMEM 0x02 -#define FEPRST 0x04 -#define FEPINT 0x08 -#define FEPMASK 0x0e -#define FEPWIN 0x80 - -#define LOWMEM 0x0100 -#define HIGHMEM 0x7f00 - -#define FEPTIMEOUT 200000 - -#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */ -#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */ -#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */ -#define FEPPOLL 0x0c26 /* Fep event poll interval */ - -#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */ - -/************************************************************************ - * FEP supported functions - ************************************************************************/ -#define SRLOW 0xe0 /* Set receive low water */ -#define SRHIGH 0xe1 /* Set receive high water */ -#define FLUSHTX 0xe2 /* Flush transmit buffer */ -#define PAUSETX 0xe3 /* Pause data transmission */ -#define RESUMETX 0xe4 /* Resume data transmission */ -#define SMINT 0xe5 /* Set Modem Interrupt */ -#define SAFLOWC 0xe6 /* Set Aux. flow control chars */ -#define SBREAK 0xe8 /* Send break */ -#define SMODEM 0xe9 /* Set 8530 modem control lines */ -#define SIFLAG 0xea /* Set UNIX iflags */ -#define SFLOWC 0xeb /* Set flow control characters */ -#define STLOW 0xec /* Set transmit low water mark */ -#define RPAUSE 0xee /* Pause receive */ -#define RRESUME 0xef /* Resume receive */ -#define CHRESET 0xf0 /* Reset Channel */ -#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/ -#define SOFLAG 0xf3 /* Set UNIX oflags */ -#define SHFLOW 0xf4 /* Set hardware handshake */ -#define SCFLAG 0xf5 /* Set UNIX cflags */ -#define SVNEXT 0xf6 /* Set VNEXT character */ -#define SPINTFC 0xfc /* Reserved */ -#define SCOMMODE 0xfd /* Set RS232/422 mode */ - -/************************************************************************ - * Modes for SCOMMODE - ************************************************************************/ -#define MODE_232 0x00 -#define MODE_422 0x01 - -/************************************************************************ - * Event flags. - ************************************************************************/ -#define IFBREAK 0x01 /* Break received */ -#define IFTLW 0x02 /* Transmit low water */ -#define IFTEM 0x04 /* Transmitter empty */ -#define IFDATA 0x08 /* Receive data present */ -#define IFMODEM 0x20 /* Modem status change */ - -/************************************************************************ - * Modem flags - ************************************************************************/ -# define DM_RTS 0x02 /* Request to send */ -# define DM_CD 0x80 /* Carrier detect */ -# define DM_DSR 0x20 /* Data set ready */ -# define DM_CTS 0x10 /* Clear to send */ -# define DM_RI 0x40 /* Ring indicator */ -# define DM_DTR 0x01 /* Data terminal ready */ - -/* - * defines from dgap_conf.h - */ -#define NULLNODE 0 /* header node, not used */ -#define BNODE 1 /* Board node */ -#define LNODE 2 /* Line node */ -#define CNODE 3 /* Concentrator node */ -#define MNODE 4 /* EBI Module node */ -#define TNODE 5 /* tty name prefix node */ -#define CUNODE 6 /* cu name prefix (non-SCO) */ -#define PNODE 7 /* trans. 
print prefix node */ -#define JNODE 8 /* maJor number node */ -#define ANODE 9 /* altpin */ -#define TSNODE 10 /* tty structure size */ -#define CSNODE 11 /* channel structure size */ -#define BSNODE 12 /* board structure size */ -#define USNODE 13 /* unit schedule structure size */ -#define FSNODE 14 /* f2200 structure size */ -#define VSNODE 15 /* size of VPIX structures */ -#define INTRNODE 16 /* enable interrupt */ - -/* Enumeration of tokens */ -#define BEGIN 1 -#define END 2 -#define BOARD 10 - -#define EPCFS 11 /* start of EPC family definitions */ -#define ICX 11 -#define MCX 13 -#define PCX 14 -#define IEPC 15 -#define EEPC 16 -#define MEPC 17 -#define IPCM 18 -#define EPCM 19 -#define MPCM 20 -#define PEPC 21 -#define PPCM 22 -#ifdef CP -#define ICP 23 -#define ECP 24 -#define MCP 25 -#endif -#define EPCFE 25 /* end of EPC family definitions */ -#define PC2E 26 -#define PC4E 27 -#define PC4E8K 28 -#define PC8E 29 -#define PC8E8K 30 -#define PC16E 31 -#define MC2E8K 34 -#define MC4E8K 35 -#define MC8E8K 36 - -#define AVANFS 42 /* start of Avanstar family definitions */ -#define A8P 42 -#define A16P 43 -#define AVANFE 43 /* end of Avanstar family definitions */ - -#define DA2000FS 44 /* start of AccelePort 2000 family definitions */ -#define DA22 44 /* AccelePort 2002 */ -#define DA24 45 /* AccelePort 2004 */ -#define DA28 46 /* AccelePort 2008 */ -#define DA216 47 /* AccelePort 2016 */ -#define DAR4 48 /* AccelePort RAS 4 port */ -#define DAR8 49 /* AccelePort RAS 8 port */ -#define DDR24 50 /* DataFire RAS 24 port */ -#define DDR30 51 /* DataFire RAS 30 port */ -#define DDR48 52 /* DataFire RAS 48 port */ -#define DDR60 53 /* DataFire RAS 60 port */ -#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */ - -#define PCXRFS 106 /* start of PCXR family definitions */ -#define APORT4 106 -#define APORT8 107 -#define PAPORT4 108 -#define PAPORT8 109 -#define APORT4_920I 110 -#define APORT8_920I 111 -#define APORT4_920P 112 -#define APORT8_920P 113 -#define APORT2_920P 114 -#define PCXRFE 117 /* end of PCXR family definitions */ - -#define LINE 82 -#ifdef T1 -#define T1M 83 -#define E1M 84 -#endif -#define CONC 64 -#define CX 65 -#define EPC 66 -#define MOD 67 -#define PORTS 68 -#define METHOD 69 -#define CUSTOM 70 -#define BASIC 71 -#define STATUS 72 -#define MODEM 73 -/* The following tokens can appear in multiple places */ -#define SPEED 74 -#define NPORTS 75 -#define ID 76 -#define CABLE 77 -#define CONNECT 78 -#define MEM 80 -#define DPSZ 81 - -#define TTYN 90 -#define CU 91 -#define PRINT 92 -#define XPRINT 93 -#define CMAJOR 94 -#define ALTPIN 95 -#define STARTO 96 -#define USEINTR 97 -#define PCIINFO 98 - -#define TTSIZ 100 -#define CHSIZ 101 -#define BSSIZ 102 -#define UNTSIZ 103 -#define F2SIZ 104 -#define VPSIZ 105 - -#define TOTAL_BOARD 2 -#define CURRENT_BRD 4 -#define BOARD_TYPE 6 -#define IO_ADDRESS 8 -#define MEM_ADDRESS 10 - -#define FIELDS_PER_PAGE 18 - -#define TB_FIELD 1 -#define CB_FIELD 3 -#define BT_FIELD 5 -#define IO_FIELD 7 -#define ID_FIELD 8 -#define ME_FIELD 9 -#define TTY_FIELD 11 -#define CU_FIELD 13 -#define PR_FIELD 15 -#define MPR_FIELD 17 - -#define MAX_FIELD 512 - -#define INIT 0 -#define NITEMS 128 -#define MAX_ITEM 512 - -#define DSCRINST 1 -#define DSCRNUM 3 -#define ALTPINQ 5 -#define SSAVE 7 - -#define DSCR "32" -#define ONETONINE "123456789" -#define ALL "1234567890" - -/* - * All the possible states the driver can be while being loaded. 
- */ -enum { - DRIVER_INITIALIZED = 0, - DRIVER_READY -}; - -/* - * All the possible states the board can be while booting up. - */ -enum { - BOARD_FAILED = 0, - BOARD_READY -}; - -/* - * All the possible states that a requested concentrator image can be in. - */ -enum { - NO_PENDING_CONCENTRATOR_REQUESTS = 0, - NEED_CONCENTRATOR, - REQUESTED_CONCENTRATOR -}; - -/* - * Modem line constants are defined as macros because DSR and - * DCD are swapable using the ditty altpin option. - */ -#define D_CD(ch) ch->ch_cd /* Carrier detect */ -#define D_DSR(ch) ch->ch_dsr /* Data set ready */ -#define D_RTS(ch) DM_RTS /* Request to send */ -#define D_CTS(ch) DM_CTS /* Clear to send */ -#define D_RI(ch) DM_RI /* Ring indicator */ -#define D_DTR(ch) DM_DTR /* Data terminal ready */ - -/************************************************************************* - * - * Structures and closely related defines. - * - *************************************************************************/ - -/* - * A structure to hold a statistics counter. We also - * compute moving averages for this counter. - */ -struct macounter { - u32 cnt; /* Total count */ - ulong accum; /* Acuumulator per period */ - ulong sma; /* Simple moving average */ - ulong ema; /* Exponential moving average */ -}; - -/************************************************************************ - * Device flag definitions for bd_flags. - ************************************************************************/ -#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */ -#define BD_HAS_VPD 0x0002 /* Board has VPD info available */ - -/* - * Per-board information - */ -struct board_t { - int magic; /* Board Magic number. */ - int boardnum; /* Board number: 0-3 */ - - int type; /* Type of board */ - char *name; /* Product Name */ - struct pci_dev *pdev; /* Pointer to the pci_dev struct */ - u16 vendor; /* PCI vendor ID */ - u16 device; /* PCI device ID */ - u16 subvendor; /* PCI subsystem vendor ID */ - u16 subdevice; /* PCI subsystem device ID */ - u8 rev; /* PCI revision ID */ - uint pci_bus; /* PCI bus value */ - uint pci_slot; /* PCI slot value */ - u16 maxports; /* MAX ports this board can handle */ - u8 vpd[VPDSIZE]; /* VPD of board, if found */ - u32 bd_flags; /* Board flags */ - - spinlock_t bd_lock; /* Used to protect board */ - - u32 state; /* State of card. */ - wait_queue_head_t state_wait; /* Place to sleep on for state change */ - - struct tasklet_struct helper_tasklet; /* Poll helper tasklet */ - - u32 wait_for_bios; - u32 wait_for_fep; - - struct cnode *bd_config; /* Config of board */ - - u16 nasync; /* Number of ports on card */ - - ulong irq; /* Interrupt request number */ - ulong intr_count; /* Count of interrupts */ - u32 intr_used; /* Non-zero if using interrupts */ - u32 intr_running; /* Non-zero if FEP knows its doing */ - /* interrupts */ - - ulong port; /* Start of base io port of the card */ - ulong port_end; /* End of base io port of the card */ - ulong membase; /* Start of base memory of the card */ - ulong membase_end; /* End of base memory of the card */ - - u8 __iomem *re_map_port; /* Remapped io port of the card */ - u8 __iomem *re_map_membase;/* Remapped memory of the card */ - - u8 inhibit_poller; /* Tells the poller to leave us alone */ - - struct channel_t *channels[MAXPORTS]; /* array of pointers to our */ - /* channels. 
*/ - - struct tty_driver *serial_driver; - struct tty_port *serial_ports; - char serial_name[200]; - struct tty_driver *print_driver; - struct tty_port *printer_ports; - char print_name[200]; - - struct bs_t __iomem *bd_bs; /* Base structure pointer */ - - char *flipbuf; /* Our flip buffer, alloced if */ - /* board is found */ - char *flipflagbuf; /* Our flip flag buffer, alloced */ - /* if board is found */ - - u16 dpatype; /* The board "type", as defined */ - /* by DPA */ - u16 dpastatus; /* The board "status", as defined */ - /* by DPA */ - - u32 conc_dl_status; /* Status of any pending conc */ - /* download */ -}; - -/************************************************************************ - * Unit flag definitions for un_flags. - ************************************************************************/ -#define UN_ISOPEN 0x0001 /* Device is open */ -#define UN_CLOSING 0x0002 /* Line is being closed */ -#define UN_IMM 0x0004 /* Service immediately */ -#define UN_BUSY 0x0008 /* Some work this channel */ -#define UN_BREAKI 0x0010 /* Input break received */ -#define UN_PWAIT 0x0020 /* Printer waiting for terminal */ -#define UN_TIME 0x0040 /* Waiting on time */ -#define UN_EMPTY 0x0080 /* Waiting output queue empty */ -#define UN_LOW 0x0100 /* Waiting output low water mark*/ -#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */ -#define UN_WOPEN 0x0400 /* Device waiting for open */ -#define UN_WIOCTL 0x0800 /* Device waiting for open */ -#define UN_HANGUP 0x8000 /* Carrier lost */ - -struct device; - -/************************************************************************ - * Structure for terminal or printer unit. - ************************************************************************/ -struct un_t { - int magic; /* Unit Magic Number. */ - struct channel_t *un_ch; - u32 un_time; - u32 un_type; - int un_open_count; /* Counter of opens to port */ - struct tty_struct *un_tty;/* Pointer to unit tty structure */ - u32 un_flags; /* Unit flags */ - wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */ - u32 un_dev; /* Minor device number */ - tcflag_t un_oflag; /* oflags being done on board */ - tcflag_t un_lflag; /* lflags being done on board */ - struct device *un_sysfs; -}; - -/************************************************************************ - * Device flag definitions for ch_flags. - ************************************************************************/ -#define CH_PRON 0x0001 /* Printer on string */ -#define CH_OUT 0x0002 /* Dial-out device open */ -#define CH_STOP 0x0004 /* Output is stopped */ -#define CH_STOPI 0x0008 /* Input is stopped */ -#define CH_CD 0x0010 /* Carrier is present */ -#define CH_FCAR 0x0020 /* Carrier forced on */ - -#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */ -#define CH_WLOW 0x0100 /* Term waiting low event */ -#define CH_WEMPTY 0x0200 /* Term waiting empty event */ -#define CH_RENABLE 0x0400 /* Buffer just emptied */ -#define CH_RACTIVE 0x0800 /* Process active in xxread() */ -#define CH_RWAIT 0x1000 /* Process waiting in xxread() */ -#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */ -#define CH_HANGUP 0x8000 /* Hangup received */ - -/* - * Definitions for ch_sniff_flags - */ -#define SNIFF_OPEN 0x1 -#define SNIFF_WAIT_DATA 0x2 -#define SNIFF_WAIT_SPACE 0x4 - -/************************************************************************ - *** Definitions for Digi ditty(1) command. 
- ************************************************************************/ - -/************************************************************************ - * This module provides application access to special Digi - * serial line enhancements which are not standard UNIX(tm) features. - ************************************************************************/ - -#if !defined(TIOCMODG) - -#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */ -#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */ - -#ifndef TIOCM_LE -#define TIOCM_LE 0x01 /* line enable */ -#define TIOCM_DTR 0x02 /* data terminal ready */ -#define TIOCM_RTS 0x04 /* request to send */ -#define TIOCM_ST 0x08 /* secondary transmit */ -#define TIOCM_SR 0x10 /* secondary receive */ -#define TIOCM_CTS 0x20 /* clear to send */ -#define TIOCM_CAR 0x40 /* carrier detect */ -#define TIOCM_RNG 0x80 /* ring indicator */ -#define TIOCM_DSR 0x100 /* data set ready */ -#define TIOCM_RI TIOCM_RNG /* ring (alternate) */ -#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */ -#endif - -#endif - -#if !defined(TIOCMSET) -#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */ -#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */ -#endif - -#if !defined(TIOCMBIC) -#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */ -#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl state */ -#endif - -#if !defined(TIOCSDTR) -#define TIOCSDTR (('e'<<8) | 0) /* set DTR */ -#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */ -#endif - -/************************************************************************ - * Ioctl command arguments for DIGI parameters. - ************************************************************************/ -#define DIGI_GETA (('e'<<8) | 94) /* Read params */ - -#define DIGI_SETA (('e'<<8) | 95) /* Set params */ -#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */ -#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */ - -#define DIGI_KME (('e'<<8) | 98) /* Read/Write Host */ - /* Adapter Memory */ - -#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */ - /* control characters */ -#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */ - /* control characters */ -#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */ - /* flow control chars */ -#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. 
startc/stopc */ - /* flow control chars */ - -#define DIGI_GEDELAY (('d'<<8) | 246) /* Get edelay */ -#define DIGI_SEDELAY (('d'<<8) | 247) /* Set edelay */ - -struct digiflow_t { - unsigned char startc; /* flow cntl start char */ - unsigned char stopc; /* flow cntl stop char */ -}; - -#ifdef FLOW_2200 -#define F2200_GETA (('e'<<8) | 104) /* Get 2x36 flow cntl flags */ -#define F2200_SETAW (('e'<<8) | 105) /* Set 2x36 flow cntl flags */ -#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */ -#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */ -#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */ -#define F2200_XON 0xf8 -#define P2200_XON 0xf9 -#define F2200_XOFF 0xfa -#define P2200_XOFF 0xfb - -#define FXOFF_MASK 0x03 /* 2200 flow status mask */ -#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */ -#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */ -#endif - -/************************************************************************ - * Values for digi_flags - ************************************************************************/ -#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */ -#define DIGI_FAST 0x0002 /* Fast baud rates */ -#define RTSPACE 0x0004 /* RTS input flow control */ -#define CTSPACE 0x0008 /* CTS output flow control */ -#define DSRPACE 0x0010 /* DSR output flow control */ -#define DCDPACE 0x0020 /* DCD output flow control */ -#define DTRPACE 0x0040 /* DTR input flow control */ -#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */ -#define DIGI_FORCEDCD 0x0100 /* Force carrier */ -#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */ -#define DIGI_AIXON 0x0400 /* Aux flow control in fep */ -#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/ -#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/ -#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */ -#define DIGI_422 0x4000 /* for 422/232 selectable panel */ -#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */ - -/************************************************************************ - * These options are not supported on the comxi. - ************************************************************************/ -#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE) - -#define DIGI_PLEN 28 /* String length */ -#define DIGI_TSIZ 10 /* Terminal string len */ - -/************************************************************************ - * Structure used with ioctl commands for DIGI parameters. - ************************************************************************/ -struct digi_t { - unsigned short digi_flags; /* Flags (see above) */ - unsigned short digi_maxcps; /* Max printer CPS */ - unsigned short digi_maxchar; /* Max chars in print queue */ - unsigned short digi_bufsize; /* Buffer size */ - unsigned char digi_onlen; /* Length of ON string */ - unsigned char digi_offlen; /* Length of OFF string */ - char digi_onstr[DIGI_PLEN]; /* Printer on string */ - char digi_offstr[DIGI_PLEN]; /* Printer off string */ - char digi_term[DIGI_TSIZ]; /* terminal string */ -}; - -/************************************************************************ - * KME definitions and structures. 
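[Editorial note: DIGI_GETA/DIGI_SETA above are the per-port ioctls user space used to read and adjust the struct digi_t transparent-print and flow-control settings. Only the definitions appear in this header, so the short user-space sketch below shows what a call would have looked like; the device node path is an example and error handling is minimal.

/* User-space sketch: read a port's digi_t settings via DIGI_GETA.
 * The device node name below is only an example. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define DIGI_GETA       (('e' << 8) | 94)       /* as defined above */

struct digi_t {
        unsigned short digi_flags;
        unsigned short digi_maxcps;
        unsigned short digi_maxchar;
        unsigned short digi_bufsize;
        unsigned char  digi_onlen;
        unsigned char  digi_offlen;
        char           digi_onstr[28];          /* DIGI_PLEN */
        char           digi_offstr[28];         /* DIGI_PLEN */
        char           digi_term[10];           /* DIGI_TSIZ */
};

int main(void)
{
        struct digi_t d;
        int fd = open("/dev/ttya01", O_RDONLY); /* example node */

        if (fd < 0 || ioctl(fd, DIGI_GETA, &d) < 0) {
                perror("DIGI_GETA");
                return 1;
        }
        printf("digi_flags = 0x%04x, maxcps = %u\n", d.digi_flags, d.digi_maxcps);
        close(fd);
        return 0;
}]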
- ************************************************************************/ -#define RW_IDLE 0 /* Operation complete */ -#define RW_READ 1 /* Read Concentrator Memory */ -#define RW_WRITE 2 /* Write Concentrator Memory */ - -struct rw_t { - unsigned char rw_req; /* Request type */ - unsigned char rw_board; /* Host Adapter board number */ - unsigned char rw_conc; /* Concentrator number */ - unsigned char rw_reserved; /* Reserved for expansion */ - unsigned long rw_addr; /* Address in concentrator */ - unsigned short rw_size; /* Read/write request length */ - unsigned char rw_data[128]; /* Data to read/write */ -}; - -/************************************************************************ - * Structure to get driver status information - ************************************************************************/ -struct digi_dinfo { - unsigned long dinfo_nboards; /* # boards configured */ - char dinfo_reserved[12]; /* for future expansion */ - char dinfo_version[16]; /* driver version */ -}; - -#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */ - -/************************************************************************ - * Structure used with ioctl commands for per-board information - * - * physsize and memsize differ when board has "windowed" memory - ************************************************************************/ -struct digi_info { - unsigned long info_bdnum; /* Board number (0 based) */ - unsigned long info_ioport; /* io port address */ - unsigned long info_physaddr; /* memory address */ - unsigned long info_physsize; /* Size of host mem window */ - unsigned long info_memsize; /* Amount of dual-port mem */ - /* on board */ - unsigned short info_bdtype; /* Board type */ - unsigned short info_nports; /* number of ports */ - char info_bdstate; /* board state */ - char info_reserved[7]; /* for future expansion */ -}; - -#define DIGI_GETBD (('d'<<8) | 249) /* get board info */ - -struct digi_stat { - unsigned int info_chan; /* Channel number (0 based) */ - unsigned int info_brd; /* Board number (0 based) */ - unsigned long info_cflag; /* cflag for channel */ - unsigned long info_iflag; /* iflag for channel */ - unsigned long info_oflag; /* oflag for channel */ - unsigned long info_mstat; /* mstat for channel */ - unsigned long info_tx_data; /* tx_data for channel */ - unsigned long info_rx_data; /* rx_data for channel */ - unsigned long info_hflow; /* hflow for channel */ - unsigned long info_reserved[8]; /* for future expansion */ -}; - -#define DIGI_GETSTAT (('d'<<8) | 244) /* get board info */ -/************************************************************************ - * - * Structure used with ioctl commands for per-channel information - * - ************************************************************************/ -struct digi_ch { - unsigned long info_bdnum; /* Board number (0 based) */ - unsigned long info_channel; /* Channel index number */ - unsigned long info_ch_cflag; /* Channel cflag */ - unsigned long info_ch_iflag; /* Channel iflag */ - unsigned long info_ch_oflag; /* Channel oflag */ - unsigned long info_chsize; /* Channel structure size */ - unsigned long info_sleep_stat; /* sleep status */ - dev_t info_dev; /* device number */ - unsigned char info_initstate; /* Channel init state */ - unsigned char info_running; /* Channel running state */ - long reserved[8]; /* reserved for future use */ -}; - -/* -* This structure is used with the DIGI_FEPCMD ioctl to -* tell the driver which port to send the command for. 
-*/ -struct digi_cmd { - int cmd; - int word; - int ncmds; - int chan; /* channel index (zero based) */ - int bdid; /* board index (zero based) */ -}; - -/* -* info_sleep_stat defines -*/ -#define INFO_RUNWAIT 0x0001 -#define INFO_WOPEN 0x0002 -#define INFO_TTIOW 0x0004 -#define INFO_CH_RWAIT 0x0008 -#define INFO_CH_WEMPTY 0x0010 -#define INFO_CH_WLOW 0x0020 -#define INFO_XXBUF_BUSY 0x0040 - -#define DIGI_GETCH (('d'<<8) | 245) /* get board info */ - -/* Board type definitions */ - -#define SUBTYPE 0007 -#define T_PCXI 0000 -#define T_PCXM 0001 -#define T_PCXE 0002 -#define T_PCXR 0003 -#define T_SP 0004 -#define T_SP_PLUS 0005 -# define T_HERC 0000 -# define T_HOU 0001 -# define T_LON 0002 -# define T_CHA 0003 -#define FAMILY 0070 -#define T_COMXI 0000 -#define T_PCXX 0010 -#define T_CX 0020 -#define T_EPC 0030 -#define T_PCLITE 0040 -#define T_SPXX 0050 -#define T_AVXX 0060 -#define T_DXB 0070 -#define T_A2K_4_8 0070 -#define BUSTYPE 0700 -#define T_ISABUS 0000 -#define T_MCBUS 0100 -#define T_EISABUS 0200 -#define T_PCIBUS 0400 - -/* Board State Definitions */ - -#define BD_RUNNING 0x0 -#define BD_REASON 0x7f -#define BD_NOTFOUND 0x1 -#define BD_NOIOPORT 0x2 -#define BD_NOMEM 0x3 -#define BD_NOBIOS 0x4 -#define BD_NOFEP 0x5 -#define BD_FAILED 0x6 -#define BD_ALLOCATED 0x7 -#define BD_TRIBOOT 0x8 -#define BD_BADKME 0x80 - -#define DIGI_LOOPBACK (('d'<<8) | 252) /* Enable/disable UART */ - /* internal loopback */ -#define DIGI_SPOLL (('d'<<8) | 254) /* change poller rate */ - -#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */ -#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */ -#define DIGI_RESET_PORT (('e'<<8) | 93) /* Reset port */ - -/************************************************************************ - * Channel information structure. - ************************************************************************/ -struct channel_t { - int magic; /* Channel Magic Number */ - struct bs_t __iomem *ch_bs; /* Base structure pointer */ - struct cm_t __iomem *ch_cm; /* Command queue pointer */ - struct board_t *ch_bd; /* Board structure pointer */ - u8 __iomem *ch_vaddr; /* FEP memory origin */ - u8 __iomem *ch_taddr; /* Write buffer origin */ - u8 __iomem *ch_raddr; /* Read buffer origin */ - struct digi_t ch_digi; /* Transparent Print structure */ - struct un_t ch_tun; /* Terminal unit info */ - struct un_t ch_pun; /* Printer unit info */ - - spinlock_t ch_lock; /* provide for serialization */ - wait_queue_head_t ch_flags_wait; - - u32 pscan_state; - u8 pscan_savechar; - - u32 ch_portnum; /* Port number, 0 offset. 
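Because DIGI_SETCUSTOMBAUD and DIGI_GETCUSTOMBAUD are declared with _IOW('e', 106, int) and _IOR('e', 107, int), the ioctl argument is a pointer to int rather than the value itself. A small sketch of the calling convention (fd is an already-open Digi tty; "digi.h" on the include path is assumed):

    #include <sys/ioctl.h>
    #include "digi.h"   /* DIGI_SETCUSTOMBAUD, DIGI_GETCUSTOMBAUD */

    static int set_custom_baud(int fd, int rate)
    {
            return ioctl(fd, DIGI_SETCUSTOMBAUD, &rate); /* _IOW: kernel reads *arg */
    }

    static int get_custom_baud(int fd, int *rate)
    {
            return ioctl(fd, DIGI_GETCUSTOMBAUD, rate);  /* _IOR: kernel fills *arg */
    }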
*/ - u32 ch_open_count; /* open count */ - u32 ch_flags; /* Channel flags */ - - u32 ch_cpstime; /* Time for CPS calculations */ - - tcflag_t ch_c_iflag; /* channel iflags */ - tcflag_t ch_c_cflag; /* channel cflags */ - tcflag_t ch_c_oflag; /* channel oflags */ - tcflag_t ch_c_lflag; /* channel lflags */ - - u16 ch_fepiflag; /* FEP tty iflags */ - u16 ch_fepcflag; /* FEP tty cflags */ - u16 ch_fepoflag; /* FEP tty oflags */ - u16 ch_wopen; /* Waiting for open process cnt */ - u16 ch_tstart; /* Transmit buffer start */ - u16 ch_tsize; /* Transmit buffer size */ - u16 ch_rstart; /* Receive buffer start */ - u16 ch_rsize; /* Receive buffer size */ - u16 ch_rdelay; /* Receive delay time */ - - u16 ch_tlw; /* Our currently set low water mark */ - - u16 ch_cook; /* Output character mask */ - - u8 ch_card; /* Card channel is on */ - u8 ch_stopc; /* Stop character */ - u8 ch_startc; /* Start character */ - - u8 ch_mostat; /* FEP output modem status */ - u8 ch_mistat; /* FEP input modem status */ - u8 ch_mforce; /* Modem values to be forced */ - u8 ch_mval; /* Force values */ - u8 ch_fepstopc; /* FEP stop character */ - u8 ch_fepstartc; /* FEP start character */ - - u8 ch_astopc; /* Auxiliary Stop character */ - u8 ch_astartc; /* Auxiliary Start character */ - u8 ch_fepastopc; /* Auxiliary FEP stop char */ - u8 ch_fepastartc; /* Auxiliary FEP start char */ - - u8 ch_hflow; /* FEP hardware handshake */ - u8 ch_dsr; /* stores real dsr value */ - u8 ch_cd; /* stores real cd value */ - u8 ch_tx_win; /* channel tx buffer window */ - u8 ch_rx_win; /* channel rx buffer window */ - uint ch_custom_speed; /* Custom baud, if set */ - uint ch_baud_info; /* Current baud info for /proc output */ - ulong ch_rxcount; /* total of data received so far */ - ulong ch_txcount; /* total of data transmitted so far */ - ulong ch_err_parity; /* Count of parity errors on channel */ - ulong ch_err_frame; /* Count of framing errors on channel */ - ulong ch_err_break; /* Count of breaks on channel */ - ulong ch_err_overrun; /* Count of overruns on channel */ -}; - -/************************************************************************ - * Command structure definition. - ************************************************************************/ -struct cm_t { - unsigned short cm_head; /* Command buffer head offset */ - unsigned short cm_tail; /* Command buffer tail offset */ - unsigned short cm_start; /* start offset of buffer */ - unsigned short cm_max; /* last offset of buffer */ -}; - -/************************************************************************ - * Event structure definition. - ************************************************************************/ -struct ev_t { - unsigned short ev_head; /* Command buffer head offset */ - unsigned short ev_tail; /* Command buffer tail offset */ - unsigned short ev_start; /* start offset of buffer */ - unsigned short ev_max; /* last offset of buffer */ -}; - -/************************************************************************ - * Download buffer structure. 
- ************************************************************************/ -struct downld_t { - u8 dl_type; /* Header */ - u8 dl_seq; /* Download sequence */ - ushort dl_srev; /* Software revision number */ - ushort dl_lrev; /* Low revision number */ - ushort dl_hrev; /* High revision number */ - ushort dl_seg; /* Start segment address */ - ushort dl_size; /* Number of bytes to download */ - u8 dl_data[1024]; /* Download data */ -}; - -/************************************************************************ - * Per channel buffer structure - ************************************************************************ - * Base Structure Entries Usage Meanings to Host * - * * - * W = read write R = read only * - * C = changed by commands only * - * U = unknown (may be changed w/o notice) * - ************************************************************************/ -struct bs_t { - unsigned short tp_jmp; /* Transmit poll jump */ - unsigned short tc_jmp; /* Cooked procedure jump */ - unsigned short ri_jmp; /* Not currently used */ - unsigned short rp_jmp; /* Receive poll jump */ - - unsigned short tx_seg; /* W Tx segment */ - unsigned short tx_head; /* W Tx buffer head offset */ - unsigned short tx_tail; /* R Tx buffer tail offset */ - unsigned short tx_max; /* W Tx buffer size - 1 */ - - unsigned short rx_seg; /* W Rx segment */ - unsigned short rx_head; /* W Rx buffer head offset */ - unsigned short rx_tail; /* R Rx buffer tail offset */ - unsigned short rx_max; /* W Rx buffer size - 1 */ - - unsigned short tx_lw; /* W Tx buffer low water mark */ - unsigned short rx_lw; /* W Rx buffer low water mark */ - unsigned short rx_hw; /* W Rx buffer high water mark*/ - unsigned short incr; /* W Increment to next channel*/ - - unsigned short fepdev; /* U SCC device base address */ - unsigned short edelay; /* W Exception delay */ - unsigned short blen; /* W Break length */ - unsigned short btime; /* U Break complete time */ - - unsigned short iflag; /* C UNIX input flags */ - unsigned short oflag; /* C UNIX output flags */ - unsigned short cflag; /* C UNIX control flags */ - unsigned short wfill[13]; /* U Reserved for expansion */ - - unsigned char num; /* U Channel number */ - unsigned char ract; /* U Receiver active counter */ - unsigned char bstat; /* U Break status bits */ - unsigned char tbusy; /* W Transmit busy */ - unsigned char iempty; /* W Transmit empty event */ - /* enable */ - unsigned char ilow; /* W Transmit low-water event */ - /* enable */ - unsigned char idata; /* W Receive data interrupt */ - /* enable */ - unsigned char eflag; /* U Host event flags */ - - unsigned char tflag; /* U Transmit flags */ - unsigned char rflag; /* U Receive flags */ - unsigned char xmask; /* U Transmit ready flags */ - unsigned char xval; /* U Transmit ready value */ - unsigned char m_stat; /* RC Modem status bits */ - unsigned char m_change; /* U Modem bits which changed */ - unsigned char m_int; /* W Modem interrupt enable */ - /* bits */ - unsigned char m_last; /* U Last modem status */ - - unsigned char mtran; /* C Unreported modem trans */ - unsigned char orun; /* C Buffer overrun occurred */ - unsigned char astartc; /* W Auxiliary Xon char */ - unsigned char astopc; /* W Auxiliary Xoff char */ - unsigned char startc; /* W Xon character */ - unsigned char stopc; /* W Xoff character */ - unsigned char vnextc; /* W Vnext character */ - unsigned char hflow; /* C Software flow control */ - - unsigned char fillc; /* U Delay Fill character */ - unsigned char ochar; /* U Saved output character */ - unsigned 
char omask; /* U Output character mask */ - - unsigned char bfill[13]; /* U Reserved for expansion */ - - unsigned char scc[16]; /* U SCC registers */ -}; - -struct cnode { - struct cnode *next; - int type; - int numbrd; - - union { - struct { - char type; /* Board Type */ - long addr; /* Memory Address */ - char *addrstr; /* Memory Address in string */ - long pcibus; /* PCI BUS */ - char *pcibusstr; /* PCI BUS in string */ - long pcislot; /* PCI SLOT */ - char *pcislotstr; /* PCI SLOT in string */ - long nport; /* Number of Ports */ - char *id; /* tty id */ - long start; /* start of tty counting */ - char *method; /* Install method */ - char v_addr; - char v_pcibus; - char v_pcislot; - char v_nport; - char v_id; - char v_start; - char v_method; - char line1; - char line2; - char conc1; /* total concs in line1 */ - char conc2; /* total concs in line2 */ - char module1; /* total modules for line1 */ - char module2; /* total modules for line2 */ - char *status; /* config status */ - char *dimstatus; /* Y/N */ - int status_index; /* field pointer */ - } board; - - struct { - char *cable; - char v_cable; - long speed; - char v_speed; - } line; - - struct { - char type; - char *connect; - long speed; - long nport; - char *id; - char *idstr; - long start; - char v_connect; - char v_speed; - char v_nport; - char v_id; - char v_start; - } conc; - - struct { - char type; - long nport; - char *id; - char *idstr; - long start; - char v_nport; - char v_id; - char v_start; - } module; - - char *ttyname; - char *cuname; - char *printname; - long majornumber; - long altpin; - long ttysize; - long chsize; - long bssize; - long unsize; - long f2size; - long vpixsize; - long useintr; - } u; -}; -#endif diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c index 72f0aaa6911f..0ff3139e52b6 100644 --- a/drivers/staging/dgnc/dgnc_cls.c +++ b/drivers/staging/dgnc/dgnc_cls.c @@ -823,7 +823,7 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch) tail = ch->ch_r_tail; /* Store how much space we have left in the queue */ - qleft = (tail - head - 1); + qleft = tail - head - 1; if (qleft < 0) qleft += RQUEUEMASK + 1; diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c index fc6d2989e28f..4eb410e09609 100644 --- a/drivers/staging/dgnc/dgnc_driver.c +++ b/drivers/staging/dgnc/dgnc_driver.c @@ -125,12 +125,7 @@ static struct pci_driver dgnc_driver = { * ************************************************************************/ -/* - * dgnc_cleanup_module() - * - * Module unload. This is where it all ends. - */ -static void dgnc_cleanup_module(void) +static void cleanup(bool sysfiles) { int i; unsigned long flags; @@ -142,7 +137,8 @@ static void dgnc_cleanup_module(void) /* Turn off poller right away. */ del_timer_sync(&dgnc_poll_timer); - dgnc_remove_driver_sysfiles(&dgnc_driver); + if (sysfiles) + dgnc_remove_driver_sysfiles(&dgnc_driver); device_destroy(dgnc_class, MKDEV(dgnc_Major, 0)); class_destroy(dgnc_class); @@ -155,9 +151,17 @@ static void dgnc_cleanup_module(void) } dgnc_tty_post_uninit(); +} - if (dgnc_NumBoards) - pci_unregister_driver(&dgnc_driver); +/* + * dgnc_cleanup_module() + * + * Module unload. This is where it all ends. 
+ */ +static void dgnc_cleanup_module(void) +{ + cleanup(true); + pci_unregister_driver(&dgnc_driver); } /* @@ -181,23 +185,14 @@ static int __init dgnc_init_module(void) * Find and configure all the cards */ rc = pci_register_driver(&dgnc_driver); - - /* - * If something went wrong in the scan, bail out of driver. - */ - if (rc < 0) { - /* Only unregister if it was actually registered. */ - if (dgnc_NumBoards) - pci_unregister_driver(&dgnc_driver); - else - pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n"); - - dgnc_cleanup_module(); - } else { - dgnc_create_driver_sysfiles(&dgnc_driver); + if (rc) { + pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n"); + cleanup(false); + return rc; } + dgnc_create_driver_sysfiles(&dgnc_driver); - return rc; + return 0; } module_init(dgnc_init_module); @@ -283,13 +278,13 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* wake up and enable device */ rc = pci_enable_device(pdev); - if (rc < 0) { - rc = -EIO; - } else { - rc = dgnc_found_board(pdev, ent->driver_data); - if (rc == 0) - dgnc_NumBoards++; - } + if (rc) + return -EIO; + + rc = dgnc_found_board(pdev, ent->driver_data); + if (rc == 0) + dgnc_NumBoards++; + return rc; } diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h index ce7cd9b96542..e4be81b66041 100644 --- a/drivers/staging/dgnc/dgnc_driver.h +++ b/drivers/staging/dgnc/dgnc_driver.h @@ -88,7 +88,6 @@ #define _POSIX_VDISABLE '\0' #endif - /* * All the possible states the driver can be while being loaded. */ @@ -106,7 +105,6 @@ enum { BOARD_READY }; - /************************************************************************* * * Structures and closely related defines. @@ -145,7 +143,6 @@ struct board_ops { ************************************************************************/ #define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */ - /* * Per-board information */ @@ -241,7 +238,6 @@ struct dgnc_board { }; - /************************************************************************ * Unit flag definitions for un_flags. ************************************************************************/ @@ -277,7 +273,6 @@ struct un_t { struct device *un_sysfs; }; - /************************************************************************ * Device flag definitions for ch_flags. ************************************************************************/ @@ -300,7 +295,6 @@ struct un_t { #define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */ #define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */ - /* Our Read/Error/Write queue sizes */ #define RQUEUEMASK 0x1FFF /* 8 K - 1 */ #define EQUEUEMASK 0x1FFF /* 8 K - 1 */ @@ -309,7 +303,6 @@ struct un_t { #define EQUEUESIZE RQUEUESIZE #define WQUEUESIZE (WQUEUEMASK + 1) - /************************************************************************ * Channel information structure. 
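Reduced to its shape, the dgnc_driver.c rework above shares one teardown helper between the probe-failure path and module exit, with a flag saying whether the driver sysfs attributes were ever created. A stripped-down skeleton of that pattern, with the dgnc-specific calls replaced by comments (not the actual driver code):

    #include <linux/module.h>

    static void cleanup(bool sysfiles)
    {
            /* stop the poll timer, free boards, uninit the tty layer */
            if (sysfiles) {
                    /* remove driver sysfs attributes */
            }
    }

    static int __init skel_init(void)
    {
            int rc = 0;     /* stands in for pci_register_driver(&drv) */

            if (rc) {
                    cleanup(false);         /* sysfs attrs were never created */
                    return rc;
            }
            /* create driver sysfs attributes */
            return 0;
    }

    static void __exit skel_exit(void)
    {
            cleanup(true);                  /* also removes the sysfs attrs */
            /* pci_unregister_driver(&drv) */
    }

    module_init(skel_init);
    module_exit(skel_exit);
    MODULE_LICENSE("GPL");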
************************************************************************/ @@ -397,7 +390,6 @@ struct channel_t { ulong ch_intr_tx; /* Count of interrupts */ ulong ch_intr_rx; /* Count of interrupts */ - /* /proc/<board>/<channel> entries */ struct proc_dir_entry *proc_entry_pointer; struct dgnc_proc_entry *dgnc_channel_table; diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c index 518fbd5e2d0e..ba29a8d913f2 100644 --- a/drivers/staging/dgnc/dgnc_mgmt.c +++ b/drivers/staging/dgnc/dgnc_mgmt.c @@ -192,7 +192,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) spin_lock_irqsave(&ch->ch_lock, flags); - mstat = (ch->ch_mostat | ch->ch_mistat); + mstat = ch->ch_mostat | ch->ch_mistat; if (mstat & UART_MCR_DTR) { ni.mstat |= TIOCM_DTR; diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c index 39c76e78e56a..31ac437cb4a4 100644 --- a/drivers/staging/dgnc/dgnc_neo.c +++ b/drivers/staging/dgnc/dgnc_neo.c @@ -1306,10 +1306,10 @@ static int neo_drain(struct tty_struct *tty, uint seconds) /* * Go to sleep waiting for the tty layer to wake me back up when * the empty flag goes away. - * - * NOTE: TODO: Do something with time passed in. */ - rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0)); + rc = wait_event_interruptible_timeout(un->un_flags_wait, + ((un->un_flags & UN_EMPTY) == 0), + msecs_to_jiffies(seconds * 1000)); /* If ret is non-zero, user ctrl-c'ed us */ return rc; @@ -1735,7 +1735,7 @@ static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int ad /* enable chip select */ writeb(NEO_EECS, base + NEO_EEREG); /* READ */ - enable = (address | 0x180); + enable = address | 0x180; for (bits = 9; bits--; ) { databit = (enable & (1 << bits)) ? NEO_EEDI : 0; diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h index c528df5a0e5a..abddd48353d0 100644 --- a/drivers/staging/dgnc/dgnc_neo.h +++ b/drivers/staging/dgnc/dgnc_neo.h @@ -65,7 +65,6 @@ struct neo_uart_struct { #define NEO_EEDO 0x80 /* Data Out is an Input Pin */ #define NEO_EEREG 0x8E /* offset to EEPROM control reg */ - #define NEO_VPD_IMAGESIZE 0x40 /* size of image to read from EEPROM in words */ #define NEO_VPD_IMAGEBYTES (NEO_VPD_IMAGESIZE * 2) diff --git a/drivers/staging/dgnc/dgnc_pci.h b/drivers/staging/dgnc/dgnc_pci.h index 617d40d1ec19..4e170c47f4a3 100644 --- a/drivers/staging/dgnc/dgnc_pci.h +++ b/drivers/staging/dgnc/dgnc_pci.h @@ -59,7 +59,6 @@ #define PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME "Neo 8 PCI Express RJ45" #define PCI_DEVICE_NEO_EXPRESS_4_IBM_PCI_NAME "Neo 4 PCI Express IBM" - /* Size of Memory and I/O for PCI (4 K) */ #define PCI_RAM_SIZE 0x1000 diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c index 8b1ba65a6984..bcd2bdfb9c8f 100644 --- a/drivers/staging/dgnc/dgnc_tty.c +++ b/drivers/staging/dgnc/dgnc_tty.c @@ -443,15 +443,13 @@ void dgnc_tty_uninit(struct dgnc_board *brd) brd->PrintDriver.termios = NULL; } -/*======================================================================= - * +/* * dgnc_wmove - Write data to transmit queue. * * ch - Pointer to channel structure. * buf - Pointer to characters to be moved. * n - Number of characters to move. 
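The neo_drain() change above finally uses the seconds argument by switching to wait_event_interruptible_timeout(), which has three distinct outcomes: 0 when the timeout expires with the condition still false, -ERESTARTSYS when a signal interrupts the sleep, and the remaining jiffies (>= 1) when the condition became true in time. A small sketch of how a caller can fold those outcomes into an error code (the driver itself simply returns rc; the wait queue and flag here are generic stand-ins):

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(drain_wait);
    static bool tx_empty;

    static int wait_for_drain(unsigned int seconds)
    {
            long rc = wait_event_interruptible_timeout(drain_wait, tx_empty,
                                            msecs_to_jiffies(seconds * 1000));

            if (rc == 0)
                    return -ETIMEDOUT;      /* timed out, still not empty    */
            if (rc < 0)
                    return rc;              /* -ERESTARTSYS: hit by a signal */
            return 0;                       /* became empty; rc = jiffies left */
    }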
- * - *=======================================================================*/ + */ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n) { int remain; @@ -489,13 +487,11 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n) ch->ch_w_head = head; } -/*======================================================================= - * +/* * dgnc_input - Process received data. * * ch - Pointer to channel structure. - * - *=======================================================================*/ + */ void dgnc_input(struct channel_t *ch) { struct dgnc_board *bd; @@ -796,7 +792,7 @@ static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate) * And of course, rates above the dividend won't fly. */ if (newrate && newrate < ((ch->ch_bd->bd_dividend / 0xFFFF) + 1)) - newrate = ((ch->ch_bd->bd_dividend / 0xFFFF) + 1); + newrate = (ch->ch_bd->bd_dividend / 0xFFFF) + 1; if (newrate && newrate > ch->ch_bd->bd_dividend) newrate = ch->ch_bd->bd_dividend; @@ -1786,8 +1782,8 @@ static int dgnc_tty_write(struct tty_struct *tty, } /* Update printer buffer empty time. */ - if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0) - && (ch->ch_digi.digi_bufsize > 0)) { + if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0) && + (ch->ch_digi.digi_bufsize > 0)) { ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps; } @@ -1834,7 +1830,7 @@ static int dgnc_tty_tiocmget(struct tty_struct *tty) spin_lock_irqsave(&ch->ch_lock, flags); - mstat = (ch->ch_mostat | ch->ch_mistat); + mstat = ch->ch_mostat | ch->ch_mistat; spin_unlock_irqrestore(&ch->ch_lock, flags); @@ -2034,7 +2030,7 @@ static inline int dgnc_get_mstat(struct channel_t *ch) spin_lock_irqsave(&ch->ch_lock, flags); - mstat = (ch->ch_mostat | ch->ch_mistat); + mstat = ch->ch_mostat | ch->ch_mistat; spin_unlock_irqrestore(&ch->ch_lock, flags); @@ -2506,12 +2502,12 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty) /* Flush UARTs transmit FIFO */ ch->ch_bd->bd_ops->flush_uart_write(ch); - if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { - ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY); + if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) { + ch->ch_tun.un_flags &= ~(UN_LOW | UN_EMPTY); wake_up_interruptible(&ch->ch_tun.un_flags_wait); } - if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { - ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY); + if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) { + ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY); wake_up_interruptible(&ch->ch_pun.un_flags_wait); } @@ -2705,13 +2701,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { ch->ch_tun.un_flags &= - ~(UN_LOW|UN_EMPTY); + ~(UN_LOW | UN_EMPTY); wake_up_interruptible(&ch->ch_tun.un_flags_wait); } if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { ch->ch_pun.un_flags &= - ~(UN_LOW|UN_EMPTY); + ~(UN_LOW | UN_EMPTY); wake_up_interruptible(&ch->ch_pun.un_flags_wait); } } diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h index cf9dcae7cc3f..523a2d34f747 100644 --- a/drivers/staging/dgnc/digi.h +++ b/drivers/staging/dgnc/digi.h @@ -31,21 +31,21 @@ #endif #if !defined(TIOCMSET) -#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */ -#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */ +#define TIOCMSET (('d' << 8) | 252) /* set modem ctrl state */ +#define TIOCMGET (('d' << 8) | 253) /* set modem ctrl state */ #endif #if !defined(TIOCMBIC) -#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */ -#define TIOCMBIS (('d'<<8) | 255) /* set modem 
ctrl state */ +#define TIOCMBIC (('d' << 8) | 254) /* set modem ctrl state */ +#define TIOCMBIS (('d' << 8) | 255) /* set modem ctrl state */ #endif -#define DIGI_GETA (('e'<<8) | 94) /* Read params */ -#define DIGI_SETA (('e'<<8) | 95) /* Set params */ -#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */ -#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */ -#define DIGI_GET_NI_INFO (('d'<<8) | 250) /* Non-intelligent state info */ -#define DIGI_LOOPBACK (('d'<<8) | 252) /* +#define DIGI_GETA (('e' << 8) | 94) /* Read params */ +#define DIGI_SETA (('e' << 8) | 95) /* Set params */ +#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */ +#define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */ +#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */ +#define DIGI_LOOPBACK (('d' << 8) | 252) /* * Enable/disable UART * internal loopback */ @@ -85,7 +85,7 @@ struct digi_dinfo { char dinfo_version[16]; /* driver version */ }; -#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */ +#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */ /************************************************************************ * Structure used with ioctl commands for per-board information @@ -105,7 +105,7 @@ struct digi_info { char info_reserved[7]; /* for future expansion */ }; -#define DIGI_GETBD (('d'<<8) | 249) /* get board info */ +#define DIGI_GETBD (('d' << 8) | 249) /* get board info */ struct digi_getbuffer /* Struct for holding buffer use counts */ { @@ -133,10 +133,10 @@ struct digi_getcounter { #define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */ #define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */ -#define DIGI_REALPORT_GETBUFFERS (('e'<<8) | 108) -#define DIGI_REALPORT_SENDIMMEDIATE (('e'<<8) | 109) -#define DIGI_REALPORT_GETCOUNTERS (('e'<<8) | 110) -#define DIGI_REALPORT_GETEVENTS (('e'<<8) | 111) +#define DIGI_REALPORT_GETBUFFERS (('e' << 8) | 108) +#define DIGI_REALPORT_SENDIMMEDIATE (('e' << 8) | 109) +#define DIGI_REALPORT_GETCOUNTERS (('e' << 8) | 110) +#define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111) #define EV_OPU 0x0001 /* !<Output paused by client */ #define EV_OPS 0x0002 /* !<Output paused by reqular sw flowctrl */ diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c index beb9411658ba..e8cacaecf9ad 100644 --- a/drivers/staging/emxx_udc/emxx_udc.c +++ b/drivers/staging/emxx_udc/emxx_udc.c @@ -21,7 +21,6 @@ #include <linux/ioport.h> #include <linux/slab.h> #include <linux/errno.h> -#include <linux/init.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> @@ -160,7 +159,7 @@ static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req) recipient = (u8)(p_ctrl->bRequestType & USB_RECIP_MASK); selector = p_ctrl->wValue; if ((recipient == USB_RECIP_DEVICE) && - (selector == USB_DEVICE_TEST_MODE)) { + (selector == USB_DEVICE_TEST_MODE)) { test_mode = (u32)(p_ctrl->wIndex >> 8); _nbu2ss_set_test_mode(udc, test_mode); } @@ -271,21 +270,21 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep) data = EPn_EN | EPn_BCLR | EPn_DIR0; _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data); - data = (EPn_ONAK | EPn_OSTL_EN | EPn_OSTL); + data = EPn_ONAK | EPn_OSTL_EN | EPn_OSTL; _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data); - data = (EPn_OUT_EN | EPn_OUT_END_EN); + data = EPn_OUT_EN | EPn_OUT_END_EN; _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data); } else 
{ /*---------------------------------------------------------*/ /* IN */ - data = (EPn_EN | EPn_BCLR | EPn_AUTO); + data = EPn_EN | EPn_BCLR | EPn_AUTO; _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data); - data = (EPn_ISTL); + data = EPn_ISTL; _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data); - data = (EPn_IN_EN | EPn_IN_END_EN); + data = EPn_IN_EN | EPn_IN_END_EN; _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data); } @@ -460,7 +459,7 @@ static void _nbu2ss_ep_in_end( if (length) _nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32); - data = (((((u32)length) << 5) & EPn_DW) | EPn_DEND); + data = ((((u32)length) << 5) & EPn_DW) | EPn_DEND; _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data); _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO); @@ -526,10 +525,10 @@ static void _nbu2ss_dma_unmap_single( if (req->unaligned) { if (direct == USB_DIR_OUT) memcpy(req->req.buf, ep->virt_buf, - req->req.actual & 0xfffffffc); + req->req.actual & 0xfffffffc); } else dma_unmap_single(udc->gadget.dev.parent, - req->req.dma, req->req.length, + req->req.dma, req->req.length, (direct == USB_DIR_IN) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); @@ -538,7 +537,7 @@ static void _nbu2ss_dma_unmap_single( } else { if (!req->unaligned) dma_sync_single_for_cpu(udc->gadget.dev.parent, - req->req.dma, req->req.length, + req->req.dma, req->req.length, (direct == USB_DIR_IN) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); @@ -844,7 +843,7 @@ static int _nbu2ss_out_dma( /* Number of transfer packets */ mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPn_MPKT; - dmacnt = (length / mpkt); + dmacnt = length / mpkt; lmpkt = (length % mpkt) & ~(u32)0x03; if (dmacnt > DMA_MAX_COUNT) { @@ -1490,7 +1489,7 @@ static inline int _nbu2ss_req_feature(struct nbu2ss_udc *udc, bool bset) int result = -EOPNOTSUPP; if ((udc->ctrl.wLength != 0x0000) || - (direction != USB_DIR_OUT)) { + (direction != USB_DIR_OUT)) { return -EINVAL; } @@ -1648,7 +1647,7 @@ static int std_req_set_address(struct nbu2ss_udc *udc) u32 wValue = udc->ctrl.wValue; if ((udc->ctrl.bRequestType != 0x00) || - (udc->ctrl.wIndex != 0x0000) || + (udc->ctrl.wIndex != 0x0000) || (udc->ctrl.wLength != 0x0000)) { return -EINVAL; } @@ -1670,7 +1669,7 @@ static int std_req_set_configuration(struct nbu2ss_udc *udc) u32 ConfigValue = (u32)(udc->ctrl.wValue & 0x00ff); if ((udc->ctrl.wIndex != 0x0000) || - (udc->ctrl.wLength != 0x0000) || + (udc->ctrl.wLength != 0x0000) || (udc->ctrl.bRequestType != 0x00)) { return -EINVAL; } @@ -1949,7 +1948,7 @@ static void _nbu2ss_ep_done( #ifdef USE_DMA if ((ep->direct == USB_DIR_OUT) && (ep->epnum > 0) && - (req->req.dma != 0)) + (req->req.dma != 0)) _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT); #endif @@ -2277,7 +2276,7 @@ static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc) _nbu2ss_writel(&udc->p_regs->AHBSCTR, WAIT_MODE); _nbu2ss_writel(&udc->p_regs->AHBMCTR, - HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE); + HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE); while (!(_nbu2ss_readl(&udc->p_regs->EPCTR) & PLL_LOCK)) { waitcnt++; @@ -2626,7 +2625,7 @@ static struct usb_request *nbu2ss_ep_alloc_request( req = kzalloc(sizeof(*req), gfp_flags); if (!req) - return 0; + return NULL; #ifdef USE_DMA req->req.dma = DMA_ADDR_INVALID; @@ -2701,7 +2700,7 @@ static int nbu2ss_ep_queue( if (unlikely(!udc->driver)) { dev_err(udc->dev, "%s, bogus device state %p\n", __func__, - udc->driver); + udc->driver); return -ESHUTDOWN; } @@ -2721,12 +2720,12 @@ static int nbu2ss_ep_queue( if (ep->epnum > 0) { if (ep->direct == 
USB_DIR_IN) memcpy(ep->virt_buf, req->req.buf, - req->req.length); + req->req.length); } } if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT) && - (req->req.dma != 0)) + (req->req.dma != 0)) _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT); #endif @@ -2741,12 +2740,12 @@ static int nbu2ss_ep_queue( result = _nbu2ss_start_transfer(udc, ep, req, FALSE); if (result < 0) { dev_err(udc->dev, " *** %s, result = %d\n", __func__, - result); + result); list_del(&req->queue); } else if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT)) { #ifdef USE_DMA if (req->req.length < 4 && - req->req.length == req->req.actual) + req->req.length == req->req.actual) #else if (req->req.length == req->req.actual) #endif @@ -3026,7 +3025,7 @@ static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget) /*-------------------------------------------------------------------------*/ static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget, - int is_selfpowered) + int is_selfpowered) { struct nbu2ss_udc *udc; unsigned long flags; @@ -3180,7 +3179,8 @@ static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc) ep->ep.ops = &nbu2ss_ep_ops; usb_ep_set_maxpacket_limit(&ep->ep, - i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE); + i == 0 ? EP0_PACKETSIZE + : EP_PACKETSIZE); list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); INIT_LIST_HEAD(&ep->queue); @@ -3273,10 +3273,7 @@ static int nbu2ss_drv_probe(struct platform_device *pdev) /* VBUS Interrupt */ irq_set_irq_type(INT_VBUS, IRQ_TYPE_EDGE_BOTH); status = request_irq(INT_VBUS, - _nbu2ss_vbus_irq, - IRQF_SHARED, - driver_name, - udc); + _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc); if (status != 0) { dev_err(udc->dev, "request_irq(INT_VBUS) failed\n"); diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig index 883ff5b8fdab..6f5e82464d78 100644 --- a/drivers/staging/fbtft/Kconfig +++ b/drivers/staging/fbtft/Kconfig @@ -117,12 +117,24 @@ config FB_TFT_SSD1289 help Framebuffer support for SSD1289 +config FB_TFT_SSD1305 + tristate "FB driver for the SSD1305 OLED Controller" + depends on FB_TFT + help + Framebuffer support for SSD1305 + config FB_TFT_SSD1306 tristate "FB driver for the SSD1306 OLED Controller" depends on FB_TFT help Framebuffer support for SSD1306 +config FB_TFT_SSD1325 + tristate "FB driver for the SSD1325 OLED Controller" + depends on FB_TFT + help + Framebuffer support for SSD1305 + config FB_TFT_SSD1331 tristate "FB driver for the SSD1331 LCD Controller" depends on FB_TFT diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile index 4f9071d96d01..2725ea9a4afc 100644 --- a/drivers/staging/fbtft/Makefile +++ b/drivers/staging/fbtft/Makefile @@ -21,7 +21,9 @@ obj-$(CONFIG_FB_TFT_RA8875) += fb_ra8875.o obj-$(CONFIG_FB_TFT_S6D02A1) += fb_s6d02a1.o obj-$(CONFIG_FB_TFT_S6D1121) += fb_s6d1121.o obj-$(CONFIG_FB_TFT_SSD1289) += fb_ssd1289.o +obj-$(CONFIG_FB_TFT_SSD1305) += fb_ssd1305.o obj-$(CONFIG_FB_TFT_SSD1306) += fb_ssd1306.o +obj-$(CONFIG_FB_TFT_SSD1305) += fb_ssd1325.o obj-$(CONFIG_FB_TFT_SSD1331) += fb_ssd1331.o obj-$(CONFIG_FB_TFT_SSD1351) += fb_ssd1351.o obj-$(CONFIG_FB_TFT_ST7735R) += fb_st7735r.o diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c index 2a50cf957101..ba9fc444b848 100644 --- a/drivers/staging/fbtft/fb_agm1264k-fl.c +++ b/drivers/staging/fbtft/fb_agm1264k-fl.c @@ -272,8 +272,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) int ret = 0; /* buffer to convert RGB565 -> grayscale16 -> Dithered image 1bpp */ - signed short 
*convert_buf = kmalloc(par->info->var.xres * - par->info->var.yres * sizeof(signed short), GFP_NOIO); + signed short *convert_buf = kmalloc_array(par->info->var.xres * + par->info->var.yres, sizeof(signed short), GFP_NOIO); if (!convert_buf) return -ENOMEM; diff --git a/drivers/staging/fbtft/fb_hx8340bn.c b/drivers/staging/fbtft/fb_hx8340bn.c index e1ed177f9184..9970ed74bb38 100644 --- a/drivers/staging/fbtft/fb_hx8340bn.c +++ b/drivers/staging/fbtft/fb_hx8340bn.c @@ -25,6 +25,7 @@ #include <linux/vmalloc.h> #include <linux/spi/spi.h> #include <linux/delay.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -45,56 +46,70 @@ static int init_display(struct fbtft_par *par) /* BTL221722-276L startup sequence, from datasheet */ - /* SETEXTCOM: Set extended command set (C1h) - This command is used to set extended command set access enable. - Enable: After command (C1h), must write: ffh,83h,40h */ + /* + * SETEXTCOM: Set extended command set (C1h) + * This command is used to set extended command set access enable. + * Enable: After command (C1h), must write: ffh,83h,40h + */ write_reg(par, 0xC1, 0xFF, 0x83, 0x40); - /* Sleep out - This command turns off sleep mode. - In this mode the DC/DC converter is enabled, Internal oscillator - is started, and panel scanning is started. */ + /* + * Sleep out + * This command turns off sleep mode. + * In this mode the DC/DC converter is enabled, Internal oscillator + * is started, and panel scanning is started. + */ write_reg(par, 0x11); mdelay(150); /* Undoc'd register? */ write_reg(par, 0xCA, 0x70, 0x00, 0xD9); - /* SETOSC: Set Internal Oscillator (B0h) - This command is used to set internal oscillator related settings */ - /* OSC_EN: Enable internal oscillator */ - /* Internal oscillator frequency: 125% x 2.52MHz */ + /* + * SETOSC: Set Internal Oscillator (B0h) + * This command is used to set internal oscillator related settings + * OSC_EN: Enable internal oscillator + * Internal oscillator frequency: 125% x 2.52MHz + */ write_reg(par, 0xB0, 0x01, 0x11); /* Drive ability setting */ write_reg(par, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06); mdelay(20); - /* SETPWCTR5: Set Power Control 5(B5h) - This command is used to set VCOM Low and VCOM High Voltage */ - /* VCOMH 0110101 : 3.925 */ - /* VCOML 0100000 : -1.700 */ - /* 45h=69 VCOMH: "VMH" + 5d VCOML: "VMH" + 5d */ + /* + * SETPWCTR5: Set Power Control 5(B5h) + * This command is used to set VCOM Low and VCOM High Voltage + * VCOMH 0110101 : 3.925 + * VCOML 0100000 : -1.700 + * 45h=69 VCOMH: "VMH" + 5d VCOML: "VMH" + 5d + */ write_reg(par, 0xB5, 0x35, 0x20, 0x45); - /* SETPWCTR4: Set Power Control 4(B4h) - VRH[4:0]: Specify the VREG1 voltage adjusting. - VREG1 voltage is for gamma voltage setting. - BT[2:0]: Switch the output factor of step-up circuit 2 - for VGH and VGL voltage generation. */ + /* + * SETPWCTR4: Set Power Control 4(B4h) + * VRH[4:0]: Specify the VREG1 voltage adjusting. + * VREG1 voltage is for gamma voltage setting. + * BT[2:0]: Switch the output factor of step-up circuit 2 + * for VGH and VGL voltage generation. + */ write_reg(par, 0xB4, 0x33, 0x25, 0x4C); mdelay(10); - /* Interface Pixel Format (3Ah) - This command is used to define the format of RGB picture data, - which is to be transfer via the system and RGB interface. */ - /* RGB interface: 16 Bit/Pixel */ - write_reg(par, 0x3A, 0x05); - - /* Display on (29h) - This command is used to recover from DISPLAY OFF mode. - Output from the Frame Memory is enabled. 
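On the fb_agm1264k-fl.c hunk above: kmalloc_array(n, size, flags) is the kmalloc() variant that returns NULL instead of silently wrapping when n * size overflows, which is the point of the conversion (note that xres * yres is still computed by the caller; only the count-times-element-size product is checked). Roughly, the allocation now has this shape:

    #include <linux/slab.h>

    /* Like kmalloc(n * size, flags), but fails cleanly on multiplication
     * overflow instead of allocating a too-small buffer. */
    static signed short *alloc_convert_buf(unsigned int xres, unsigned int yres)
    {
            return kmalloc_array(xres * yres, sizeof(signed short), GFP_NOIO);
    }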
*/ - write_reg(par, 0x29); + /* + * Interface Pixel Format (3Ah) + * This command is used to define the format of RGB picture data, + * which is to be transfer via the system and RGB interface. + * RGB interface: 16 Bit/Pixel + */ + write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT); + + /* + * Display on (29h) + * This command is used to recover from DISPLAY OFF mode. + * Output from the Frame Memory is enabled. + */ + write_reg(par, MIPI_DCS_SET_DISPLAY_ON); mdelay(10); return 0; @@ -102,9 +117,9 @@ static int init_display(struct fbtft_par *par) static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - write_reg(par, FBTFT_CASET, 0x00, xs, 0x00, xe); - write_reg(par, FBTFT_RASET, 0x00, ys, 0x00, ye); - write_reg(par, FBTFT_RAMWR); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, 0x00, xs, 0x00, xe); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, 0x00, ys, 0x00, ye); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } static int set_var(struct fbtft_par *par) @@ -116,16 +131,19 @@ static int set_var(struct fbtft_par *par) #define MV BIT(5) switch (par->info->var.rotate) { case 0: - write_reg(par, 0x36, par->bgr << 3); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, par->bgr << 3); break; case 270: - write_reg(par, 0x36, MX | MV | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MX | MV | (par->bgr << 3)); break; case 180: - write_reg(par, 0x36, MX | MY | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MX | MY | (par->bgr << 3)); break; case 90: - write_reg(par, 0x36, MY | MV | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MY | MV | (par->bgr << 3)); break; } @@ -133,12 +151,12 @@ static int set_var(struct fbtft_par *par) } /* - Gamma Curve selection, GC (only GC0 can be customized): - 0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0 - Gamma string format: - OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1 - ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX GC -*/ + * Gamma Curve selection, GC (only GC0 can be customized): + * 0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0 + * Gamma string format: + * OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1 + * ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX GC + */ #define CURVE(num, idx) curves[num * par->gamma.num_values + idx] static int set_gamma(struct fbtft_par *par, unsigned long *curves) { @@ -154,36 +172,38 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves) for (j = 0; j < par->gamma.num_values; j++) CURVE(i, j) &= mask[i * par->gamma.num_values + j]; - write_reg(par, 0x26, 1 << CURVE(1, 14)); /* Gamma Set (26h) */ + /* Gamma Set (26h) */ + write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 1 << CURVE(1, 14)); if (CURVE(1, 14)) return 0; /* only GC0 can be customized */ write_reg(par, 0xC2, - (CURVE(0, 8) << 4) | CURVE(0, 7), - (CURVE(0, 10) << 4) | CURVE(0, 9), - (CURVE(0, 12) << 4) | CURVE(0, 11), - CURVE(0, 2), - (CURVE(0, 4) << 4) | CURVE(0, 3), - CURVE(0, 5), - CURVE(0, 6), - (CURVE(0, 1) << 4) | CURVE(0, 0), - (CURVE(0, 14) << 2) | CURVE(0, 13)); + (CURVE(0, 8) << 4) | CURVE(0, 7), + (CURVE(0, 10) << 4) | CURVE(0, 9), + (CURVE(0, 12) << 4) | CURVE(0, 11), + CURVE(0, 2), + (CURVE(0, 4) << 4) | CURVE(0, 3), + CURVE(0, 5), + CURVE(0, 6), + (CURVE(0, 1) << 4) | CURVE(0, 0), + (CURVE(0, 14) << 2) | CURVE(0, 13)); write_reg(par, 0xC3, - (CURVE(1, 8) << 4) | CURVE(1, 7), - (CURVE(1, 10) << 4) | CURVE(1, 9), - (CURVE(1, 12) << 4) | CURVE(1, 11), - CURVE(1, 2), - (CURVE(1, 4) << 4) | CURVE(1, 3), - CURVE(1, 5), - CURVE(1, 6), - (CURVE(1, 
1) << 4) | CURVE(1, 0)); + (CURVE(1, 8) << 4) | CURVE(1, 7), + (CURVE(1, 10) << 4) | CURVE(1, 9), + (CURVE(1, 12) << 4) | CURVE(1, 11), + CURVE(1, 2), + (CURVE(1, 4) << 4) | CURVE(1, 3), + CURVE(1, 5), + CURVE(1, 6), + (CURVE(1, 1) << 4) | CURVE(1, 0)); mdelay(10); return 0; } + #undef CURVE static struct fbtft_display display = { diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c index 6ff76e531a37..450a61e3f99c 100644 --- a/drivers/staging/fbtft/fb_hx8347d.c +++ b/drivers/staging/fbtft/fb_hx8347d.c @@ -97,10 +97,10 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) } /* - Gamma string format: - VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM - VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM -*/ + * Gamma string format: + * VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM + * VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM + */ #define CURVE(num, idx) curves[num * par->gamma.num_values + idx] static int set_gamma(struct fbtft_par *par, unsigned long *curves) { @@ -140,6 +140,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves) return 0; } + #undef CURVE static struct fbtft_display display = { diff --git a/drivers/staging/fbtft/fb_hx8353d.c b/drivers/staging/fbtft/fb_hx8353d.c index 8552411695fa..72e4ff8c5553 100644 --- a/drivers/staging/fbtft/fb_hx8353d.c +++ b/drivers/staging/fbtft/fb_hx8353d.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -27,7 +28,6 @@ static int init_display(struct fbtft_par *par) { - par->fbtftops.reset(par); mdelay(150); @@ -47,18 +47,18 @@ static int init_display(struct fbtft_par *par) write_reg(par, 0x3A, 0x05); /* MEM ACCESS */ - write_reg(par, 0x36, 0xC0); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0xC0); /* SLPOUT - Sleep out & booster on */ - write_reg(par, 0x11); + write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); mdelay(150); /* DISPON - Display On */ - write_reg(par, 0x29); + write_reg(par, MIPI_DCS_SET_DISPLAY_ON); /* RGBSET */ - write_reg(par, 0x2D, - 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, + write_reg(par, MIPI_DCS_WRITE_LUT, + 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -87,41 +87,45 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) #define mv BIT(5) static int set_var(struct fbtft_par *par) { - /* madctl - memory data access control - rgb/bgr: - 1. mode selection pin srgb - rgb h/w pin for color filter setting: 0=rgb, 1=bgr - 2. madctl rgb bit - rgb-bgr order color filter panel: 0=rgb, 1=bgr */ + /* + * madctl - memory data access control + * rgb/bgr: + * 1. mode selection pin srgb + * rgb h/w pin for color filter setting: 0=rgb, 1=bgr + * 2. 
madctl rgb bit + * rgb-bgr order color filter panel: 0=rgb, 1=bgr + */ switch (par->info->var.rotate) { case 0: - write_reg(par, 0x36, mx | my | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + mx | my | (par->bgr << 3)); break; case 270: - write_reg(par, 0x36, my | mv | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + my | mv | (par->bgr << 3)); break; case 180: - write_reg(par, 0x36, par->bgr << 3); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + par->bgr << 3); break; case 90: - write_reg(par, 0x36, mx | mv | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + mx | mv | (par->bgr << 3)); break; } return 0; } -/* - gamma string format: -*/ +/* gamma string format: */ static int set_gamma(struct fbtft_par *par, unsigned long *curves) { write_reg(par, 0xE0, - curves[0], curves[1], curves[2], curves[3], - curves[4], curves[5], curves[6], curves[7], - curves[8], curves[9], curves[10], curves[11], - curves[12], curves[13], curves[14], curves[15], - curves[16], curves[17], curves[18]); + curves[0], curves[1], curves[2], curves[3], + curves[4], curves[5], curves[6], curves[7], + curves[8], curves[9], curves[10], curves[11], + curves[12], curves[13], curves[14], curves[15], + curves[16], curves[17], curves[18]); return 0; } diff --git a/drivers/staging/fbtft/fb_hx8357d.c b/drivers/staging/fbtft/fb_hx8357d.c index a381dbcf5535..32e6efe1d0a7 100644 --- a/drivers/staging/fbtft/fb_hx8357d.c +++ b/drivers/staging/fbtft/fb_hx8357d.c @@ -22,6 +22,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> +#include <video/mipi_display.h> #include "fbtft.h" #include "fb_hx8357d.h" @@ -35,7 +36,7 @@ static int init_display(struct fbtft_par *par) par->fbtftops.reset(par); /* Reset things like Gamma */ - write_reg(par, HX8357B_SWRESET); + write_reg(par, MIPI_DCS_SOFT_RESET); usleep_range(5000, 7000); /* setextc */ @@ -55,83 +56,83 @@ static int init_display(struct fbtft_par *par) write_reg(par, HX8357_SETPANEL, 0x05); write_reg(par, HX8357_SETPWR1, - 0x00, /* Not deep standby */ - 0x15, /* BT */ - 0x1C, /* VSPR */ - 0x1C, /* VSNR */ - 0x83, /* AP */ - 0xAA); /* FS */ + 0x00, /* Not deep standby */ + 0x15, /* BT */ + 0x1C, /* VSPR */ + 0x1C, /* VSNR */ + 0x83, /* AP */ + 0xAA); /* FS */ write_reg(par, HX8357D_SETSTBA, - 0x50, /* OPON normal */ - 0x50, /* OPON idle */ - 0x01, /* STBA */ - 0x3C, /* STBA */ - 0x1E, /* STBA */ - 0x08); /* GEN */ + 0x50, /* OPON normal */ + 0x50, /* OPON idle */ + 0x01, /* STBA */ + 0x3C, /* STBA */ + 0x1E, /* STBA */ + 0x08); /* GEN */ write_reg(par, HX8357D_SETCYC, - 0x02, /* NW 0x02 */ - 0x40, /* RTN */ - 0x00, /* DIV */ - 0x2A, /* DUM */ - 0x2A, /* DUM */ - 0x0D, /* GDON */ - 0x78); /* GDOFF */ + 0x02, /* NW 0x02 */ + 0x40, /* RTN */ + 0x00, /* DIV */ + 0x2A, /* DUM */ + 0x2A, /* DUM */ + 0x0D, /* GDON */ + 0x78); /* GDOFF */ write_reg(par, HX8357D_SETGAMMA, - 0x02, - 0x0A, - 0x11, - 0x1d, - 0x23, - 0x35, - 0x41, - 0x4b, - 0x4b, - 0x42, - 0x3A, - 0x27, - 0x1B, - 0x08, - 0x09, - 0x03, - 0x02, - 0x0A, - 0x11, - 0x1d, - 0x23, - 0x35, - 0x41, - 0x4b, - 0x4b, - 0x42, - 0x3A, - 0x27, - 0x1B, - 0x08, - 0x09, - 0x03, - 0x00, - 0x01); + 0x02, + 0x0A, + 0x11, + 0x1d, + 0x23, + 0x35, + 0x41, + 0x4b, + 0x4b, + 0x42, + 0x3A, + 0x27, + 0x1B, + 0x08, + 0x09, + 0x03, + 0x02, + 0x0A, + 0x11, + 0x1d, + 0x23, + 0x35, + 0x41, + 0x4b, + 0x4b, + 0x42, + 0x3A, + 0x27, + 0x1B, + 0x08, + 0x09, + 0x03, + 0x00, + 0x01); /* 16 bit */ - write_reg(par, HX8357_COLMOD, 0x55); + write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); - write_reg(par, 
HX8357_MADCTL, 0xC0); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0xC0); /* TE off */ - write_reg(par, HX8357_TEON, 0x00); + write_reg(par, MIPI_DCS_SET_TEAR_ON, 0x00); /* tear line */ - write_reg(par, HX8357_TEARLINE, 0x00, 0x02); + write_reg(par, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02); /* Exit Sleep */ - write_reg(par, HX8357_SLPOUT); + write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); msleep(150); /* display on */ - write_reg(par, HX8357_DISPON); + write_reg(par, MIPI_DCS_SET_DISPLAY_ON); usleep_range(5000, 7000); return 0; @@ -139,18 +140,15 @@ static int init_display(struct fbtft_par *par) static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - /* Column addr set */ - write_reg(par, HX8357_CASET, - xs >> 8, xs & 0xff, /* XSTART */ - xe >> 8, xe & 0xff); /* XEND */ - - /* Row addr set */ - write_reg(par, HX8357_PASET, - ys >> 8, ys & 0xff, /* YSTART */ - ye >> 8, ye & 0xff); /* YEND */ - - /* write to RAM */ - write_reg(par, HX8357_RAMWR); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xff, /* XSTART */ + xe >> 8, xe & 0xff); /* XEND */ + + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xff, /* YSTART */ + ye >> 8, ye & 0xff); /* YEND */ + + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } #define HX8357D_MADCTL_MY 0x80 @@ -182,7 +180,7 @@ static int set_var(struct fbtft_par *par) val |= (par->bgr ? HX8357D_MADCTL_RGB : HX8357D_MADCTL_BGR); /* Memory Access Control */ - write_reg(par, HX8357_MADCTL, val); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, val); return 0; } diff --git a/drivers/staging/fbtft/fb_hx8357d.h b/drivers/staging/fbtft/fb_hx8357d.h index de05e8cdf04c..e281921d4a97 100644 --- a/drivers/staging/fbtft/fb_hx8357d.h +++ b/drivers/staging/fbtft/fb_hx8357d.h @@ -1,17 +1,17 @@ -/*************************************************** - This is our library for the Adafruit ILI9341 Breakout and Shield - ----> http://www.adafruit.com/products/1651 - - Check out the links above for our tutorials and wiring diagrams - These displays use SPI to communicate, 4 or 5 pins are required to - interface (RST is optional) - Adafruit invests time and resources providing this open source code, - please support Adafruit and open-source hardware by purchasing - products from Adafruit! - - Written by Limor Fried/Ladyada for Adafruit Industries. - MIT license, all text above must be included in any redistribution - ****************************************************/ +/* + * This is our library for the Adafruit ILI9341 Breakout and Shield + * ----> http://www.adafruit.com/products/1651 + * + * Check out the links above for our tutorials and wiring diagrams + * These displays use SPI to communicate, 4 or 5 pins are required to + * interface (RST is optional) + * Adafruit invests time and resources providing this open source code, + * please support Adafruit and open-source hardware by purchasing + * products from Adafruit! + * + * Written by Limor Fried/Ladyada for Adafruit Industries. 
+ * MIT license, all text above must be included in any redistribution + */ #ifndef __HX8357_H__ #define __HX8357_H__ @@ -22,38 +22,6 @@ #define HX8357_TFTWIDTH 320 #define HX8357_TFTHEIGHT 480 -#define HX8357B_NOP 0x00 -#define HX8357B_SWRESET 0x01 -#define HX8357B_RDDID 0x04 -#define HX8357B_RDDST 0x09 - -#define HX8357B_RDPOWMODE 0x0A -#define HX8357B_RDMADCTL 0x0B -#define HX8357B_RDCOLMOD 0x0C -#define HX8357B_RDDIM 0x0D -#define HX8357B_RDDSDR 0x0F - -#define HX8357_SLPIN 0x10 -#define HX8357_SLPOUT 0x11 -#define HX8357B_PTLON 0x12 -#define HX8357B_NORON 0x13 - -#define HX8357_INVOFF 0x20 -#define HX8357_INVON 0x21 -#define HX8357_DISPOFF 0x28 -#define HX8357_DISPON 0x29 - -#define HX8357_CASET 0x2A -#define HX8357_PASET 0x2B -#define HX8357_RAMWR 0x2C -#define HX8357_RAMRD 0x2E - -#define HX8357B_PTLAR 0x30 -#define HX8357_TEON 0x35 -#define HX8357_TEARLINE 0x44 -#define HX8357_MADCTL 0x36 -#define HX8357_COLMOD 0x3A - #define HX8357_SETOSC 0xB0 #define HX8357_SETPWR1 0xB1 #define HX8357B_SETDISPLAY 0xB2 diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c index f31b3f4b9275..6b8f8b17e9a3 100644 --- a/drivers/staging/fbtft/fb_ili9163.c +++ b/drivers/staging/fbtft/fb_ili9163.c @@ -22,6 +22,7 @@ #include <linux/init.h> #include <linux/gpio.h> #include <linux/delay.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -38,37 +39,11 @@ #endif /* ILI9163C commands */ -#define CMD_NOP 0x00 /* Non operation*/ -#define CMD_SWRESET 0x01 /* Soft Reset */ -#define CMD_SLPIN 0x10 /* Sleep ON */ -#define CMD_SLPOUT 0x11 /* Sleep OFF */ -#define CMD_PTLON 0x12 /* Partial Mode ON */ -#define CMD_NORML 0x13 /* Normal Display ON */ -#define CMD_DINVOF 0x20 /* Display Inversion OFF */ -#define CMD_DINVON 0x21 /* Display Inversion ON */ -#define CMD_GAMMASET 0x26 /* Gamma Set (0x01[1],0x02[2],0x04[3],0x08[4]) */ -#define CMD_DISPOFF 0x28 /* Display OFF */ -#define CMD_DISPON 0x29 /* Display ON */ -#define CMD_IDLEON 0x39 /* Idle Mode ON */ -#define CMD_IDLEOF 0x38 /* Idle Mode OFF */ -#define CMD_CLMADRS 0x2A /* Column Address Set */ -#define CMD_PGEADRS 0x2B /* Page Address Set */ - -#define CMD_RAMWR 0x2C /* Memory Write */ -#define CMD_RAMRD 0x2E /* Memory Read */ -#define CMD_CLRSPACE 0x2D /* Color Space : 4K/65K/262K */ -#define CMD_PARTAREA 0x30 /* Partial Area */ -#define CMD_VSCLLDEF 0x33 /* Vertical Scroll Definition */ -#define CMD_TEFXLON 0x34 /* Tearing Effect Line ON */ -#define CMD_TEFXLOF 0x35 /* Tearing Effect Line OFF */ -#define CMD_MADCTL 0x36 /* Memory Access Control */ - -#define CMD_PIXFMT 0x3A /* Interface Pixel Format */ -#define CMD_FRMCTR1 0xB1 /* Frame Rate Control - (In normal mode/Full colors) */ +#define CMD_FRMCTR1 0xB1 /* Frame Rate Control */ + /* (In normal mode/Full colors) */ #define CMD_FRMCTR2 0xB2 /* Frame Rate Control (In Idle mode/8-colors) */ -#define CMD_FRMCTR3 0xB3 /* Frame Rate Control - (In Partial mode/full colors) */ +#define CMD_FRMCTR3 0xB3 /* Frame Rate Control */ + /* (In Partial mode/full colors) */ #define CMD_DINVCTR 0xB4 /* Display Inversion Control */ #define CMD_RGBBLK 0xB5 /* RGB Interface Blanking Porch setting */ #define CMD_DFUNCTR 0xB6 /* Display Function set 5 */ @@ -88,17 +63,18 @@ #define CMD_GAMRSEL 0xF2 /* GAM_R_SEL */ /* -This display: -http://www.ebay.com/itm/Replace-Nokia-5110-LCD-1-44-Red-Serial-128X128-SPI-Color-TFT-LCD-Display-Module-/271422122271 -This particular display has a design error! 
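The private opcode macros dropped above (HX8357_CASET 0x2A, HX8357_PASET 0x2B, HX8357_RAMWR 0x2C, CMD_MADCTL 0x36, CMD_PIXFMT 0x3A, and so on) carry the standard MIPI DCS command values, which is why they can be replaced by the generic constants from <video/mipi_display.h>. The resulting window-set sequence is the same in every converted driver; in generic form (assuming the driver-local fbtft.h, which provides write_reg() and struct fbtft_par, is on the include path):

    #include <video/mipi_display.h>
    #include "fbtft.h"

    /* Send the 16-bit x/y window as high byte, low byte pairs, then start
     * the frame-memory write -- the pattern used by the converted
     * set_addr_win() implementations. */
    static void dcs_set_addr_win(struct fbtft_par *par, int xs, int ys,
                                 int xe, int ye)
    {
            write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
                      xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
            write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
                      ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
            write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
    }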
The controller has 3 pins to -configure to constrain the memory and resolution to a fixed dimension (in -that case 128x128) but they leaved those pins configured for 128x160 so -there was several pixel memory addressing problems. -I solved by setup several parameters that dinamically fix the resolution as -needit so below the parameters for this display. If you have a strain or a -correct display (can happen with chinese) you can copy those parameters and -create setup for different displays. -*/ + * This display: + * http://www.ebay.com/itm/Replace-Nokia-5110-LCD-1-44-Red-Serial-128X128-SPI- + * Color-TFT-LCD-Display-Module-/271422122271 + * This particular display has a design error! The controller has 3 pins to + * configure to constrain the memory and resolution to a fixed dimension (in + * that case 128x128) but they leaved those pins configured for 128x160 so + * there was several pixel memory addressing problems. + * I solved by setup several parameters that dinamically fix the resolution as + * needit so below the parameters for this display. If you have a strain or a + * correct display (can happen with chinese) you can copy those parameters and + * create setup for different displays. + */ #ifdef RED #define __OFFSET 32 /*see note 2 - this is the red version */ @@ -113,16 +89,17 @@ static int init_display(struct fbtft_par *par) if (par->gpio.cs != -1) gpio_set_value(par->gpio.cs, 0); /* Activate chip */ - write_reg(par, CMD_SWRESET); /* software reset */ + write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */ mdelay(500); - write_reg(par, CMD_SLPOUT); /* exit sleep */ + write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); /* exit sleep */ mdelay(5); - write_reg(par, CMD_PIXFMT, 0x05); /* Set Color Format 16bit */ - write_reg(par, CMD_GAMMASET, 0x02); /* default gamma curve 3 */ + write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT); + /* default gamma curve 3 */ + write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x02); #ifdef GAMMA_ADJ write_reg(par, CMD_GAMRSEL, 0x01); /* Enable Gamma adj */ #endif - write_reg(par, CMD_NORML); + write_reg(par, MIPI_DCS_ENTER_NORMAL_MODE); write_reg(par, CMD_DFUNCTR, 0xff, 0x06); /* Frame Rate Control (In normal mode/Full colors) */ write_reg(par, CMD_FRMCTR1, 0x08, 0x02); @@ -135,66 +112,67 @@ static int init_display(struct fbtft_par *par) write_reg(par, CMD_VCOMCTR1, 0x50, 0x63); write_reg(par, CMD_VCOMOFFS, 0); - write_reg(par, CMD_CLMADRS, 0, 0, 0, WIDTH); /* Set Column Address */ - write_reg(par, CMD_PGEADRS, 0, 0, 0, HEIGHT); /* Set Page Address */ + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0, 0, WIDTH); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0, 0, HEIGHT); - write_reg(par, CMD_DISPON); /* display ON */ - write_reg(par, CMD_RAMWR); /* Memory Write */ + write_reg(par, MIPI_DCS_SET_DISPLAY_ON); /* display ON */ + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); /* Memory Write */ return 0; } static void set_addr_win(struct fbtft_par *par, int xs, int ys, - int xe, int ye) + int xe, int ye) { switch (par->info->var.rotate) { case 0: - write_reg(par, CMD_CLMADRS, xs >> 8, xs & 0xff, xe >> 8, - xe & 0xff); - write_reg(par, CMD_PGEADRS, - (ys + __OFFSET) >> 8, (ys + __OFFSET) & 0xff, - (ye + __OFFSET) >> 8, (ye + __OFFSET) & 0xff); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xff, xe >> 8, xe & 0xff); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + (ys + __OFFSET) >> 8, (ys + __OFFSET) & 0xff, + (ye + __OFFSET) >> 8, (ye + __OFFSET) & 0xff); break; case 90: - write_reg(par, CMD_CLMADRS, - (xs + __OFFSET) >> 8, (xs + 
__OFFSET) & 0xff, - (xe + __OFFSET) >> 8, (xe + __OFFSET) & 0xff); - write_reg(par, CMD_PGEADRS, ys >> 8, ys & 0xff, ye >> 8, - ye & 0xff); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + (xs + __OFFSET) >> 8, (xs + __OFFSET) & 0xff, + (xe + __OFFSET) >> 8, (xe + __OFFSET) & 0xff); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xff, ye >> 8, ye & 0xff); break; case 180: case 270: - write_reg(par, CMD_CLMADRS, xs >> 8, xs & 0xff, xe >> 8, - xe & 0xff); - write_reg(par, CMD_PGEADRS, ys >> 8, ys & 0xff, ye >> 8, - ye & 0xff); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xff, xe >> 8, xe & 0xff); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xff, ye >> 8, ye & 0xff); break; default: - par->info->var.rotate = 0; /* Fix incorrect setting */ + /* Fix incorrect setting */ + par->info->var.rotate = 0; } - write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */ + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } /* -7) MY: 1(bottom to top), 0(top to bottom) Row Address Order -6) MX: 1(R to L), 0(L to R) Column Address Order -5) MV: 1(Exchanged), 0(normal) Row/Column exchange -4) ML: 1(bottom to top), 0(top to bottom) Vertical Refresh Order -3) RGB: 1(BGR), 0(RGB) Color Space -2) MH: 1(R to L), 0(L to R) Horizontal Refresh Order -1) -0) - - MY, MX, MV, ML,RGB, MH, D1, D0 - 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //normal - 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //Y-Mirror - 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Mirror - 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Y-Mirror - 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange - 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange, Y-Mirror - 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 //XY exchange - 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 -*/ + * 7) MY: 1(bottom to top), 0(top to bottom) Row Address Order + * 6) MX: 1(R to L), 0(L to R) Column Address Order + * 5) MV: 1(Exchanged), 0(normal) Row/Column exchange + * 4) ML: 1(bottom to top), 0(top to bottom) Vertical Refresh Order + * 3) RGB: 1(BGR), 0(RGB) Color Space + * 2) MH: 1(R to L), 0(L to R) Horizontal Refresh Order + * 1) + * 0) + * + * MY, MX, MV, ML,RGB, MH, D1, D0 + * 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //normal + * 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //Y-Mirror + * 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Mirror + * 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Y-Mirror + * 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange + * 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange, Y-Mirror + * 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 //XY exchange + * 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 + */ static int set_var(struct fbtft_par *par) { u8 mactrl_data = 0; /* Avoid compiler warning */ @@ -217,8 +195,8 @@ static int set_var(struct fbtft_par *par) /* Colorspcae */ if (par->bgr) mactrl_data |= (1 << 2); - write_reg(par, CMD_MADCTL, mactrl_data); - write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */ + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, mactrl_data); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); return 0; } @@ -237,27 +215,28 @@ static int gamma_adj(struct fbtft_par *par, unsigned long *curves) CURVE(i, j) &= mask[i * par->gamma.num_values + j]; write_reg(par, CMD_PGAMMAC, - CURVE(0, 0), - CURVE(0, 1), - CURVE(0, 2), - CURVE(0, 3), - CURVE(0, 4), - CURVE(0, 5), - CURVE(0, 6), - (CURVE(0, 7) << 4) | CURVE(0, 8), - CURVE(0, 9), - CURVE(0, 10), - CURVE(0, 11), - CURVE(0, 12), - CURVE(0, 13), - CURVE(0, 14), - CURVE(0, 15) - ); + CURVE(0, 0), + CURVE(0, 1), + CURVE(0, 2), + CURVE(0, 3), + CURVE(0, 4), + CURVE(0, 5), + CURVE(0, 6), + (CURVE(0, 7) << 4) | CURVE(0, 8), + CURVE(0, 9), + CURVE(0, 10), + CURVE(0, 11), + CURVE(0, 12), + CURVE(0, 13), + CURVE(0, 14), + CURVE(0, 
15)); - write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */ + /* Write Data to GRAM mode */ + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); return 0; } + #undef CURVE #endif diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c index 3ed50febe36f..6ff222d6d6d6 100644 --- a/drivers/staging/fbtft/fb_ili9320.c +++ b/drivers/staging/fbtft/fb_ili9320.c @@ -47,10 +47,10 @@ static int init_display(struct fbtft_par *par) devcode = read_devicecode(par); fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n", - devcode); + devcode); if ((devcode != 0x0000) && (devcode != 0x9320)) dev_warn(par->info->device, - "Unrecognized Device code: 0x%04X (expected 0x9320)\n", + "Unrecognized Device code: 0x%04X (expected 0x9320)\n", devcode); /* Initialization sequence from ILI9320 Application Notes */ @@ -216,10 +216,10 @@ static int set_var(struct fbtft_par *par) } /* - Gamma string format: - VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5 - VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5 -*/ + * Gamma string format: + * VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5 + * VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5 + */ #define CURVE(num, idx) curves[num * par->gamma.num_values + idx] static int set_gamma(struct fbtft_par *par, unsigned long *curves) { @@ -248,6 +248,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves) return 0; } + #undef CURVE static struct fbtft_display display = { diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c index 3b3a06d8a125..fdf98d37550e 100644 --- a/drivers/staging/fbtft/fb_ili9325.c +++ b/drivers/staging/fbtft/fb_ili9325.c @@ -56,42 +56,42 @@ module_param(vcm, uint, 0); MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage"); /* -Verify that this configuration is within the Voltage limits - -Display module configuration: Vcc = IOVcc = Vci = 3.3V - - Voltages ----------- -Vci = 3.3 -Vci1 = Vci * 0.80 = 2.64 -DDVDH = Vci1 * 2 = 5.28 -VCL = -Vci1 = -2.64 -VREG1OUT = Vci * 1.85 = 4.88 -VCOMH = VREG1OUT * 0.735 = 3.59 -VCOM amplitude = VREG1OUT * 0.98 = 4.79 -VGH = Vci * 4 = 13.2 -VGL = -Vci * 4 = -13.2 - - Limits --------- -Power supplies -1.65 < IOVcc < 3.30 => 1.65 < 3.3 < 3.30 -2.40 < Vcc < 3.30 => 2.40 < 3.3 < 3.30 -2.50 < Vci < 3.30 => 2.50 < 3.3 < 3.30 - -Source/VCOM power supply voltage - 4.50 < DDVDH < 6.0 => 4.50 < 5.28 < 6.0 --3.0 < VCL < -2.0 => -3.0 < -2.64 < -2.0 -VCI - VCL < 6.0 => 5.94 < 6.0 - -Gate driver output voltage - 10 < VGH < 20 => 10 < 13.2 < 20 --15 < VGL < -5 => -15 < -13.2 < -5 -VGH - VGL < 32 => 26.4 < 32 - -VCOM driver output voltage -VCOMH - VCOML < 6.0 => 4.79 < 6.0 -*/ + * Verify that this configuration is within the Voltage limits + * + * Display module configuration: Vcc = IOVcc = Vci = 3.3V + * + * Voltages + * ---------- + * Vci = 3.3 + * Vci1 = Vci * 0.80 = 2.64 + * DDVDH = Vci1 * 2 = 5.28 + * VCL = -Vci1 = -2.64 + * VREG1OUT = Vci * 1.85 = 4.88 + * VCOMH = VREG1OUT * 0.735 = 3.59 + * VCOM amplitude = VREG1OUT * 0.98 = 4.79 + * VGH = Vci * 4 = 13.2 + * VGL = -Vci * 4 = -13.2 + * + * Limits + * -------- + * Power supplies + * 1.65 < IOVcc < 3.30 => 1.65 < 3.3 < 3.30 + * 2.40 < Vcc < 3.30 => 2.40 < 3.3 < 3.30 + * 2.50 < Vci < 3.30 => 2.50 < 3.3 < 3.30 + * + * Source/VCOM power supply voltage + * 4.50 < DDVDH < 6.0 => 4.50 < 5.28 < 6.0 + * -3.0 < VCL < -2.0 => -3.0 < -2.64 < -2.0 + * VCI - VCL < 6.0 => 5.94 < 6.0 + * + * Gate driver output voltage + * 10 < VGH < 20 => 10 < 13.2 < 20 + * -15 < VGL < -5 => -15 < -13.2 < -5 + * VGH - VGL < 32 => 26.4 < 32 + * + * VCOM driver 
output voltage + * VCOMH - VCOML < 6.0 => 4.79 < 6.0 + */ static int init_display(struct fbtft_par *par) { @@ -213,10 +213,10 @@ static int set_var(struct fbtft_par *par) } /* - Gamma string format: - VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5 - VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5 -*/ + * Gamma string format: + * VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5 + * VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5 + */ #define CURVE(num, idx) curves[num * par->gamma.num_values + idx] static int set_gamma(struct fbtft_par *par, unsigned long *curves) { @@ -245,6 +245,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves) return 0; } + #undef CURVE static struct fbtft_display display = { diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c index e0e253989271..0711121c303c 100644 --- a/drivers/staging/fbtft/fb_ili9340.c +++ b/drivers/staging/fbtft/fb_ili9340.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/gpio.h> #include <linux/delay.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -53,7 +54,7 @@ static int init_display(struct fbtft_par *par) /* COLMOD: Pixel Format Set */ /* 16 bits/pixel */ - write_reg(par, 0x3A, 0x55); + write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); /* Frame Rate Control */ /* Division ratio = fosc, Frame Rate = 79Hz */ @@ -65,40 +66,37 @@ static int init_display(struct fbtft_par *par) /* Gamma Function Disable */ write_reg(par, 0xF2, 0x00); - /* Gamma curve selected */ - write_reg(par, 0x26, 0x01); + /* Gamma curve selection */ + write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01); /* Positive Gamma Correction */ write_reg(par, 0xE0, - 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1, - 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00); + 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1, + 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00); /* Negative Gamma Correction */ write_reg(par, 0xE1, - 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1, - 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F); + 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1, + 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F); - /* Sleep OUT */ - write_reg(par, 0x11); + write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); mdelay(120); - /* Display ON */ - write_reg(par, 0x29); + write_reg(par, MIPI_DCS_SET_DISPLAY_ON); return 0; } static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - /* Column address */ - write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); - /* Row address */ - write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); - /* Memory write */ - write_reg(par, 0x2C); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } #define ILI9340_MADCTL_MV 0x20 @@ -123,7 +121,7 @@ static int set_var(struct fbtft_par *par) break; } /* Memory Access Control */ - write_reg(par, 0x36, val | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, val | (par->bgr << 3)); return 0; } diff --git a/drivers/staging/fbtft/fb_ili9341.c b/drivers/staging/fbtft/fb_ili9341.c index dcee0aff5875..ff35c8624ca3 100644 --- a/drivers/staging/fbtft/fb_ili9341.c +++ b/drivers/staging/fbtft/fb_ili9341.c @@ -24,6 +24,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -39,9 +40,9 @@ static int init_display(struct fbtft_par *par) par->fbtftops.reset(par); /* startup sequence for MI0283QT-9A */ - write_reg(par, 0x01); 
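/*
 * Illustration (not part of the patch): the COLMOD parameter packs two
 * fields on this controller family -- bits [6:4] select the pixel format
 * of the RGB (DPI) interface, bits [2:0] that of the MCU (DBI) interface --
 * so the two forms below are not interchangeable:
 */
write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
			/* 16bpp on both the DPI and DBI interfaces */
write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
			/* 0x05: 16bpp on the DBI interface only */
/*
 * This is presumably why the conversions keep the raw 0x55 in fb_ili9340,
 * fb_ili9341 and fb_ili9486, but use MIPI_DCS_PIXEL_FMT_16BIT where the
 * old value was 0x05 (fb_ili9163, fb_st7735r).
 */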
/* software reset */ + write_reg(par, MIPI_DCS_SOFT_RESET); mdelay(5); - write_reg(par, 0x28); /* display off */ + write_reg(par, MIPI_DCS_SET_DISPLAY_OFF); /* --------------------------------------------------------- */ write_reg(par, 0xCF, 0x00, 0x83, 0x30); write_reg(par, 0xED, 0x64, 0x03, 0x12, 0x81); @@ -56,18 +57,18 @@ static int init_display(struct fbtft_par *par) write_reg(par, 0xC5, 0x35, 0x3E); write_reg(par, 0xC7, 0xBE); /* ------------memory access control------------------------ */ - write_reg(par, 0x3A, 0x55); /* 16bit pixel */ + write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); /* 16bit pixel */ /* ------------frame rate----------------------------------- */ write_reg(par, 0xB1, 0x00, 0x1B); /* ------------Gamma---------------------------------------- */ /* write_reg(par, 0xF2, 0x08); */ /* Gamma Function Disable */ - write_reg(par, 0x26, 0x01); + write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01); /* ------------display-------------------------------------- */ write_reg(par, 0xB7, 0x07); /* entry mode set */ write_reg(par, 0xB6, 0x0A, 0x82, 0x27, 0x00); - write_reg(par, 0x11); /* sleep out */ + write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); mdelay(100); - write_reg(par, 0x29); /* display on */ + write_reg(par, MIPI_DCS_SET_DISPLAY_ON); mdelay(20); return 0; @@ -75,40 +76,39 @@ static int init_display(struct fbtft_par *par) static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - /* Column address set */ - write_reg(par, 0x2A, - (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF); - /* Row address set */ - write_reg(par, 0x2B, - (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF); - /* Memory write */ - write_reg(par, 0x2C); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } -#define MEM_Y (7) /* MY row address order */ -#define MEM_X (6) /* MX column address order */ -#define MEM_V (5) /* MV row / column exchange */ -#define MEM_L (4) /* ML vertical refresh order */ -#define MEM_H (2) /* MH horizontal refresh order */ +#define MEM_Y BIT(7) /* MY row address order */ +#define MEM_X BIT(6) /* MX column address order */ +#define MEM_V BIT(5) /* MV row / column exchange */ +#define MEM_L BIT(4) /* ML vertical refresh order */ +#define MEM_H BIT(2) /* MH horizontal refresh order */ #define MEM_BGR (3) /* RGB-BGR Order */ static int set_var(struct fbtft_par *par) { switch (par->info->var.rotate) { case 0: - write_reg(par, 0x36, (1 << MEM_X) | (par->bgr << MEM_BGR)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MEM_X | (par->bgr << MEM_BGR)); break; case 270: - write_reg(par, 0x36, - (1 << MEM_V) | (1 << MEM_L) | (par->bgr << MEM_BGR)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MEM_V | MEM_L | (par->bgr << MEM_BGR)); break; case 180: - write_reg(par, 0x36, (1 << MEM_Y) | (par->bgr << MEM_BGR)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MEM_Y | (par->bgr << MEM_BGR)); break; case 90: - write_reg(par, 0x36, (1 << MEM_Y) | (1 << MEM_X) | - (1 << MEM_V) | (par->bgr << MEM_BGR)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MEM_Y | MEM_X | MEM_V | (par->bgr << MEM_BGR)); break; } @@ -116,10 +116,10 @@ static int set_var(struct fbtft_par *par) } /* - Gamma string format: - Positive: Par1 Par2 [...] Par15 - Negative: Par1 Par2 [...] Par15 -*/ + * Gamma string format: + * Positive: Par1 Par2 [...] Par15 + * Negative: Par1 Par2 [...] 
Par15 + */ #define CURVE(num, idx) curves[num * par->gamma.num_values + idx] static int set_gamma(struct fbtft_par *par, unsigned long *curves) { @@ -127,14 +127,15 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves) for (i = 0; i < par->gamma.num_curves; i++) write_reg(par, 0xE0 + i, - CURVE(i, 0), CURVE(i, 1), CURVE(i, 2), - CURVE(i, 3), CURVE(i, 4), CURVE(i, 5), - CURVE(i, 6), CURVE(i, 7), CURVE(i, 8), - CURVE(i, 9), CURVE(i, 10), CURVE(i, 11), - CURVE(i, 12), CURVE(i, 13), CURVE(i, 14)); + CURVE(i, 0), CURVE(i, 1), CURVE(i, 2), + CURVE(i, 3), CURVE(i, 4), CURVE(i, 5), + CURVE(i, 6), CURVE(i, 7), CURVE(i, 8), + CURVE(i, 9), CURVE(i, 10), CURVE(i, 11), + CURVE(i, 12), CURVE(i, 13), CURVE(i, 14)); return 0; } + #undef CURVE static struct fbtft_display display = { diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c index 63684864f309..242adb3859bd 100644 --- a/drivers/staging/fbtft/fb_ili9481.c +++ b/drivers/staging/fbtft/fb_ili9481.c @@ -19,6 +19,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -27,9 +28,8 @@ #define HEIGHT 480 static int default_init_sequence[] = { - /* SLP_OUT - Sleep out */ - -1, 0x11, + -1, MIPI_DCS_EXIT_SLEEP_MODE, -2, 50, /* Power setting */ -1, 0xD0, 0x07, 0x42, 0x18, @@ -42,44 +42,47 @@ static int default_init_sequence[] = { /* Frame rate & inv. */ -1, 0xC5, 0x03, /* Pixel format */ - -1, 0x3A, 0x55, + -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55, /* Gamma */ -1, 0xC8, 0x00, 0x32, 0x36, 0x45, 0x06, 0x16, 0x37, 0x75, 0x77, 0x54, 0x0C, 0x00, /* DISP_ON */ - -1, 0x29, + -1, MIPI_DCS_SET_DISPLAY_ON, -3 }; static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - /* column address */ - write_reg(par, 0x2a, xs >> 8, xs & 0xff, xe >> 8, xe & 0xff); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xff, xe >> 8, xe & 0xff); - /* Row address */ - write_reg(par, 0x2b, ys >> 8, ys & 0xff, ye >> 8, ye & 0xff); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xff, ye >> 8, ye & 0xff); - /* memory write */ - write_reg(par, 0x2c); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } #define HFLIP 0x01 #define VFLIP 0x02 -#define ROWxCOL 0x20 +#define ROW_X_COL 0x20 static int set_var(struct fbtft_par *par) { switch (par->info->var.rotate) { case 270: - write_reg(par, 0x36, ROWxCOL | HFLIP | VFLIP | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + ROW_X_COL | HFLIP | VFLIP | (par->bgr << 3)); break; case 180: - write_reg(par, 0x36, VFLIP | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + VFLIP | (par->bgr << 3)); break; case 90: - write_reg(par, 0x36, ROWxCOL | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + ROW_X_COL | (par->bgr << 3)); break; default: - write_reg(par, 0x36, HFLIP | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + HFLIP | (par->bgr << 3)); break; } diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c index d9dfff68159b..fa38d8885f0b 100644 --- a/drivers/staging/fbtft/fb_ili9486.c +++ b/drivers/staging/fbtft/fb_ili9486.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -28,11 +29,10 @@ static int default_init_sequence[] = { /* Interface Mode Control */ -1, 0xb0, 0x0, - /* Sleep OUT */ - -1, 0x11, + -1, MIPI_DCS_EXIT_SLEEP_MODE, -2, 250, /* Interface Pixel Format */ - -1, 0x3A, 0x55, + -1, 
MIPI_DCS_SET_PIXEL_FORMAT, 0x55, /* Power Control 3 */ -1, 0xC2, 0x44, /* VCOM Control 1 */ @@ -46,40 +46,41 @@ static int default_init_sequence[] = { /* Digital Gamma Control 1 */ -1, 0xE2, 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75, 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00, - /* Sleep OUT */ - -1, 0x11, - /* Display ON */ - -1, 0x29, + -1, MIPI_DCS_EXIT_SLEEP_MODE, + -1, MIPI_DCS_SET_DISPLAY_ON, /* end marker */ -3 }; static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - /* Column address */ - write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); - /* Row address */ - write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); - /* Memory write */ - write_reg(par, 0x2C); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } static int set_var(struct fbtft_par *par) { switch (par->info->var.rotate) { case 0: - write_reg(par, 0x36, 0x80 | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + 0x80 | (par->bgr << 3)); break; case 90: - write_reg(par, 0x36, 0x20 | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + 0x20 | (par->bgr << 3)); break; case 180: - write_reg(par, 0x36, 0x40 | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + 0x40 | (par->bgr << 3)); break; case 270: - write_reg(par, 0x36, 0xE0 | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + 0xE0 | (par->bgr << 3)); break; default: break; diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c index b167c5061631..308a244972aa 100644 --- a/drivers/staging/fbtft/fb_ra8875.c +++ b/drivers/staging/fbtft/fb_ra8875.c @@ -257,7 +257,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...) static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len) { u16 *vmem16; - u16 *txbuf16 = (u16 *)par->txbuf.buf; + u16 *txbuf16 = par->txbuf.buf; size_t remain; size_t to_copy; size_t tx_array_size; @@ -271,13 +271,13 @@ static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len) remain = len / 2; vmem16 = (u16 *)(par->info->screen_buffer + offset); tx_array_size = par->txbuf.len / 2; - txbuf16 = (u16 *)(par->txbuf.buf + 1); + txbuf16 = par->txbuf.buf + 1; tx_array_size -= 2; *(u8 *)(par->txbuf.buf) = 0x00; startbyte_size = 1; while (remain) { - to_copy = remain > tx_array_size ? 
tx_array_size : remain; + to_copy = min(tx_array_size, remain); dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n", to_copy, remain - to_copy); diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c index da85057eb3e0..3113355062fc 100644 --- a/drivers/staging/fbtft/fb_s6d02a1.c +++ b/drivers/staging/fbtft/fb_s6d02a1.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -50,7 +51,7 @@ static int default_init_sequence[] = { -1, 0xf3, 0x00, 0x00, - -1, 0x11, + -1, MIPI_DCS_EXIT_SLEEP_MODE, -2, 50, -1, 0xf3, 0x00, 0x01, @@ -79,18 +80,18 @@ static int default_init_sequence[] = { /* initializing sequence */ - -1, 0x36, 0x08, + -1, MIPI_DCS_SET_ADDRESS_MODE, 0x08, - -1, 0x35, 0x00, + -1, MIPI_DCS_SET_TEAR_ON, 0x00, - -1, 0x3a, 0x05, + -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x05, - /* gamma setting sequence */ - -1, 0x26, 0x01, /* preset gamma curves, possible values 0x01, 0x02, 0x04, 0x08 */ + /* gamma setting - possible values 0x01, 0x02, 0x04, 0x08 */ + -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01, -2, 150, - -1, 0x29, - -1, 0x2c, + -1, MIPI_DCS_SET_DISPLAY_ON, + -1, MIPI_DCS_WRITE_MEMORY_START, /* end marker */ -3 @@ -98,14 +99,13 @@ static int default_init_sequence[] = { static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - /* Column address */ - write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); - /* Row address */ - write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); - /* Memory write */ - write_reg(par, 0x2C); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } #define MY BIT(7) @@ -113,7 +113,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) #define MV BIT(5) static int set_var(struct fbtft_par *par) { - /* MADCTL - Memory data access control + /* Memory data access control (0x36h) RGB/BGR: 1. Mode selection pin SRGB RGB H/W pin for color filter setting: 0=RGB, 1=BGR @@ -121,16 +121,20 @@ static int set_var(struct fbtft_par *par) RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */ switch (par->info->var.rotate) { case 0: - write_reg(par, 0x36, MX | MY | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MX | MY | (par->bgr << 3)); break; case 270: - write_reg(par, 0x36, MY | MV | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MY | MV | (par->bgr << 3)); break; case 180: - write_reg(par, 0x36, par->bgr << 3); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + par->bgr << 3); break; case 90: - write_reg(par, 0x36, MX | MV | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MX | MV | (par->bgr << 3)); break; } diff --git a/drivers/staging/fbtft/fb_ssd1305.c b/drivers/staging/fbtft/fb_ssd1305.c new file mode 100644 index 000000000000..4b38c3fadd60 --- /dev/null +++ b/drivers/staging/fbtft/fb_ssd1305.c @@ -0,0 +1,216 @@ +/* + * FB driver for the SSD1305 OLED Controller + * + * based on SSD1306 driver by Noralf Tronnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
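/*
 * Worked example (added for illustration, not from the patch): with
 * MY = BIT(7), MX = BIT(6), MV = BIT(5) and the BGR flag in bit 3, the
 * fb_s6d02a1/fb_st7735r set_var() switch above produces, for a BGR panel
 * (par->bgr == 1):
 *
 *   rotate   0:  MX | MY | BGR  =  0x40 | 0x80 | 0x08  =  0xC8
 *   rotate  90:  MX | MV | BGR  =  0x40 | 0x20 | 0x08  =  0x68
 *   rotate 180:  BGR            =  0x08
 *   rotate 270:  MY | MV | BGR  =  0x80 | 0x20 | 0x08  =  0xA8
 */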
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/gpio.h> +#include <linux/delay.h> + +#include "fbtft.h" + +#define DRVNAME "fb_ssd1305" + +#define WIDTH 128 +#define HEIGHT 64 + +/* + * write_reg() caveat: + * + * This doesn't work because D/C has to be LOW for both values: + * write_reg(par, val1, val2); + * + * Do it like this: + * write_reg(par, val1); + * write_reg(par, val2); + */ + +/* Init sequence taken from the Adafruit SSD1306 Arduino library */ +static int init_display(struct fbtft_par *par) +{ + par->fbtftops.reset(par); + + if (par->gamma.curves[0] == 0) { + mutex_lock(&par->gamma.lock); + if (par->info->var.yres == 64) + par->gamma.curves[0] = 0xCF; + else + par->gamma.curves[0] = 0x8F; + mutex_unlock(&par->gamma.lock); + } + + /* Set Display OFF */ + write_reg(par, 0xAE); + + /* Set Display Clock Divide Ratio/ Oscillator Frequency */ + write_reg(par, 0xD5); + write_reg(par, 0x80); + + /* Set Multiplex Ratio */ + write_reg(par, 0xA8); + if (par->info->var.yres == 64) + write_reg(par, 0x3F); + else + write_reg(par, 0x1F); + + /* Set Display Offset */ + write_reg(par, 0xD3); + write_reg(par, 0x0); + + /* Set Display Start Line */ + write_reg(par, 0x40 | 0x0); + + /* Charge Pump Setting */ + write_reg(par, 0x8D); + /* A[2] = 1b, Enable charge pump during display on */ + write_reg(par, 0x14); + + /* Set Memory Addressing Mode */ + write_reg(par, 0x20); + /* Vertical addressing mode */ + write_reg(par, 0x01); + + /* + * Set Segment Re-map + * column address 127 is mapped to SEG0 + */ + write_reg(par, 0xA0 | ((par->info->var.rotate == 180) ? 0x0 : 0x1)); + + /* + * Set COM Output Scan Direction + * remapped mode. Scan from COM[N-1] to COM0 + */ + write_reg(par, ((par->info->var.rotate == 180) ? 0xC8 : 0xC0)); + + /* Set COM Pins Hardware Configuration */ + write_reg(par, 0xDA); + if (par->info->var.yres == 64) { + /* A[4]=1b, Alternative COM pin configuration */ + write_reg(par, 0x12); + } else { + /* A[4]=0b, Sequential COM pin configuration */ + write_reg(par, 0x02); + } + + /* Set Pre-charge Period */ + write_reg(par, 0xD9); + write_reg(par, 0xF1); + + /* + * Entire Display ON + * Resume to RAM content display. Output follows RAM content + */ + write_reg(par, 0xA4); + + /* + * Set Normal Display + * 0 in RAM: OFF in display panel + * 1 in RAM: ON in display panel + */ + write_reg(par, 0xA6); + + /* Set Display ON */ + write_reg(par, 0xAF); + + return 0; +} + +static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) +{ + /* Set Lower Column Start Address for Page Addressing Mode */ + write_reg(par, 0x00 | ((par->info->var.rotate == 180) ? 
0x0 : 0x4)); + /* Set Higher Column Start Address for Page Addressing Mode */ + write_reg(par, 0x10 | 0x0); + /* Set Display Start Line */ + write_reg(par, 0x40 | 0x0); +} + +static int blank(struct fbtft_par *par, bool on) +{ + if (on) + write_reg(par, 0xAE); + else + write_reg(par, 0xAF); + return 0; +} + +/* Gamma is used to control Contrast */ +static int set_gamma(struct fbtft_par *par, unsigned long *curves) +{ + curves[0] &= 0xFF; + /* Set Contrast Control for BANK0 */ + write_reg(par, 0x81); + write_reg(par, curves[0]); + + return 0; +} + +static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) +{ + u16 *vmem16 = (u16 *)par->info->screen_buffer; + u8 *buf = par->txbuf.buf; + int x, y, i; + int ret; + + for (x = 0; x < par->info->var.xres; x++) { + for (y = 0; y < par->info->var.yres / 8; y++) { + *buf = 0x00; + for (i = 0; i < 8; i++) + *buf |= (vmem16[(y * 8 + i) * + par->info->var.xres + x] ? + 1 : 0) << i; + buf++; + } + } + + /* Write data */ + gpio_set_value(par->gpio.dc, 1); + ret = par->fbtftops.write(par, par->txbuf.buf, + par->info->var.xres * par->info->var.yres / + 8); + if (ret < 0) + dev_err(par->info->device, "write failed and returned: %d\n", + ret); + return ret; +} + +static struct fbtft_display display = { + .regwidth = 8, + .width = WIDTH, + .height = HEIGHT, + .txbuflen = WIDTH * HEIGHT / 8, + .gamma_num = 1, + .gamma_len = 1, + .gamma = "00", + .fbtftops = { + .write_vmem = write_vmem, + .init_display = init_display, + .set_addr_win = set_addr_win, + .blank = blank, + .set_gamma = set_gamma, + }, +}; + +FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1305", &display); + +MODULE_ALIAS("spi:" DRVNAME); +MODULE_ALIAS("platform:" DRVNAME); +MODULE_ALIAS("spi:ssd1305"); +MODULE_ALIAS("platform:ssd1305"); + +MODULE_DESCRIPTION("SSD1305 OLED Driver"); +MODULE_AUTHOR("Alexey Mednyy"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c new file mode 100644 index 000000000000..15078bf2aa4b --- /dev/null +++ b/drivers/staging/fbtft/fb_ssd1325.c @@ -0,0 +1,205 @@ +/* + * FB driver for the SSD1325 OLED Controller + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
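/*
 * Minimal model (sketch, not part of the patch; the helper name is made
 * up) of the pixel packing done by the SSD1305 write_vmem() above: the
 * 16bpp framebuffer is reduced to 1bpp, eight vertically adjacent pixels
 * per transmitted byte, bit i holding the pixel in row (page * 8 + i):
 */
static u8 ssd1305_pack_column(const u16 *vmem16, int xres, int x, int page)
{
        u8 out = 0;
        int i;

        for (i = 0; i < 8; i++)
                if (vmem16[(page * 8 + i) * xres + x])
                        out |= BIT(i);

        return out;
}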
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/gpio.h> +#include <linux/delay.h> + +#include "fbtft.h" + +#define DRVNAME "fb_ssd1325" + +#define WIDTH 128 +#define HEIGHT 64 +#define GAMMA_NUM 1 +#define GAMMA_LEN 15 +#define DEFAULT_GAMMA "7 1 1 1 1 2 2 3 3 4 4 5 5 6 6" + +/* + * write_reg() caveat: + * + * This doesn't work because D/C has to be LOW for both values: + * write_reg(par, val1, val2); + * + * Do it like this: + * write_reg(par, val1); + * write_reg(par, val2); + */ + +/* Init sequence taken from the Adafruit SSD1306 Arduino library */ +static int init_display(struct fbtft_par *par) +{ + par->fbtftops.reset(par); + + gpio_set_value(par->gpio.cs, 0); + + write_reg(par, 0xb3); + write_reg(par, 0xf0); + write_reg(par, 0xae); + write_reg(par, 0xa1); + write_reg(par, 0x00); + write_reg(par, 0xa8); + write_reg(par, 0x3f); + write_reg(par, 0xa0); + write_reg(par, 0x45); + write_reg(par, 0xa2); + write_reg(par, 0x40); + write_reg(par, 0x75); + write_reg(par, 0x00); + write_reg(par, 0x3f); + write_reg(par, 0x15); + write_reg(par, 0x00); + write_reg(par, 0x7f); + write_reg(par, 0xa4); + write_reg(par, 0xaf); + + return 0; +} + +static uint8_t rgb565_to_g16(u16 pixel) +{ + u16 b = pixel & 0x1f; + u16 g = (pixel & (0x3f << 5)) >> 5; + u16 r = (pixel & (0x1f << (5 + 6))) >> (5 + 6); + + pixel = (299 * r + 587 * g + 114 * b) / 195; + if (pixel > 255) + pixel = 255; + return (uint8_t)pixel / 16; +} + +static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) +{ + fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, + "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe, + ye); + + write_reg(par, 0x75); + write_reg(par, 0x00); + write_reg(par, 0x3f); + write_reg(par, 0x15); + write_reg(par, 0x00); + write_reg(par, 0x7f); +} + +static int blank(struct fbtft_par *par, bool on) +{ + fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n", + __func__, on ? "true" : "false"); + + if (on) + write_reg(par, 0xAE); + else + write_reg(par, 0xAF); + return 0; +} + +/* + * Grayscale Lookup Table + * GS1 - GS15 + * The "Gamma curve" contains the relative values between the entries + * in the Lookup table. 
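/*
 * Worked example (added for illustration): rgb565_to_g16() above scales
 * the ITU-R 601 luma weights so that full white lands on the top grey
 * level: 0xFFFF -> r=31, g=63, b=31 -> (299*31 + 587*63 + 114*31)/195 =
 * 49784/195 = 255 -> 255/16 = GS15.  Mid-grey 0x7BEF gives 24392/195 =
 * 125 -> GS7, and black gives GS0.  The 4-bit result then selects one of
 * the grey levels whose relative brightness this lookup table sets.
 */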
+ * + * 0 = Setting of GS1 < Setting of GS2 < Setting of GS3.....< + * Setting of GS14 < Setting of GS15 + */ +static int set_gamma(struct fbtft_par *par, unsigned long *curves) +{ + int i; + + fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__); + + for (i = 0; i < GAMMA_LEN; i++) { + if (i > 0 && curves[i] < 1) { + dev_err(par->info->device, + "Illegal value in Grayscale Lookup Table at index %d.\n" + "Must be greater than 0\n", i); + return -EINVAL; + } + if (curves[i] > 7) { + dev_err(par->info->device, + "Illegal value(s) in Grayscale Lookup Table.\n" + "At index=%d, the accumulated value has exceeded 7\n", + i); + return -EINVAL; + } + } + write_reg(par, 0xB8); + for (i = 0; i < 8; i++) + write_reg(par, (curves[i] & 0xFF)); + return 0; +} + +static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) +{ + u16 *vmem16 = (u16 *)par->info->screen_buffer; + u8 *buf = par->txbuf.buf; + u8 n1; + u8 n2; + int y, x; + int ret; + + for (x = 0; x < par->info->var.xres; x++) { + if (x % 2) + continue; + for (y = 0; y < par->info->var.yres; y++) { + n1 = rgb565_to_g16(vmem16[y * par->info->var.xres + x]); + n2 = rgb565_to_g16(vmem16 + [y * par->info->var.xres + x + 1]); + *buf = (n1 << 4) | n2; + buf++; + } + } + + gpio_set_value(par->gpio.dc, 1); + + /* Write data */ + ret = par->fbtftops.write(par, par->txbuf.buf, + par->info->var.xres * par->info->var.yres / 2); + if (ret < 0) + dev_err(par->info->device, + "%s: write failed and returned: %d\n", __func__, ret); + + return ret; +} + +static struct fbtft_display display = { + .regwidth = 8, + .width = WIDTH, + .height = HEIGHT, + .txbuflen = WIDTH * HEIGHT / 2, + .gamma_num = GAMMA_NUM, + .gamma_len = GAMMA_LEN, + .gamma = DEFAULT_GAMMA, + .fbtftops = { + .write_vmem = write_vmem, + .init_display = init_display, + .set_addr_win = set_addr_win, + .blank = blank, + .set_gamma = set_gamma, + }, +}; + +FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1325", &display); + +MODULE_ALIAS("spi:" DRVNAME); +MODULE_ALIAS("platform:" DRVNAME); +MODULE_ALIAS("spi:ssd1325"); +MODULE_ALIAS("platform:ssd1325"); + +MODULE_DESCRIPTION("SSD1325 OLED Driver"); +MODULE_AUTHOR("Alexey Mednyy"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c index a92b0d071097..c5e51fe1aad5 100644 --- a/drivers/staging/fbtft/fb_st7735r.c +++ b/drivers/staging/fbtft/fb_st7735r.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -25,12 +26,10 @@ "0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10" static int default_init_sequence[] = { - /* SWRESET - Software reset */ - -1, 0x01, + -1, MIPI_DCS_SOFT_RESET, -2, 150, /* delay */ - /* SLPOUT - Sleep out & booster on */ - -1, 0x11, + -1, MIPI_DCS_EXIT_SLEEP_MODE, -2, 500, /* delay */ /* FRMCTR1 - frame rate control: normal mode @@ -71,18 +70,14 @@ static int default_init_sequence[] = { /* VMCTR1 - Power Control */ -1, 0xC5, 0x0E, - /* INVOFF - Display inversion off */ - -1, 0x20, + -1, MIPI_DCS_EXIT_INVERT_MODE, - /* COLMOD - Interface pixel format */ - -1, 0x3A, 0x05, + -1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT, - /* DISPON - Display On */ - -1, 0x29, + -1, MIPI_DCS_SET_DISPLAY_ON, -2, 100, /* delay */ - /* NORON - Partial off (Normal) */ - -1, 0x13, + -1, MIPI_DCS_ENTER_NORMAL_MODE, -2, 10, /* delay */ /* end marker */ @@ -91,14 +86,13 @@ static int default_init_sequence[] = { static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) 
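/*
 * Illustration (not part of the patch): every set_addr_win() in these
 * drivers, such as the one below, sends the window corners as big-endian
 * 16-bit pairs followed by the memory-write command, e.g. for a full
 * 128x160 ST7735R frame:
 *
 *   MIPI_DCS_SET_COLUMN_ADDRESS (0x2A):  0x00 0x00 0x00 0x7F   xs=0, xe=127
 *   MIPI_DCS_SET_PAGE_ADDRESS   (0x2B):  0x00 0x00 0x00 0x9F   ys=0, ye=159
 *   MIPI_DCS_WRITE_MEMORY_START (0x2C):  pixel data follows
 */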
{ - /* Column address */ - write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); - /* Row address */ - write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); - /* Memory write */ - write_reg(par, 0x2C); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } #define MY BIT(7) @@ -114,16 +108,20 @@ static int set_var(struct fbtft_par *par) RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */ switch (par->info->var.rotate) { case 0: - write_reg(par, 0x36, MX | MY | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MX | MY | (par->bgr << 3)); break; case 270: - write_reg(par, 0x36, MY | MV | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MY | MV | (par->bgr << 3)); break; case 180: - write_reg(par, 0x36, par->bgr << 3); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + par->bgr << 3); break; case 90: - write_reg(par, 0x36, MX | MV | (par->bgr << 3)); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, + MX | MV | (par->bgr << 3)); break; } diff --git a/drivers/staging/fbtft/fb_tinylcd.c b/drivers/staging/fbtft/fb_tinylcd.c index caf263db436a..097e71cfef62 100644 --- a/drivers/staging/fbtft/fb_tinylcd.c +++ b/drivers/staging/fbtft/fb_tinylcd.c @@ -18,6 +18,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> +#include <video/mipi_display.h> #include "fbtft.h" @@ -38,7 +39,7 @@ static int init_display(struct fbtft_par *par) write_reg(par, 0xB4, 0x02); write_reg(par, 0xB6, 0x00, 0x22, 0x3B); write_reg(par, 0xB7, 0x07); - write_reg(par, 0x36, 0x58); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x58); write_reg(par, 0xF0, 0x36, 0xA5, 0xD3); write_reg(par, 0xE5, 0x80); write_reg(par, 0xE5, 0x01); @@ -47,24 +48,23 @@ static int init_display(struct fbtft_par *par) write_reg(par, 0xF0, 0x36, 0xA5, 0x53); write_reg(par, 0xE0, 0x00, 0x35, 0x33, 0x00, 0x00, 0x00, 0x00, 0x35, 0x33, 0x00, 0x00, 0x00); - write_reg(par, 0x3A, 0x55); - write_reg(par, 0x11); + write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); + write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); udelay(250); - write_reg(par, 0x29); + write_reg(par, MIPI_DCS_SET_DISPLAY_ON); return 0; } static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - /* Column address */ - write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF); - /* Row address */ - write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF); - /* Memory write */ - write_reg(par, 0x2C); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } static int set_var(struct fbtft_par *par) @@ -72,19 +72,19 @@ static int set_var(struct fbtft_par *par) switch (par->info->var.rotate) { case 270: write_reg(par, 0xB6, 0x00, 0x02, 0x3B); - write_reg(par, 0x36, 0x28); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x28); break; case 180: write_reg(par, 0xB6, 0x00, 0x22, 0x3B); - write_reg(par, 0x36, 0x58); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x58); break; case 90: write_reg(par, 0xB6, 0x00, 0x22, 0x3B); - write_reg(par, 0x36, 0x38); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x38); break; default: write_reg(par, 0xB6, 0x00, 0x22, 0x3B); - write_reg(par, 0x36, 0x08); + write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x08); break; } diff --git a/drivers/staging/fbtft/fb_uc1611.c 
b/drivers/staging/fbtft/fb_uc1611.c index 4e828142058e..e87401aacfb3 100644 --- a/drivers/staging/fbtft/fb_uc1611.c +++ b/drivers/staging/fbtft/fb_uc1611.c @@ -222,8 +222,8 @@ static int set_var(struct fbtft_par *par) static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) { u8 *vmem8 = (u8 *)(par->info->screen_buffer); - u8 *buf8 = (u8 *)(par->txbuf.buf); - u16 *buf16 = (u16 *)(par->txbuf.buf); + u8 *buf8 = par->txbuf.buf; + u16 *buf16 = par->txbuf.buf; int line_length = par->info->fix.line_length; int y_start = (offset / line_length); int y_end = (offset + len - 1) / line_length; diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c index 212908e39277..b78045fe5393 100644 --- a/drivers/staging/fbtft/fb_uc1701.c +++ b/drivers/staging/fbtft/fb_uc1701.c @@ -78,11 +78,11 @@ static int init_display(struct fbtft_par *par) mdelay(10); /* set startpoint */ - /* LCD_START_LINE | (pos & 0x3F) */ write_reg(par, LCD_START_LINE); /* select orientation BOTTOMVIEW */ write_reg(par, LCD_BOTTOMVIEW | 1); + /* output mode select (turns display upside-down) */ write_reg(par, LCD_SCAN_DIR | 0x00); @@ -96,20 +96,14 @@ static int init_display(struct fbtft_par *par) write_reg(par, LCD_BIAS | 0); /* power control mode: all features on */ - /* LCD_POWER_CONTROL | (val&0x07) */ write_reg(par, LCD_POWER_CONTROL | 0x07); /* set voltage regulator R/R */ - /* LCD_VOLTAGE | (val&0x07) */ write_reg(par, LCD_VOLTAGE | 0x07); /* volume mode set */ - /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */ write_reg(par, LCD_VOLUME_MODE); - /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */ write_reg(par, 0x09); - /* ???? */ - /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */ write_reg(par, LCD_NO_OP); /* advanced program control */ @@ -125,17 +119,8 @@ static int init_display(struct fbtft_par *par) static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { /* goto address */ - /* LCD_PAGE_ADDRESS | ((page) & 0x1F), - (((col)+SHIFT_ADDR_NORMAL) & 0x0F), - LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */ write_reg(par, LCD_PAGE_ADDRESS); - /* LCD_PAGE_ADDRESS | ((page) & 0x1F), - (((col)+SHIFT_ADDR_NORMAL) & 0x0F), - LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */ write_reg(par, 0x00); - /* LCD_PAGE_ADDRESS | ((page) & 0x1F), - (((col)+SHIFT_ADDR_NORMAL) & 0x0F), - LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */ write_reg(par, LCD_COL_ADDRESS); } @@ -156,17 +141,9 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) 1 : 0) << i; buf++; } - /* LCD_PAGE_ADDRESS | ((page) & 0x1F), - (((col)+SHIFT_ADDR_NORMAL) & 0x0F), - LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */ + write_reg(par, LCD_PAGE_ADDRESS | (u8)y); - /* LCD_PAGE_ADDRESS | ((page) & 0x1F), - (((col)+SHIFT_ADDR_NORMAL) & 0x0F), - LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */ write_reg(par, 0x00); - /* LCD_PAGE_ADDRESS | ((page) & 0x1F), - (((col)+SHIFT_ADDR_NORMAL) & 0x0F), - LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */ write_reg(par, LCD_COL_ADDRESS); gpio_set_value(par->gpio.dc, 1); ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH); diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c index 58449ad84f46..83505bce628a 100644 --- a/drivers/staging/fbtft/fbtft-bus.c +++ b/drivers/staging/fbtft/fbtft-bus.c @@ -125,7 +125,7 @@ EXPORT_SYMBOL(fbtft_write_reg8_bus9); int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len) { u16 *vmem16; - u16 *txbuf16 = (u16 *)par->txbuf.buf; + u16 
*txbuf16 = par->txbuf.buf; size_t remain; size_t to_copy; size_t tx_array_size; @@ -150,14 +150,14 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len) tx_array_size = par->txbuf.len / 2; if (par->startbyte) { - txbuf16 = (u16 *)(par->txbuf.buf + 1); + txbuf16 = par->txbuf.buf + 1; tx_array_size -= 2; *(u8 *)(par->txbuf.buf) = par->startbyte | 0x2; startbyte_size = 1; } while (remain) { - to_copy = remain > tx_array_size ? tx_array_size : remain; + to_copy = min(tx_array_size, remain); dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n", to_copy, remain - to_copy); @@ -201,7 +201,7 @@ int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len) tx_array_size = par->txbuf.len / 2; while (remain) { - to_copy = remain > tx_array_size ? tx_array_size : remain; + to_copy = min(tx_array_size, remain); dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n", to_copy, remain - to_copy); diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c index b1e45161eefc..0c1a77cafe14 100644 --- a/drivers/staging/fbtft/fbtft-core.c +++ b/drivers/staging/fbtft/fbtft-core.c @@ -35,6 +35,7 @@ #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <video/mipi_display.h> #include "fbtft.h" #include "internal.h" @@ -129,7 +130,8 @@ static int fbtft_request_gpios(struct fbtft_par *par) while (gpio->name[0]) { flags = FBTFT_GPIO_NO_MATCH; /* if driver provides match function, try it first, - if no match use our own */ + * if no match use our own + */ if (par->fbtftops.request_gpios_match) flags = par->fbtftops.request_gpios_match(par, gpio); if (flags == FBTFT_GPIO_NO_MATCH) @@ -319,16 +321,13 @@ EXPORT_SYMBOL(fbtft_unregister_backlight); static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye) { - /* Column address set */ - write_reg(par, 0x2A, - (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF); + write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, + (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF); - /* Row address set */ - write_reg(par, 0x2B, - (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF); + write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, + (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF); - /* Memory write */ - write_reg(par, 0x2C); + write_reg(par, MIPI_DCS_WRITE_MEMORY_START); } static void fbtft_reset(struct fbtft_par *par) @@ -520,8 +519,7 @@ static ssize_t fbtft_fb_write(struct fb_info *info, const char __user *buf, "%s: count=%zd, ppos=%llu\n", __func__, count, *ppos); res = fb_sys_write(info, buf, count, ppos); - /* TODO: only mark changed area - update all for now */ + /* TODO: only mark changed area update all for now */ par->fbtftops.mkdirty(info, -1, 0); return res; @@ -738,8 +736,11 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display, goto alloc_fail; if (display->gamma_num && display->gamma_len) { - gamma_curves = devm_kzalloc(dev, display->gamma_num * display->gamma_len * sizeof(gamma_curves[0]), - GFP_KERNEL); + gamma_curves = devm_kcalloc(dev, + display->gamma_num * + display->gamma_len, + sizeof(gamma_curves[0]), + GFP_KERNEL); if (!gamma_curves) goto alloc_fail; } @@ -987,10 +988,6 @@ int fbtft_register_framebuffer(struct fb_info *fb_info) reg_fail: if (par->fbtftops.unregister_backlight) par->fbtftops.unregister_backlight(par); - if (spi) - spi_set_drvdata(spi, NULL); - if (par->pdev) - platform_set_drvdata(par->pdev, NULL); return ret; } @@ -1008,12 +1005,7 @@ EXPORT_SYMBOL(fbtft_register_framebuffer); int 
fbtft_unregister_framebuffer(struct fb_info *fb_info) { struct fbtft_par *par = fb_info->par; - struct spi_device *spi = par->spi; - if (spi) - spi_set_drvdata(spi, NULL); - if (par->pdev) - platform_set_drvdata(par->pdev, NULL); if (par->fbtftops.unregister_backlight) par->fbtftops.unregister_backlight(par); fbtft_sysfs_exit(par); diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 3ccdec94fee7..d3bc3943a983 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -20,14 +20,6 @@ #include <linux/spi/spi.h> #include <linux/platform_device.h> -#define FBTFT_NOP 0x00 -#define FBTFT_SWRESET 0x01 -#define FBTFT_RDDID 0x04 -#define FBTFT_RDDST 0x09 -#define FBTFT_CASET 0x2A -#define FBTFT_RASET 0x2B -#define FBTFT_RAMWR 0x2C - #define FBTFT_ONBOARD_BACKLIGHT 2 #define FBTFT_GPIO_NO_MATCH 0xFFFF diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c index 071f79bd19f3..241d7c6bebde 100644 --- a/drivers/staging/fbtft/fbtft_device.c +++ b/drivers/staging/fbtft/fbtft_device.c @@ -212,38 +212,63 @@ static int hy28b_init_sequence[] = { "0F 00 1 7 4 0 0 0 6 7" static int pitft_init_sequence[] = { - -1, 0x01, -2, 5, -1, 0x28, -1, 0xEF, - 0x03, 0x80, 0x02, -1, 0xCF, 0x00, 0xC1, 0x30, + -1, MIPI_DCS_SOFT_RESET, + -2, 5, + -1, MIPI_DCS_SET_DISPLAY_OFF, + -1, 0xEF, 0x03, 0x80, 0x02, + -1, 0xCF, 0x00, 0xC1, 0x30, -1, 0xED, 0x64, 0x03, 0x12, 0x81, -1, 0xE8, 0x85, 0x00, 0x78, -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02, - -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00, - -1, 0xC0, 0x23, -1, 0xC1, 0x10, -1, 0xC5, - 0x3e, 0x28, -1, 0xC7, 0x86, -1, 0x3A, 0x55, - -1, 0xB1, 0x00, 0x18, -1, 0xB6, 0x08, 0x82, - 0x27, -1, 0xF2, 0x00, -1, 0x26, 0x01, - -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, - 0x4E, 0xF1, 0x37, 0x07, 0x10, 0x03, - 0x0E, 0x09, 0x00, -1, 0xE1, 0x00, 0x0E, 0x14, - 0x03, 0x11, 0x07, 0x31, 0xC1, 0x48, - 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, -1, - 0x11, -2, 100, -1, 0x29, -2, 20, -3 }; + -1, 0xF7, 0x20, + -1, 0xEA, 0x00, 0x00, + -1, 0xC0, 0x23, + -1, 0xC1, 0x10, + -1, 0xC5, 0x3E, 0x28, + -1, 0xC7, 0x86, + -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55, + -1, 0xB1, 0x00, 0x18, + -1, 0xB6, 0x08, 0x82, 0x27, + -1, 0xF2, 0x00, + -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01, + -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, + 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00, + -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, + 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, + -1, MIPI_DCS_EXIT_SLEEP_MODE, + -2, 100, + -1, MIPI_DCS_SET_DISPLAY_ON, + -2, 20, + -3 +}; static int waveshare32b_init_sequence[] = { -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02, -1, 0xCF, 0x00, 0xC1, 0x30, - -1, 0xE8, 0x85, 0x00, 0x78, -1, 0xEA, 0x00, - 0x00, -1, 0xED, 0x64, 0x03, 0x12, 0x81, - -1, 0xF7, 0x20, -1, 0xC0, 0x23, -1, 0xC1, - 0x10, -1, 0xC5, 0x3e, 0x28, -1, 0xC7, 0x86, - -1, 0x36, 0x28, -1, 0x3A, 0x55, -1, 0xB1, 0x00, - 0x18, -1, 0xB6, 0x08, 0x82, 0x27, - -1, 0xF2, 0x00, -1, 0x26, 0x01, + -1, 0xE8, 0x85, 0x00, 0x78, + -1, 0xEA, 0x00, 0x00, + -1, 0xED, 0x64, 0x03, 0x12, 0x81, + -1, 0xF7, 0x20, + -1, 0xC0, 0x23, + -1, 0xC1, 0x10, + -1, 0xC5, 0x3E, 0x28, + -1, 0xC7, 0x86, + -1, MIPI_DCS_SET_ADDRESS_MODE, 0x28, + -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55, + -1, 0xB1, 0x00, 0x18, + -1, 0xB6, 0x08, 0x82, 0x27, + -1, 0xF2, 0x00, + -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01, -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, - 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00, + 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00, -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, - 0xC1, 0x48, 
0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, - -1, 0x11, -2, 120, -1, 0x29, -1, 0x2c, -3 }; + 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, + -1, MIPI_DCS_EXIT_SLEEP_MODE, + -2, 120, + -1, MIPI_DCS_SET_DISPLAY_ON, + -1, MIPI_DCS_WRITE_MEMORY_START, + -3 +}; /* Supported displays in alphabetical order */ static struct fbtft_device_display displays[] = { @@ -1287,7 +1312,7 @@ Device 'xxx' does not have a release() function, it is broken and must be fixed static int spi_device_found(struct device *dev, void *data) { - struct spi_device *spi = container_of(dev, struct spi_device, dev); + struct spi_device *spi = to_spi_device(dev); dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias, dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word, @@ -1305,7 +1330,7 @@ static void pr_spi_devices(void) static int p_device_found(struct device *dev, void *data) { struct platform_device - *pdev = container_of(dev, struct platform_device, dev); + *pdev = to_platform_device(dev); if (strstr(pdev->name, "fb")) dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id, diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig index 0d779d9ccbd8..1f959339c671 100644 --- a/drivers/staging/fsl-mc/bus/Kconfig +++ b/drivers/staging/fsl-mc/bus/Kconfig @@ -7,8 +7,9 @@ # config FSL_MC_BUS - tristate "Freescale Management Complex (MC) bus driver" + bool "Freescale Management Complex (MC) bus driver" depends on OF && ARM64 + select GENERIC_MSI_IRQ_DOMAIN help Driver to enable the bus infrastructure for the Freescale QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile index 25433a998478..e7315170b7a3 100644 --- a/drivers/staging/fsl-mc/bus/Makefile +++ b/drivers/staging/fsl-mc/bus/Makefile @@ -13,5 +13,7 @@ mc-bus-driver-objs := mc-bus.o \ dpmng.o \ dprc-driver.o \ mc-allocator.o \ + mc-msi.o \ + irq-gic-v3-its-fsl-mc-msi.o \ dpmcp.o \ dpbp.o diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c index 2c4cd70b4cbb..31488a7b9e86 100644 --- a/drivers/staging/fsl-mc/bus/dprc-driver.c +++ b/drivers/staging/fsl-mc/bus/dprc-driver.c @@ -13,6 +13,8 @@ #include "../include/mc-sys.h" #include <linux/module.h> #include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/msi.h> #include "dprc-cmd.h" struct dprc_child_objs { @@ -127,7 +129,7 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev, { int error; u32 plugged_flag_at_mc = - (obj_desc->state & DPRC_OBJ_STATE_PLUGGED); + obj_desc->state & DPRC_OBJ_STATE_PLUGGED; if (plugged_flag_at_mc != (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) { @@ -241,6 +243,7 @@ static void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) * dprc_scan_objects - Discover objects in a DPRC * * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object + * @total_irq_count: total number of IRQs needed by objects in the DPRC. * * Detects objects added and removed from a DPRC and synchronizes the * state of the Linux bus driver, MC by adding and removing @@ -254,11 +257,13 @@ static void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) * populated before they can get allocation requests from probe callbacks * of the device drivers for the non-allocatable devices. 
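/*
 * Reference sketch (not part of the patch; names are illustrative): the
 * DPRC interrupt support added further down in this patch uses the
 * standard threaded-IRQ split, because the thread handler takes the bus
 * scan mutex and performs MC portal I/O, neither of which is allowed in
 * hard-IRQ context:
 */
static irqreturn_t example_hard_handler(int irq, void *arg)
{
        /* hard-IRQ context: do nothing that can sleep, just wake the thread */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *arg)
{
        /* process context: mutexes and (possibly sleeping) bus I/O are fine */
        return IRQ_HANDLED;
}

/*
 * Registration uses IRQF_ONESHOT so the line stays masked until the thread
 * finishes, exactly as register_dprc_irq_handler() below does:
 *
 *   devm_request_threaded_irq(dev, irq, example_hard_handler,
 *                             example_thread_fn,
 *                             IRQF_NO_SUSPEND | IRQF_ONESHOT,
 *                             "example irq0", dev);
 */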
*/ -int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev) +int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, + unsigned int *total_irq_count) { int num_child_objects; int dprc_get_obj_failures; int error; + unsigned int irq_count = mc_bus_dev->obj_desc.irq_count; struct dprc_obj_desc *child_obj_desc_array = NULL; error = dprc_get_obj_count(mc_bus_dev->mc_io, @@ -307,6 +312,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev) continue; } + irq_count += obj_desc->irq_count; dev_dbg(&mc_bus_dev->dev, "Discovered object: type %s, id %d\n", obj_desc->type, obj_desc->id); @@ -319,6 +325,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev) } } + *total_irq_count = irq_count; dprc_remove_devices(mc_bus_dev, child_obj_desc_array, num_child_objects); @@ -344,6 +351,7 @@ EXPORT_SYMBOL_GPL(dprc_scan_objects); int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) { int error; + unsigned int irq_count; struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); dprc_init_all_resource_pools(mc_bus_dev); @@ -352,11 +360,25 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) * Discover objects in the DPRC: */ mutex_lock(&mc_bus->scan_mutex); - error = dprc_scan_objects(mc_bus_dev); + error = dprc_scan_objects(mc_bus_dev, &irq_count); mutex_unlock(&mc_bus->scan_mutex); if (error < 0) goto error; + if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) { + if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) { + dev_warn(&mc_bus_dev->dev, + "IRQs needed (%u) exceed IRQs preallocated (%u)\n", + irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS); + } + + error = fsl_mc_populate_irq_pool( + mc_bus, + FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS); + if (error < 0) + goto error; + } + return 0; error: dprc_cleanup_all_resource_pools(mc_bus_dev); @@ -365,6 +387,230 @@ error: EXPORT_SYMBOL_GPL(dprc_scan_container); /** + * dprc_irq0_handler - Regular ISR for DPRC interrupt 0 + * + * @irq: IRQ number of the interrupt being handled + * @arg: Pointer to device structure + */ +static irqreturn_t dprc_irq0_handler(int irq_num, void *arg) +{ + return IRQ_WAKE_THREAD; +} + +/** + * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0 + * + * @irq: IRQ number of the interrupt being handled + * @arg: Pointer to device structure + */ +static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) +{ + int error; + u32 status; + struct device *dev = arg; + struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); + struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); + struct fsl_mc_io *mc_io = mc_dev->mc_io; + struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc; + + dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n", + irq_num, smp_processor_id()); + + if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC))) + return IRQ_HANDLED; + + mutex_lock(&mc_bus->scan_mutex); + if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num)) + goto out; + + error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0, + &status); + if (error < 0) { + dev_err(dev, + "dprc_get_irq_status() failed: %d\n", error); + goto out; + } + + error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, + status); + if (error < 0) { + dev_err(dev, + "dprc_clear_irq_status() failed: %d\n", error); + goto out; + } + + if (status & (DPRC_IRQ_EVENT_OBJ_ADDED | + DPRC_IRQ_EVENT_OBJ_REMOVED | + DPRC_IRQ_EVENT_CONTAINER_DESTROYED | + DPRC_IRQ_EVENT_OBJ_DESTROYED | + DPRC_IRQ_EVENT_OBJ_CREATED)) { + unsigned int irq_count; + + error = dprc_scan_objects(mc_dev, &irq_count); + if (error < 0) { + /* + * If the error is -ENXIO, we ignore it, 
as it indicates + * that the object scan was aborted, as we detected that + * an object was removed from the DPRC in the MC, while + * we were scanning the DPRC. + */ + if (error != -ENXIO) { + dev_err(dev, "dprc_scan_objects() failed: %d\n", + error); + } + + goto out; + } + + if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) { + dev_warn(dev, + "IRQs needed (%u) exceed IRQs preallocated (%u)\n", + irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS); + } + } + +out: + mutex_unlock(&mc_bus->scan_mutex); + return IRQ_HANDLED; +} + +/* + * Disable and clear interrupt for a given DPRC object + */ +static int disable_dprc_irq(struct fsl_mc_device *mc_dev) +{ + int error; + struct fsl_mc_io *mc_io = mc_dev->mc_io; + + WARN_ON(mc_dev->obj_desc.irq_count != 1); + + /* + * Disable generation of interrupt, while we configure it: + */ + error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0); + if (error < 0) { + dev_err(&mc_dev->dev, + "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n", + error); + return error; + } + + /* + * Disable all interrupt causes for the interrupt: + */ + error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0); + if (error < 0) { + dev_err(&mc_dev->dev, + "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n", + error); + return error; + } + + /* + * Clear any leftover interrupts: + */ + error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U); + if (error < 0) { + dev_err(&mc_dev->dev, + "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n", + error); + return error; + } + + return 0; +} + +static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev) +{ + int error; + struct fsl_mc_device_irq *irq = mc_dev->irqs[0]; + + WARN_ON(mc_dev->obj_desc.irq_count != 1); + + /* + * NOTE: devm_request_threaded_irq() invokes the device-specific + * function that programs the MSI physically in the device + */ + error = devm_request_threaded_irq(&mc_dev->dev, + irq->msi_desc->irq, + dprc_irq0_handler, + dprc_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + "FSL MC DPRC irq0", + &mc_dev->dev); + if (error < 0) { + dev_err(&mc_dev->dev, + "devm_request_threaded_irq() failed: %d\n", + error); + return error; + } + + return 0; +} + +static int enable_dprc_irq(struct fsl_mc_device *mc_dev) +{ + int error; + + /* + * Enable all interrupt causes for the interrupt: + */ + error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, + ~0x0u); + if (error < 0) { + dev_err(&mc_dev->dev, + "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n", + error); + + return error; + } + + /* + * Enable generation of the interrupt: + */ + error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1); + if (error < 0) { + dev_err(&mc_dev->dev, + "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n", + error); + + return error; + } + + return 0; +} + +/* + * Setup interrupt for a given DPRC device + */ +static int dprc_setup_irq(struct fsl_mc_device *mc_dev) +{ + int error; + + error = fsl_mc_allocate_irqs(mc_dev); + if (error < 0) + return error; + + error = disable_dprc_irq(mc_dev); + if (error < 0) + goto error_free_irqs; + + error = register_dprc_irq_handler(mc_dev); + if (error < 0) + goto error_free_irqs; + + error = enable_dprc_irq(mc_dev); + if (error < 0) + goto error_free_irqs; + + return 0; + +error_free_irqs: + fsl_mc_free_irqs(mc_dev); + return error; +} + +/** * dprc_probe - callback invoked when a DPRC is being bound to this driver * * @mc_dev: Pointer to fsl-mc device representing a DPRC @@ -378,15 
+624,24 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) { int error; size_t region_size; + struct device *parent_dev = mc_dev->dev.parent; struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); + bool mc_io_created = false; + bool msi_domain_set = false; if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) return -EINVAL; + if (WARN_ON(dev_get_msi_domain(&mc_dev->dev))) + return -EINVAL; + if (!mc_dev->mc_io) { /* * This is a child DPRC: */ + if (WARN_ON(parent_dev->bus != &fsl_mc_bus_type)) + return -EINVAL; + if (WARN_ON(mc_dev->obj_desc.region_count == 0)) return -EINVAL; @@ -396,16 +651,45 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) error = fsl_create_mc_io(&mc_dev->dev, mc_dev->regions[0].start, region_size, - NULL, 0, &mc_dev->mc_io); + NULL, + FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, + &mc_dev->mc_io); if (error < 0) return error; + + mc_io_created = true; + + /* + * Inherit parent MSI domain: + */ + dev_set_msi_domain(&mc_dev->dev, + dev_get_msi_domain(parent_dev)); + msi_domain_set = true; + } else { + /* + * This is a root DPRC + */ + struct irq_domain *mc_msi_domain; + + if (WARN_ON(parent_dev->bus == &fsl_mc_bus_type)) + return -EINVAL; + + error = fsl_mc_find_msi_domain(parent_dev, + &mc_msi_domain); + if (error < 0) { + dev_warn(&mc_dev->dev, + "WARNING: MC bus without interrupt support\n"); + } else { + dev_set_msi_domain(&mc_dev->dev, mc_msi_domain); + msi_domain_set = true; + } } error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, &mc_dev->mc_handle); if (error < 0) { dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error); - goto error_cleanup_mc_io; + goto error_cleanup_msi_domain; } mutex_init(&mc_bus->scan_mutex); @@ -417,17 +701,40 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) if (error < 0) goto error_cleanup_open; + /* + * Configure interrupt for the DPRC object associated with this MC bus: + */ + error = dprc_setup_irq(mc_dev); + if (error < 0) + goto error_cleanup_open; + dev_info(&mc_dev->dev, "DPRC device bound to driver"); return 0; error_cleanup_open: (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -error_cleanup_mc_io: - fsl_destroy_mc_io(mc_dev->mc_io); +error_cleanup_msi_domain: + if (msi_domain_set) + dev_set_msi_domain(&mc_dev->dev, NULL); + + if (mc_io_created) { + fsl_destroy_mc_io(mc_dev->mc_io); + mc_dev->mc_io = NULL; + } + return error; } +/* + * Tear down interrupt for a given DPRC object + */ +static void dprc_teardown_irq(struct fsl_mc_device *mc_dev) +{ + (void)disable_dprc_irq(mc_dev); + fsl_mc_free_irqs(mc_dev); +} + /** * dprc_remove - callback invoked when a DPRC is being unbound from this driver * @@ -441,18 +748,30 @@ error_cleanup_mc_io: static int dprc_remove(struct fsl_mc_device *mc_dev) { int error; + struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) return -EINVAL; if (WARN_ON(!mc_dev->mc_io)) return -EINVAL; + if (WARN_ON(!mc_bus->irq_resources)) + return -EINVAL; + + if (dev_get_msi_domain(&mc_dev->dev)) + dprc_teardown_irq(mc_dev); + device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); dprc_cleanup_all_resource_pools(mc_dev); error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); if (error < 0) dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); + if (dev_get_msi_domain(&mc_dev->dev)) { + fsl_mc_cleanup_irq_pool(mc_bus); + dev_set_msi_domain(&mc_dev->dev, NULL); + } + dev_info(&mc_dev->dev, "DPRC device unbound from driver"); return 0; } diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c 
b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c new file mode 100644 index 000000000000..720e2b018d00 --- /dev/null +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c @@ -0,0 +1,125 @@ +/* + * Freescale Management Complex (MC) bus driver MSI support + * + * Copyright (C) 2015 Freescale Semiconductor, Inc. + * Author: German Rivera <German.Rivera@freescale.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include "../include/mc-private.h" +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/irqchip/arm-gic-v3.h> +#include <linux/irq.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include "../include/mc-sys.h" +#include "dprc-cmd.h" + +static struct irq_chip its_msi_irq_chip = { + .name = "fsl-mc-bus-msi", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = msi_domain_set_affinity +}; + +static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain, + struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct fsl_mc_device *mc_bus_dev; + struct msi_domain_info *msi_info; + + if (WARN_ON(dev->bus != &fsl_mc_bus_type)) + return -EINVAL; + + mc_bus_dev = to_fsl_mc_device(dev); + if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC))) + return -EINVAL; + + /* + * Set the device Id to be passed to the GIC-ITS: + * + * NOTE: This device id corresponds to the IOMMU stream ID + * associated with the DPRC object (ICID). + */ + info->scratchpad[0].ul = mc_bus_dev->icid; + msi_info = msi_get_domain_info(msi_domain->parent); + return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info); +} + +static struct msi_domain_ops its_fsl_mc_msi_ops = { + .msi_prepare = its_fsl_mc_msi_prepare, +}; + +static struct msi_domain_info its_fsl_mc_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), + .ops = &its_fsl_mc_msi_ops, + .chip = &its_msi_irq_chip, +}; + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-v3-its", }, + {}, +}; + +int __init its_fsl_mc_msi_init(void) +{ + struct device_node *np; + struct irq_domain *parent; + struct irq_domain *mc_msi_domain; + + for (np = of_find_matching_node(NULL, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_property_read_bool(np, "msi-controller")) + continue; + + parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS); + if (!parent || !msi_get_domain_info(parent)) { + pr_err("%s: unable to locate ITS domain\n", + np->full_name); + continue; + } + + mc_msi_domain = fsl_mc_msi_create_irq_domain( + of_node_to_fwnode(np), + &its_fsl_mc_msi_domain_info, + parent); + if (!mc_msi_domain) { + pr_err("%s: unable to create fsl-mc domain\n", + np->full_name); + continue; + } + + WARN_ON(mc_msi_domain-> + host_data != &its_fsl_mc_msi_domain_info); + + pr_info("fsl-mc MSI: %s domain created\n", np->full_name); + } + + return 0; +} + +void its_fsl_mc_msi_cleanup(void) +{ + struct device_node *np; + + for (np = of_find_matching_node(NULL, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + struct irq_domain *mc_msi_domain = irq_find_matching_host( + np, + DOMAIN_BUS_FSL_MC_MSI); + + if (!of_property_read_bool(np, "msi-controller")) + continue; + + if (mc_msi_domain && + mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info) + 
irq_domain_remove(mc_msi_domain); + } +} diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c index 88d1857521a5..86f8543c2b9a 100644 --- a/drivers/staging/fsl-mc/bus/mc-allocator.c +++ b/drivers/staging/fsl-mc/bus/mc-allocator.c @@ -15,6 +15,7 @@ #include "../include/dpcon-cmd.h" #include "dpmcp-cmd.h" #include "dpmcp.h" +#include <linux/msi.h> /** * fsl_mc_resource_pool_add_device - add allocatable device to a resource @@ -160,6 +161,7 @@ static const char *const fsl_mc_pool_type_strings[] = { [FSL_MC_POOL_DPMCP] = "dpmcp", [FSL_MC_POOL_DPBP] = "dpbp", [FSL_MC_POOL_DPCON] = "dpcon", + [FSL_MC_POOL_IRQ] = "irq", }; static int __must_check object_type_to_pool_type(const char *object_type, @@ -465,6 +467,203 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev) } EXPORT_SYMBOL_GPL(fsl_mc_object_free); +/* + * Initialize the interrupt pool associated with a MC bus. + * It allocates a block of IRQs from the GIC-ITS + */ +int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, + unsigned int irq_count) +{ + unsigned int i; + struct msi_desc *msi_desc; + struct fsl_mc_device_irq *irq_resources; + struct fsl_mc_device_irq *mc_dev_irq; + int error; + struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; + struct fsl_mc_resource_pool *res_pool = + &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; + + if (WARN_ON(irq_count == 0 || + irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)) + return -EINVAL; + + error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count); + if (error < 0) + return error; + + irq_resources = devm_kzalloc(&mc_bus_dev->dev, + sizeof(*irq_resources) * irq_count, + GFP_KERNEL); + if (!irq_resources) { + error = -ENOMEM; + goto cleanup_msi_irqs; + } + + for (i = 0; i < irq_count; i++) { + mc_dev_irq = &irq_resources[i]; + + /* + * NOTE: This mc_dev_irq's MSI addr/value pair will be set + * by the fsl_mc_msi_write_msg() callback + */ + mc_dev_irq->resource.type = res_pool->type; + mc_dev_irq->resource.data = mc_dev_irq; + mc_dev_irq->resource.parent_pool = res_pool; + INIT_LIST_HEAD(&mc_dev_irq->resource.node); + list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list); + } + + for_each_msi_entry(msi_desc, &mc_bus_dev->dev) { + mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index]; + mc_dev_irq->msi_desc = msi_desc; + mc_dev_irq->resource.id = msi_desc->irq; + } + + res_pool->max_count = irq_count; + res_pool->free_count = irq_count; + mc_bus->irq_resources = irq_resources; + return 0; + +cleanup_msi_irqs: + fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev); + return error; +} +EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); + +/** + * Teardown the interrupt pool associated with an MC bus. + * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. + */ +void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) +{ + struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; + struct fsl_mc_resource_pool *res_pool = + &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; + + if (WARN_ON(!mc_bus->irq_resources)) + return; + + if (WARN_ON(res_pool->max_count == 0)) + return; + + if (WARN_ON(res_pool->free_count != res_pool->max_count)) + return; + + INIT_LIST_HEAD(&res_pool->free_list); + res_pool->max_count = 0; + res_pool->free_count = 0; + mc_bus->irq_resources = NULL; + fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev); +} +EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); + +/** + * It allocates the IRQs required by a given MC object device. 
The + * IRQs are allocated from the interrupt pool associated with the + * MC bus that contains the device, if the device is not a DPRC device. + * Otherwise, the IRQs are allocated from the interrupt pool associated + * with the MC bus that represents the DPRC device itself. + */ +int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) +{ + int i; + int irq_count; + int res_allocated_count = 0; + int error = -EINVAL; + struct fsl_mc_device_irq **irqs = NULL; + struct fsl_mc_bus *mc_bus; + struct fsl_mc_resource_pool *res_pool; + + if (WARN_ON(mc_dev->irqs)) + return -EINVAL; + + irq_count = mc_dev->obj_desc.irq_count; + if (WARN_ON(irq_count == 0)) + return -EINVAL; + + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) + mc_bus = to_fsl_mc_bus(mc_dev); + else + mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); + + if (WARN_ON(!mc_bus->irq_resources)) + return -EINVAL; + + res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; + if (res_pool->free_count < irq_count) { + dev_err(&mc_dev->dev, + "Not able to allocate %u irqs for device\n", irq_count); + return -ENOSPC; + } + + irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]), + GFP_KERNEL); + if (!irqs) + return -ENOMEM; + + for (i = 0; i < irq_count; i++) { + struct fsl_mc_resource *resource; + + error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ, + &resource); + if (error < 0) + goto error_resource_alloc; + + irqs[i] = to_fsl_mc_irq(resource); + res_allocated_count++; + + WARN_ON(irqs[i]->mc_dev); + irqs[i]->mc_dev = mc_dev; + irqs[i]->dev_irq_index = i; + } + + mc_dev->irqs = irqs; + return 0; + +error_resource_alloc: + for (i = 0; i < res_allocated_count; i++) { + irqs[i]->mc_dev = NULL; + fsl_mc_resource_free(&irqs[i]->resource); + } + + return error; +} +EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); + +/* + * It frees the IRQs that were allocated for a MC object device, by + * returning them to the corresponding interrupt pool. 
+ */ +void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) +{ + int i; + int irq_count; + struct fsl_mc_bus *mc_bus; + struct fsl_mc_device_irq **irqs = mc_dev->irqs; + + if (WARN_ON(!irqs)) + return; + + irq_count = mc_dev->obj_desc.irq_count; + + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) + mc_bus = to_fsl_mc_bus(mc_dev); + else + mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); + + if (WARN_ON(!mc_bus->irq_resources)) + return; + + for (i = 0; i < irq_count; i++) { + WARN_ON(!irqs[i]->mc_dev); + irqs[i]->mc_dev = NULL; + fsl_mc_resource_free(&irqs[i]->resource); + } + + mc_dev->irqs = NULL; +} +EXPORT_SYMBOL_GPL(fsl_mc_free_irqs); + /** * fsl_mc_allocator_probe - callback invoked when an allocatable device is * being added to the system @@ -557,7 +756,7 @@ int __init fsl_mc_allocator_driver_init(void) return fsl_mc_driver_register(&fsl_mc_allocator_driver); } -void __exit fsl_mc_allocator_driver_exit(void) +void fsl_mc_allocator_driver_exit(void) { fsl_mc_driver_unregister(&fsl_mc_allocator_driver); } diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c index 84db55b4dda5..9f77c37bd612 100644 --- a/drivers/staging/fsl-mc/bus/mc-bus.c +++ b/drivers/staging/fsl-mc/bus/mc-bus.c @@ -16,6 +16,8 @@ #include <linux/ioport.h> #include <linux/slab.h> #include <linux/limits.h> +#include <linux/bitops.h> +#include <linux/msi.h> #include "../include/dpmng.h" #include "../include/mc-sys.h" #include "dprc-cmd.h" @@ -246,8 +248,7 @@ static bool fsl_mc_is_root_dprc(struct device *dev) fsl_mc_get_root_dprc(dev, &root_dprc_dev); if (!root_dprc_dev) return false; - else - return dev == root_dprc_dev; + return dev == root_dprc_dev; } static int get_dprc_icid(struct fsl_mc_io *mc_io, @@ -259,14 +260,15 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io, error = dprc_open(mc_io, 0, container_id, &dprc_handle); if (error < 0) { - pr_err("dprc_open() failed: %d\n", error); + dev_err(&mc_io->dev, "dprc_open() failed: %d\n", error); return error; } memset(&attr, 0, sizeof(attr)); error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr); if (error < 0) { - pr_err("dprc_get_attributes() failed: %d\n", error); + dev_err(&mc_io->dev, "dprc_get_attributes() failed: %d\n", + error); goto common_cleanup; } @@ -472,6 +474,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, mc_dev->icid = parent_mc_dev->icid; mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; mc_dev->dev.dma_mask = &mc_dev->dma_mask; + dev_set_msi_domain(&mc_dev->dev, + dev_get_msi_domain(&parent_mc_dev->dev)); } /* @@ -702,7 +706,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) mc_portal_phys_addr = res.start; mc_portal_size = resource_size(&res); error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr, - mc_portal_size, NULL, 0, &mc_io); + mc_portal_size, NULL, + FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io); if (error < 0) return error; @@ -790,7 +795,6 @@ MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table); static struct platform_driver fsl_mc_bus_driver = { .driver = { .name = "fsl_mc_bus", - .owner = THIS_MODULE, .pm = NULL, .of_match_table = fsl_mc_bus_match_table, }, @@ -832,8 +836,15 @@ static int __init fsl_mc_bus_driver_init(void) if (error < 0) goto error_cleanup_dprc_driver; + error = its_fsl_mc_msi_init(); + if (error < 0) + goto error_cleanup_mc_allocator; + return 0; +error_cleanup_mc_allocator: + fsl_mc_allocator_driver_exit(); + error_cleanup_dprc_driver: dprc_driver_exit(); @@ -855,6 +866,7 @@ static void __exit fsl_mc_bus_driver_exit(void) if (WARN_ON(!mc_dev_cache)) return; + 
its_fsl_mc_msi_cleanup(); fsl_mc_allocator_driver_exit(); dprc_driver_exit(); platform_driver_unregister(&fsl_mc_bus_driver); diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c new file mode 100644 index 000000000000..3a8258ff4426 --- /dev/null +++ b/drivers/staging/fsl-mc/bus/mc-msi.c @@ -0,0 +1,276 @@ +/* + * Freescale Management Complex (MC) bus driver MSI support + * + * Copyright (C) 2015 Freescale Semiconductor, Inc. + * Author: German Rivera <German.Rivera@freescale.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include "../include/mc-private.h" +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/irqchip/arm-gic-v3.h> +#include <linux/of_irq.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/msi.h> +#include "../include/mc-sys.h" +#include "dprc-cmd.h" + +static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg, + struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index; +} + +static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info) +{ + struct msi_domain_ops *ops = info->ops; + + if (WARN_ON(!ops)) + return; + + /* + * set_desc should not be set by the caller + */ + if (WARN_ON(ops->set_desc)) + return; + + ops->set_desc = fsl_mc_msi_set_desc; +} + +static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev, + struct fsl_mc_device_irq *mc_dev_irq) +{ + int error; + struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev; + struct msi_desc *msi_desc = mc_dev_irq->msi_desc; + struct dprc_irq_cfg irq_cfg; + + /* + * msi_desc->msg.address is 0x0 when this function is invoked in + * the free_irq() code path. In this case, for the MC, we don't + * really need to "unprogram" the MSI, so we just return. 
+ */ + if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0) + return; + + if (WARN_ON(!owner_mc_dev)) + return; + + irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) | + msi_desc->msg.address_lo; + irq_cfg.val = msi_desc->msg.data; + irq_cfg.user_irq_id = msi_desc->irq; + + if (owner_mc_dev == mc_bus_dev) { + /* + * IRQ is for the mc_bus_dev's DPRC itself + */ + error = dprc_set_irq(mc_bus_dev->mc_io, + MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, + mc_bus_dev->mc_handle, + mc_dev_irq->dev_irq_index, + &irq_cfg); + if (error < 0) { + dev_err(&owner_mc_dev->dev, + "dprc_set_irq() failed: %d\n", error); + } + } else { + /* + * IRQ is for for a child device of mc_bus_dev + */ + error = dprc_set_obj_irq(mc_bus_dev->mc_io, + MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, + mc_bus_dev->mc_handle, + owner_mc_dev->obj_desc.type, + owner_mc_dev->obj_desc.id, + mc_dev_irq->dev_irq_index, + &irq_cfg); + if (error < 0) { + dev_err(&owner_mc_dev->dev, + "dprc_obj_set_irq() failed: %d\n", error); + } + } +} + +/* + * NOTE: This function is invoked with interrupts disabled + */ +static void fsl_mc_msi_write_msg(struct irq_data *irq_data, + struct msi_msg *msg) +{ + struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data); + struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev); + struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); + struct fsl_mc_device_irq *mc_dev_irq = + &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index]; + + WARN_ON(mc_dev_irq->msi_desc != msi_desc); + msi_desc->msg = *msg; + + /* + * Program the MSI (paddr, value) pair in the device: + */ + __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq); +} + +static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info) +{ + struct irq_chip *chip = info->chip; + + if (WARN_ON((!chip))) + return; + + /* + * irq_write_msi_msg should not be set by the caller + */ + if (WARN_ON(chip->irq_write_msi_msg)) + return; + + chip->irq_write_msi_msg = fsl_mc_msi_write_msg; +} + +/** + * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain + * @np: Optional device-tree node of the interrupt controller + * @info: MSI domain info + * @parent: Parent irq domain + * + * Updates the domain and chip ops and creates a fsl-mc MSI + * interrupt domain. + * + * Returns: + * A domain pointer or NULL in case of failure. 
+ */ +struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent) +{ + struct irq_domain *domain; + + if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) + fsl_mc_msi_update_dom_ops(info); + if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) + fsl_mc_msi_update_chip_ops(info); + + domain = msi_create_irq_domain(fwnode, info, parent); + if (domain) + domain->bus_token = DOMAIN_BUS_FSL_MC_MSI; + + return domain; +} + +int fsl_mc_find_msi_domain(struct device *mc_platform_dev, + struct irq_domain **mc_msi_domain) +{ + struct irq_domain *msi_domain; + struct device_node *mc_of_node = mc_platform_dev->of_node; + + msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node, + DOMAIN_BUS_FSL_MC_MSI); + if (!msi_domain) { + pr_err("Unable to find fsl-mc MSI domain for %s\n", + mc_of_node->full_name); + + return -ENOENT; + } + + *mc_msi_domain = msi_domain; + return 0; +} + +static void fsl_mc_msi_free_descs(struct device *dev) +{ + struct msi_desc *desc, *tmp; + + list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { + list_del(&desc->list); + free_msi_entry(desc); + } +} + +static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count) + +{ + unsigned int i; + int error; + struct msi_desc *msi_desc; + + for (i = 0; i < irq_count; i++) { + msi_desc = alloc_msi_entry(dev); + if (!msi_desc) { + dev_err(dev, "Failed to allocate msi entry\n"); + error = -ENOMEM; + goto cleanup_msi_descs; + } + + msi_desc->fsl_mc.msi_index = i; + msi_desc->nvec_used = 1; + INIT_LIST_HEAD(&msi_desc->list); + list_add_tail(&msi_desc->list, dev_to_msi_list(dev)); + } + + return 0; + +cleanup_msi_descs: + fsl_mc_msi_free_descs(dev); + return error; +} + +int fsl_mc_msi_domain_alloc_irqs(struct device *dev, + unsigned int irq_count) +{ + struct irq_domain *msi_domain; + int error; + + if (WARN_ON(!list_empty(dev_to_msi_list(dev)))) + return -EINVAL; + + error = fsl_mc_msi_alloc_descs(dev, irq_count); + if (error < 0) + return error; + + msi_domain = dev_get_msi_domain(dev); + if (WARN_ON(!msi_domain)) { + error = -EINVAL; + goto cleanup_msi_descs; + } + + /* + * NOTE: Calling this function will trigger the invocation of the + * its_fsl_mc_msi_prepare() callback + */ + error = msi_domain_alloc_irqs(msi_domain, dev, irq_count); + + if (error) { + dev_err(dev, "Failed to allocate IRQs\n"); + goto cleanup_msi_descs; + } + + return 0; + +cleanup_msi_descs: + fsl_mc_msi_free_descs(dev); + return error; +} + +void fsl_mc_msi_domain_free_irqs(struct device *dev) +{ + struct irq_domain *msi_domain; + + msi_domain = dev_get_msi_domain(dev); + if (WARN_ON(!msi_domain)) + return; + + msi_domain_free_irqs(msi_domain, dev); + + if (WARN_ON(list_empty(dev_to_msi_list(dev)))) + return; + + fsl_mc_msi_free_descs(dev); +} diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c index 6e1489246066..8101c469abb0 100644 --- a/drivers/staging/fsl-mc/bus/mc-sys.c +++ b/drivers/staging/fsl-mc/bus/mc-sys.c @@ -328,7 +328,8 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io, MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); if (time_after_eq(jiffies, jiffies_until_timeout)) { - pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", + dev_dbg(&mc_io->dev, + "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", mc_io->portal_phys_addr, (unsigned int) MC_CMD_HDR_READ_TOKEN(cmd->header), @@ -369,7 +370,8 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io, 
udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; if (timeout_usecs == 0) { - pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", + dev_dbg(&mc_io->dev, + "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", mc_io->portal_phys_addr, (unsigned int) MC_CMD_HDR_READ_TOKEN(cmd->header), @@ -424,7 +426,8 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) goto common_exit; if (status != MC_CMD_STATUS_OK) { - pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", + dev_dbg(&mc_io->dev, + "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", mc_io->portal_phys_addr, (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header), (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header), diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h index c3152f677ff1..94c492706315 100644 --- a/drivers/staging/fsl-mc/include/dprc.h +++ b/drivers/staging/fsl-mc/include/dprc.h @@ -176,7 +176,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io, * @user_irq_id: A user defined number associated with this IRQ */ struct dprc_irq_cfg { - u64 paddr; + phys_addr_t paddr; u32 val; int user_irq_id; }; diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h index c706f778626e..ee5f1d2bf604 100644 --- a/drivers/staging/fsl-mc/include/mc-private.h +++ b/drivers/staging/fsl-mc/include/mc-private.h @@ -26,6 +26,19 @@ strcmp(_obj_type, "dpmcp") == 0 || \ strcmp(_obj_type, "dpcon") == 0) +struct irq_domain; +struct msi_domain_info; + +/** + * Maximum number of total IRQs that can be pre-allocated for an MC bus' + * IRQ pool + */ +#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256 + +struct device_node; +struct irq_domain; +struct msi_domain_info; + /** * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device * @root_mc_bus_dev: MC object device representing the root DPRC @@ -79,11 +92,13 @@ struct fsl_mc_resource_pool { * @resource_pools: array of resource pools (one pool per resource type) * for this MC bus. These resources represent allocatable entities * from the physical DPRC. 
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool * @scan_mutex: Serializes bus scanning */ struct fsl_mc_bus { struct fsl_mc_device mc_dev; struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; + struct fsl_mc_device_irq *irq_resources; struct mutex scan_mutex; /* serializes bus scanning */ }; @@ -99,7 +114,8 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev); int dprc_scan_container(struct fsl_mc_device *mc_bus_dev); -int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev); +int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, + unsigned int *total_irq_count); int __init dprc_driver_init(void); @@ -107,7 +123,7 @@ void dprc_driver_exit(void); int __init fsl_mc_allocator_driver_init(void); -void __exit fsl_mc_allocator_driver_exit(void); +void fsl_mc_allocator_driver_exit(void); int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, enum fsl_mc_pool_type pool_type, @@ -116,4 +132,25 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, void fsl_mc_resource_free(struct fsl_mc_resource *resource); +struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent); + +int fsl_mc_find_msi_domain(struct device *mc_platform_dev, + struct irq_domain **mc_msi_domain); + +int fsl_mc_msi_domain_alloc_irqs(struct device *dev, + unsigned int irq_count); + +void fsl_mc_msi_domain_free_irqs(struct device *dev); + +int __init its_fsl_mc_msi_init(void); + +void its_fsl_mc_msi_cleanup(void); + +int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, + unsigned int irq_count); + +void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus); + #endif /* _FSL_MC_PRIVATE_H_ */ diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h index a933291e400a..ac7c1ce68c03 100644 --- a/drivers/staging/fsl-mc/include/mc.h +++ b/drivers/staging/fsl-mc/include/mc.h @@ -14,12 +14,14 @@ #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/list.h> +#include <linux/interrupt.h> #include "../include/dprc.h" #define FSL_MC_VENDOR_FREESCALE 0x1957 struct fsl_mc_device; struct fsl_mc_io; +struct fsl_mc_bus; /** * struct fsl_mc_driver - MC object device driver object @@ -75,6 +77,7 @@ enum fsl_mc_pool_type { FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */ + FSL_MC_POOL_IRQ, /* * NOTE: New resource pool types must be added before this entry @@ -104,6 +107,23 @@ struct fsl_mc_resource { }; /** + * struct fsl_mc_device_irq - MC object device message-based interrupt + * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs() + * @mc_dev: MC object device that owns this interrupt + * @dev_irq_index: device-relative IRQ index + * @resource: MC generic resource associated with the interrupt + */ +struct fsl_mc_device_irq { + struct msi_desc *msi_desc; + struct fsl_mc_device *mc_dev; + u8 dev_irq_index; + struct fsl_mc_resource resource; +}; + +#define to_fsl_mc_irq(_mc_resource) \ + container_of(_mc_resource, struct fsl_mc_device_irq, resource) + +/** * Bit masks for a MC object device (struct fsl_mc_device) flags */ #define FSL_MC_IS_DPRC 0x0001 @@ -124,6 +144,7 @@ struct fsl_mc_resource { * NULL if none. 
* @obj_desc: MC description of the DPAA device * @regions: pointer to array of MMIO region entries + * @irqs: pointer to array of pointers to interrupts allocated to this device * @resource: generic resource associated with this MC object device, if any. * * Generic device object for MC object devices that are "attached" to a @@ -155,6 +176,7 @@ struct fsl_mc_device { struct fsl_mc_io *mc_io; struct dprc_obj_desc obj_desc; struct resource *regions; + struct fsl_mc_device_irq **irqs; struct fsl_mc_resource *resource; }; @@ -198,6 +220,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, void fsl_mc_object_free(struct fsl_mc_device *mc_adev); +int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); + +void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); + extern struct bus_type fsl_mc_bus_type; #endif /* _FSL_MC_H_ */ diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c index 7a3347c3d02b..4cd3ed3ee141 100644 --- a/drivers/staging/fwserial/dma_fifo.c +++ b/drivers/staging/fwserial/dma_fifo.c @@ -106,7 +106,7 @@ void dma_fifo_free(struct dma_fifo *fifo) { struct dma_pending *pending, *next; - if (fifo->data == NULL) + if (!fifo->data) return; list_for_each_entry_safe(pending, next, &fifo->pending, link) @@ -123,7 +123,7 @@ void dma_fifo_reset(struct dma_fifo *fifo) { struct dma_pending *pending, *next; - if (fifo->data == NULL) + if (!fifo->data) return; list_for_each_entry_safe(pending, next, &fifo->pending, link) @@ -149,7 +149,7 @@ int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n) { int ofs, l; - if (fifo->data == NULL) + if (!fifo->data) return -ENOENT; if (fifo->corrupt) return -ENXIO; @@ -192,7 +192,7 @@ int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended) { unsigned len, n, ofs, l, limit; - if (fifo->data == NULL) + if (!fifo->data) return -ENOENT; if (fifo->corrupt) return -ENXIO; @@ -252,7 +252,7 @@ int dma_fifo_out_complete(struct dma_fifo *fifo, struct dma_pending *complete) { struct dma_pending *pending, *next, *tmp; - if (fifo->data == NULL) + if (!fifo->data) return -ENOENT; if (fifo->corrupt) return -ENXIO; diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c index b676c486cb18..9b23b5c95f5e 100644 --- a/drivers/staging/fwserial/fwserial.c +++ b/drivers/staging/fwserial/fwserial.c @@ -828,7 +828,7 @@ static void fwtty_write_xchar(struct fwtty_port *port, char ch) rcu_read_unlock(); } -struct fwtty_port *fwtty_port_get(unsigned index) +static struct fwtty_port *fwtty_port_get(unsigned index) { struct fwtty_port *port; @@ -842,7 +842,6 @@ struct fwtty_port *fwtty_port_get(unsigned index) mutex_unlock(&port_table_lock); return port; } -EXPORT_SYMBOL(fwtty_port_get); static int fwtty_ports_add(struct fw_serial *serial) { @@ -1465,9 +1464,9 @@ static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer) seq_printf(m, " %s:", dev_name(&peer->unit->device)); seq_printf(m, " node:%04x gen:%d", peer->node_id, generation); seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed, - peer->max_payload, (unsigned long long) peer->guid); - seq_printf(m, " mgmt:%012llx", (unsigned long long) peer->mgmt_addr); - seq_printf(m, " addr:%012llx", (unsigned long long) peer->status_addr); + peer->max_payload, (unsigned long long)peer->guid); + seq_printf(m, " mgmt:%012llx", (unsigned long long)peer->mgmt_addr); + seq_printf(m, " addr:%012llx", (unsigned long long)peer->status_addr); seq_putc(m, '\n'); } @@ -1514,7 +1513,7 @@ static int 
fwtty_debugfs_peers_show(struct seq_file *m, void *v) rcu_read_lock(); seq_printf(m, "card: %s guid: %016llx\n", dev_name(serial->card->device), - (unsigned long long) serial->card->guid); + (unsigned long long)serial->card->guid); list_for_each_entry_rcu(peer, &serial->peer_list, list) fwtty_debugfs_show_peer(m, peer); rcu_read_unlock(); @@ -1986,7 +1985,7 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card, * been probed for any unit devices... */ fwtty_err(card, "unknown card (guid %016llx)\n", - (unsigned long long) card->guid); + (unsigned long long)card->guid); return NULL; } @@ -2016,7 +2015,7 @@ static void __dump_peer_list(struct fw_card *card) smp_rmb(); fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n", - g, peer->node_id, (unsigned long long) peer->guid); + g, peer->node_id, (unsigned long long)peer->guid); } } #else @@ -2313,7 +2312,7 @@ static int fwserial_create(struct fw_unit *unit) list_add_rcu(&serial->list, &fwserial_list); fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n", - dev_name(card->device), (unsigned long long) card->guid); + dev_name(card->device), (unsigned long long)card->guid); err = fwserial_add_peer(serial, unit); if (!err) diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h index e13fe33a6897..6fa936501b3f 100644 --- a/drivers/staging/fwserial/fwserial.h +++ b/drivers/staging/fwserial/fwserial.h @@ -341,7 +341,6 @@ static const char loop_dev_name[] = "fwloop"; extern struct tty_driver *fwtty_driver; -struct fwtty_port *fwtty_port_get(unsigned index); /* * Returns the max send async payload size in bytes based on the unit device * link speed. Self-limiting asynchronous bandwidth (via reducing the payload) diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index 17d148f6e02c..bb552193e4ba 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -382,7 +382,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb) /* Check DHCPv4 */ if (ip->protocol == IPPROTO_UDP) { struct udphdr *udp = - (network_data + sizeof(struct iphdr)); + network_data + sizeof(struct iphdr); if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68) nic_type |= NIC_TYPE_F_DHCP; } @@ -393,12 +393,12 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb) if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ { struct icmp6hdr *icmp6 = - (network_data + sizeof(struct ipv6hdr)); + network_data + sizeof(struct ipv6hdr); if (icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) nic_type |= NIC_TYPE_ICMPV6; } else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ { struct udphdr *udp = - (network_data + sizeof(struct ipv6hdr)); + network_data + sizeof(struct ipv6hdr); if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547) nic_type |= NIC_TYPE_F_DHCP; } @@ -855,7 +855,7 @@ static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest, /* Create random nic src and copy the first * 3 bytes to be the same as dev_addr */ - random_ether_addr(nic_src); + eth_random_addr(nic_src); memcpy(nic_src, dev_addr, 3); /* Copy the nic_dest from dev_addr*/ diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c index 445f83615575..6bedd668324c 100644 --- a/drivers/staging/gdm724x/gdm_mux.c +++ b/drivers/staging/gdm724x/gdm_mux.c @@ -26,8 +26,6 @@ #include "gdm_mux.h" -static struct workqueue_struct *mux_rx_wq; - static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010}; #define 
USB_DEVICE_CDC_DATA(vid, pid) \ @@ -275,7 +273,7 @@ static void gdm_mux_rcv_complete(struct urb *urb) r->len = r->urb->actual_length; spin_lock_irqsave(&rx->to_host_lock, flags); list_add_tail(&r->to_host_list, &rx->to_host_list); - queue_work(mux_rx_wq, &mux_dev->work_rx.work); + schedule_work(&mux_dev->work_rx.work); spin_unlock_irqrestore(&rx->to_host_lock, flags); } } @@ -435,7 +433,7 @@ static int gdm_mux_send_control(void *priv_dev, int request, int value, if (ret < 0) pr_err("usb_control_msg error: %d\n", ret); - return ret < 0 ? ret : 0; + return min(ret, 0); } static void release_usb(struct mux_dev *mux_dev) @@ -602,6 +600,8 @@ static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg) mux_dev = tty_dev->priv_dev; rx = &mux_dev->rx; + cancel_work_sync(&mux_dev->work_rx.work); + if (mux_dev->usb_state != PM_NORMAL) { dev_err(intf->usb_dev, "usb suspend - invalid state\n"); return -1; @@ -656,13 +656,6 @@ static struct usb_driver gdm_mux_driver = { static int __init gdm_usb_mux_init(void) { - - mux_rx_wq = create_workqueue("mux_rx_wq"); - if (!mux_rx_wq) { - pr_err("work queue create fail\n"); - return -1; - } - register_lte_tty_driver(); return usb_register(&gdm_mux_driver); @@ -672,11 +665,6 @@ static void __exit gdm_usb_mux_exit(void) { unregister_lte_tty_driver(); - if (mux_rx_wq) { - flush_workqueue(mux_rx_wq); - destroy_workqueue(mux_rx_wq); - } - usb_deregister(&gdm_mux_driver); } diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c index 92ea1a16afff..9db9b903f1db 100644 --- a/drivers/staging/gdm724x/gdm_usb.c +++ b/drivers/staging/gdm724x/gdm_usb.c @@ -55,9 +55,6 @@ static const struct usb_device_id id_table[] = { MODULE_DEVICE_TABLE(usb, id_table); -static struct workqueue_struct *usb_tx_wq; -static struct workqueue_struct *usb_rx_wq; - static void do_tx(struct work_struct *work); static void do_rx(struct work_struct *work); @@ -476,7 +473,7 @@ static void gdm_usb_rcv_complete(struct urb *urb) if (!urb->status && r->callback) { spin_lock_irqsave(&rx->to_host_lock, flags); list_add_tail(&r->to_host_list, &rx->to_host_list); - queue_work(usb_rx_wq, &udev->work_rx.work); + schedule_work(&udev->work_rx.work); spin_unlock_irqrestore(&rx->to_host_lock, flags); } else { if (urb->status && udev->usb_state == PM_NORMAL) @@ -568,7 +565,7 @@ static void gdm_usb_send_complete(struct urb *urb) spin_lock_irqsave(&tx->lock, flags); udev->send_complete = 1; - queue_work(usb_tx_wq, &udev->work_tx.work); + schedule_work(&udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); } @@ -759,7 +756,7 @@ static int gdm_usb_sdu_send(void *priv_dev, void *data, int len, spin_lock_irqsave(&tx->lock, flags); list_add_tail(&t_sdu->list, &tx->sdu_list); - queue_work(usb_tx_wq, &udev->work_tx.work); + schedule_work(&udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); if (no_spc) @@ -796,7 +793,7 @@ static int gdm_usb_hci_send(void *priv_dev, void *data, int len, spin_lock_irqsave(&tx->lock, flags); list_add_tail(&t->list, &tx->hci_list); - queue_work(usb_tx_wq, &udev->work_tx.work); + schedule_work(&udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); return 0; @@ -944,6 +941,9 @@ static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg) } spin_unlock_irqrestore(&rx->submit_lock, flags); + cancel_work_sync(&udev->work_tx.work); + cancel_work_sync(&udev->work_rx.work); + return 0; } @@ -981,7 +981,7 @@ static int gdm_usb_resume(struct usb_interface *intf) tx = &udev->tx; spin_lock_irqsave(&tx->lock, flags); - 
queue_work(usb_tx_wq, &udev->work_tx.work); + schedule_work(&udev->work_tx.work); spin_unlock_irqrestore(&tx->lock, flags); return 0; @@ -1005,14 +1005,6 @@ static int __init gdm_usb_lte_init(void) return -1; } - usb_tx_wq = create_workqueue("usb_tx_wq"); - if (!usb_tx_wq) - return -1; - - usb_rx_wq = create_workqueue("usb_rx_wq"); - if (!usb_rx_wq) - return -1; - return usb_register(&gdm_usb_lte_driver); } @@ -1021,16 +1013,6 @@ static void __exit gdm_usb_lte_exit(void) gdm_lte_event_exit(); usb_deregister(&gdm_usb_lte_driver); - - if (usb_tx_wq) { - flush_workqueue(usb_tx_wq); - destroy_workqueue(usb_tx_wq); - } - - if (usb_rx_wq) { - flush_workqueue(usb_rx_wq); - destroy_workqueue(usb_rx_wq); - } } module_init(gdm_usb_lte_init); diff --git a/drivers/staging/gdm72xx/Kconfig b/drivers/staging/gdm72xx/Kconfig deleted file mode 100644 index bf11a7fbfc51..000000000000 --- a/drivers/staging/gdm72xx/Kconfig +++ /dev/null @@ -1,63 +0,0 @@ -# -# GCT GDM72xx WiMAX driver configuration -# - -menuconfig WIMAX_GDM72XX - tristate "GCT GDM72xx WiMAX support" - depends on NET && (USB || MMC) - help - Support a WiMAX module based on the GCT GDM72xx WiMAX chip. - -if WIMAX_GDM72XX - -config WIMAX_GDM72XX_QOS - bool "Enable QoS support" - default n - help - Enable Quality of Service support based on the data protocol of - transmitting packets. - -config WIMAX_GDM72XX_K_MODE - bool "Enable K mode" - default n - help - Enable support for proprietary functions for KT (Korea Telecom). - -config WIMAX_GDM72XX_WIMAX2 - bool "Enable WiMAX2 support" - default n - help - Enable support for transmitting multiple packets (packet - aggregation) from the WiMAX module to the host processor. - -choice - prompt "Select interface" - -config WIMAX_GDM72XX_USB - bool "USB interface" - depends on (USB = y || USB = WIMAX_GDM72XX) - help - Select this option if the WiMAX module interfaces with the host - processor via USB. - -config WIMAX_GDM72XX_SDIO - bool "SDIO interface" - depends on (MMC = y || MMC = WIMAX_GDM72XX) - help - Select this option if the WiMAX module interfaces with the host - processor via SDIO. - -endchoice - -if WIMAX_GDM72XX_USB - -config WIMAX_GDM72XX_USB_PM - bool "Enable power management support" - depends on PM - help - Enable USB power management in order to reduce power consumption - while the interface is not in use. - -endif # WIMAX_GDM72XX_USB - -endif # WIMAX_GDM72XX diff --git a/drivers/staging/gdm72xx/Makefile b/drivers/staging/gdm72xx/Makefile deleted file mode 100644 index 35da7b90b19b..000000000000 --- a/drivers/staging/gdm72xx/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -obj-$(CONFIG_WIMAX_GDM72XX) := gdmwm.o - -gdmwm-y += gdm_wimax.o netlink_k.o -gdmwm-$(CONFIG_WIMAX_GDM72XX_QOS) += gdm_qos.o -gdmwm-$(CONFIG_WIMAX_GDM72XX_SDIO) += gdm_sdio.o sdio_boot.o -gdmwm-$(CONFIG_WIMAX_GDM72XX_USB) += gdm_usb.o usb_boot.o diff --git a/drivers/staging/gdm72xx/TODO b/drivers/staging/gdm72xx/TODO deleted file mode 100644 index 62d0cd6225c8..000000000000 --- a/drivers/staging/gdm72xx/TODO +++ /dev/null @@ -1,2 +0,0 @@ -TODO: -- Clean up coding style to meet kernel standard. diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c deleted file mode 100644 index cad347a05d18..000000000000 --- a/drivers/staging/gdm72xx/gdm_qos.c +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. 
- * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/etherdevice.h> -#include <asm/byteorder.h> - -#include <linux/ip.h> -#include <linux/tcp.h> -#include <linux/if_ether.h> - -#include "gdm_wimax.h" -#include "hci.h" -#include "gdm_qos.h" - -#define MAX_FREE_LIST_CNT 32 -static struct { - struct list_head head; - int cnt; - spinlock_t lock; -} qos_free_list; - -static void init_qos_entry_list(void) -{ - qos_free_list.cnt = 0; - INIT_LIST_HEAD(&qos_free_list.head); - spin_lock_init(&qos_free_list.lock); -} - -static void *alloc_qos_entry(void) -{ - struct qos_entry_s *entry; - unsigned long flags; - - spin_lock_irqsave(&qos_free_list.lock, flags); - if (qos_free_list.cnt) { - entry = list_entry(qos_free_list.head.prev, struct qos_entry_s, - list); - list_del(&entry->list); - qos_free_list.cnt--; - spin_unlock_irqrestore(&qos_free_list.lock, flags); - return entry; - } - spin_unlock_irqrestore(&qos_free_list.lock, flags); - - return kmalloc(sizeof(*entry), GFP_ATOMIC); -} - -static void free_qos_entry(void *entry) -{ - struct qos_entry_s *qentry = entry; - unsigned long flags; - - spin_lock_irqsave(&qos_free_list.lock, flags); - if (qos_free_list.cnt < MAX_FREE_LIST_CNT) { - list_add(&qentry->list, &qos_free_list.head); - qos_free_list.cnt++; - spin_unlock_irqrestore(&qos_free_list.lock, flags); - return; - } - spin_unlock_irqrestore(&qos_free_list.lock, flags); - - kfree(entry); -} - -static void free_qos_entry_list(struct list_head *free_list) -{ - struct qos_entry_s *entry, *n; - int total_free = 0; - - list_for_each_entry_safe(entry, n, free_list, list) { - list_del(&entry->list); - kfree(entry); - total_free++; - } - - pr_debug("%s: total_free_cnt=%d\n", __func__, total_free); -} - -void gdm_qos_init(void *nic_ptr) -{ - struct nic *nic = nic_ptr; - struct qos_cb_s *qcb = &nic->qos; - int i; - - for (i = 0; i < QOS_MAX; i++) { - INIT_LIST_HEAD(&qcb->qos_list[i]); - qcb->csr[i].qos_buf_count = 0; - qcb->csr[i].enabled = false; - } - - qcb->qos_list_cnt = 0; - qcb->qos_null_idx = QOS_MAX - 1; - qcb->qos_limit_size = 255; - - spin_lock_init(&qcb->qos_lock); - - init_qos_entry_list(); -} - -void gdm_qos_release_list(void *nic_ptr) -{ - struct nic *nic = nic_ptr; - struct qos_cb_s *qcb = &nic->qos; - unsigned long flags; - struct qos_entry_s *entry, *n; - struct list_head free_list; - int i; - - INIT_LIST_HEAD(&free_list); - - spin_lock_irqsave(&qcb->qos_lock, flags); - - for (i = 0; i < QOS_MAX; i++) { - qcb->csr[i].qos_buf_count = 0; - qcb->csr[i].enabled = false; - } - - qcb->qos_list_cnt = 0; - qcb->qos_null_idx = QOS_MAX - 1; - - for (i = 0; i < QOS_MAX; i++) { - list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) { - list_move_tail(&entry->list, &free_list); - } - } - spin_unlock_irqrestore(&qcb->qos_lock, flags); - free_qos_entry_list(&free_list); -} - -static int chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port) -{ - int i; - - if (csr->classifier_rule_en & IPTYPEOFSERVICE) { - if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) || - ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi)) - return 
1; - } - - if (csr->classifier_rule_en & PROTOCOL) { - if (stream[9] != csr->protocol) - return 1; - } - - if (csr->classifier_rule_en & IPMASKEDSRCADDRESS) { - for (i = 0; i < 4; i++) { - if ((stream[12 + i] & csr->ipsrc_addrmask[i]) != - (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i])) - return 1; - } - } - - if (csr->classifier_rule_en & IPMASKEDDSTADDRESS) { - for (i = 0; i < 4; i++) { - if ((stream[16 + i] & csr->ipdst_addrmask[i]) != - (csr->ipdst_addr[i] & csr->ipdst_addrmask[i])) - return 1; - } - } - - if (csr->classifier_rule_en & PROTOCOLSRCPORTRANGE) { - i = ((port[0] << 8) & 0xff00) + port[1]; - if ((i < csr->srcport_lo) || (i > csr->srcport_hi)) - return 1; - } - - if (csr->classifier_rule_en & PROTOCOLDSTPORTRANGE) { - i = ((port[2] << 8) & 0xff00) + port[3]; - if ((i < csr->dstport_lo) || (i > csr->dstport_hi)) - return 1; - } - - return 0; -} - -static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph) -{ - int ip_ver, i; - struct qos_cb_s *qcb = &nic->qos; - - if (!iph || !tcpudph) - return -1; - - ip_ver = (iph[0] >> 4) & 0xf; - - if (ip_ver != 4) - return -1; - - for (i = 0; i < QOS_MAX; i++) { - if (!qcb->csr[i].enabled) - continue; - if (!qcb->csr[i].classifier_rule_en) - continue; - if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0) - return i; - } - - return -1; -} - -static void extract_qos_list(struct nic *nic, struct list_head *head) -{ - struct qos_cb_s *qcb = &nic->qos; - struct qos_entry_s *entry; - int i; - - INIT_LIST_HEAD(head); - - for (i = 0; i < QOS_MAX; i++) { - if (!qcb->csr[i].enabled) - continue; - if (qcb->csr[i].qos_buf_count >= qcb->qos_limit_size) - continue; - if (list_empty(&qcb->qos_list[i])) - continue; - - entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s, - list); - - list_move_tail(&entry->list, head); - qcb->csr[i].qos_buf_count++; - - if (!list_empty(&qcb->qos_list[i])) - netdev_warn(nic->netdev, "Index(%d) is piled!!\n", i); - } -} - -static void send_qos_list(struct nic *nic, struct list_head *head) -{ - struct qos_entry_s *entry, *n; - - list_for_each_entry_safe(entry, n, head, list) { - list_del(&entry->list); - gdm_wimax_send_tx(entry->skb, entry->dev); - free_qos_entry(entry); - } -} - -int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev) -{ - struct nic *nic = netdev_priv(dev); - int index; - struct qos_cb_s *qcb = &nic->qos; - unsigned long flags; - struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE); - struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN); - struct tcphdr *tcph; - struct qos_entry_s *entry = NULL; - struct list_head send_list; - int ret = 0; - - tcph = (struct tcphdr *)iph + iph->ihl*4; - - if (ethh->h_proto == cpu_to_be16(ETH_P_IP)) { - if (qcb->qos_list_cnt && !qos_free_list.cnt) { - entry = alloc_qos_entry(); - entry->skb = skb; - entry->dev = dev; - netdev_dbg(dev, "qcb->qos_list_cnt=%d\n", - qcb->qos_list_cnt); - } - - spin_lock_irqsave(&qcb->qos_lock, flags); - if (qcb->qos_list_cnt) { - index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph); - if (index == -1) - index = qcb->qos_null_idx; - - if (!entry) { - entry = alloc_qos_entry(); - entry->skb = skb; - entry->dev = dev; - } - - list_add_tail(&entry->list, &qcb->qos_list[index]); - extract_qos_list(nic, &send_list); - spin_unlock_irqrestore(&qcb->qos_lock, flags); - send_qos_list(nic, &send_list); - goto out; - } - spin_unlock_irqrestore(&qcb->qos_lock, flags); - if (entry) - free_qos_entry(entry); - } - - ret = gdm_wimax_send_tx(skb, dev); -out: - return ret; -} - -static int get_csr(struct qos_cb_s 
*qcb, u32 sfid, int mode) -{ - int i; - - for (i = 0; i < qcb->qos_list_cnt; i++) { - if (qcb->csr[i].sfid == sfid) - return i; - } - - if (mode) { - for (i = 0; i < QOS_MAX; i++) { - if (!qcb->csr[i].enabled) { - qcb->csr[i].enabled = true; - qcb->qos_list_cnt++; - return i; - } - } - } - return -1; -} - -#define QOS_CHANGE_DEL 0xFC -#define QOS_ADD 0xFD -#define QOS_REPORT 0xFE - -void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size) -{ - struct nic *nic = nic_ptr; - int i, index, pos; - u32 sfid; - u8 sub_cmd_evt; - struct qos_cb_s *qcb = &nic->qos; - struct qos_entry_s *entry, *n; - struct list_head send_list; - struct list_head free_list; - unsigned long flags; - - sub_cmd_evt = (u8)buf[4]; - - if (sub_cmd_evt == QOS_REPORT) { - spin_lock_irqsave(&qcb->qos_lock, flags); - for (i = 0; i < qcb->qos_list_cnt; i++) { - sfid = ((buf[(i*5) + 6] << 24) & 0xff000000); - sfid += ((buf[(i*5) + 7] << 16) & 0xff0000); - sfid += ((buf[(i*5) + 8] << 8) & 0xff00); - sfid += (buf[(i*5) + 9]); - index = get_csr(qcb, sfid, 0); - if (index == -1) { - spin_unlock_irqrestore(&qcb->qos_lock, flags); - netdev_err(nic->netdev, "QoS ERROR: No SF\n"); - return; - } - qcb->csr[index].qos_buf_count = buf[(i*5) + 10]; - } - - extract_qos_list(nic, &send_list); - spin_unlock_irqrestore(&qcb->qos_lock, flags); - send_qos_list(nic, &send_list); - return; - } - - /* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANG_DEL */ - pos = 6; - sfid = ((buf[pos++] << 24) & 0xff000000); - sfid += ((buf[pos++] << 16) & 0xff0000); - sfid += ((buf[pos++] << 8) & 0xff00); - sfid += (buf[pos++]); - - index = get_csr(qcb, sfid, 1); - if (index == -1) { - netdev_err(nic->netdev, - "QoS ERROR: csr Update Error / Wrong index (%d)\n", - index); - return; - } - - if (sub_cmd_evt == QOS_ADD) { - netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n", - sfid, index); - - spin_lock_irqsave(&qcb->qos_lock, flags); - qcb->csr[index].sfid = sfid; - qcb->csr[index].classifier_rule_en = ((buf[pos++] << 8) & 0xff00); - qcb->csr[index].classifier_rule_en += buf[pos++]; - if (qcb->csr[index].classifier_rule_en == 0) - qcb->qos_null_idx = index; - qcb->csr[index].ip2s_mask = buf[pos++]; - qcb->csr[index].ip2s_lo = buf[pos++]; - qcb->csr[index].ip2s_hi = buf[pos++]; - qcb->csr[index].protocol = buf[pos++]; - qcb->csr[index].ipsrc_addrmask[0] = buf[pos++]; - qcb->csr[index].ipsrc_addrmask[1] = buf[pos++]; - qcb->csr[index].ipsrc_addrmask[2] = buf[pos++]; - qcb->csr[index].ipsrc_addrmask[3] = buf[pos++]; - qcb->csr[index].ipsrc_addr[0] = buf[pos++]; - qcb->csr[index].ipsrc_addr[1] = buf[pos++]; - qcb->csr[index].ipsrc_addr[2] = buf[pos++]; - qcb->csr[index].ipsrc_addr[3] = buf[pos++]; - qcb->csr[index].ipdst_addrmask[0] = buf[pos++]; - qcb->csr[index].ipdst_addrmask[1] = buf[pos++]; - qcb->csr[index].ipdst_addrmask[2] = buf[pos++]; - qcb->csr[index].ipdst_addrmask[3] = buf[pos++]; - qcb->csr[index].ipdst_addr[0] = buf[pos++]; - qcb->csr[index].ipdst_addr[1] = buf[pos++]; - qcb->csr[index].ipdst_addr[2] = buf[pos++]; - qcb->csr[index].ipdst_addr[3] = buf[pos++]; - qcb->csr[index].srcport_lo = ((buf[pos++] << 8) & 0xff00); - qcb->csr[index].srcport_lo += buf[pos++]; - qcb->csr[index].srcport_hi = ((buf[pos++] << 8) & 0xff00); - qcb->csr[index].srcport_hi += buf[pos++]; - qcb->csr[index].dstport_lo = ((buf[pos++] << 8) & 0xff00); - qcb->csr[index].dstport_lo += buf[pos++]; - qcb->csr[index].dstport_hi = ((buf[pos++] << 8) & 0xff00); - qcb->csr[index].dstport_hi += buf[pos++]; - - qcb->qos_limit_size = 254 / qcb->qos_list_cnt; - 
spin_unlock_irqrestore(&qcb->qos_lock, flags); - } else if (sub_cmd_evt == QOS_CHANGE_DEL) { - netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n", - sfid, index); - - INIT_LIST_HEAD(&free_list); - - spin_lock_irqsave(&qcb->qos_lock, flags); - qcb->csr[index].enabled = false; - qcb->qos_list_cnt--; - qcb->qos_limit_size = 254 / qcb->qos_list_cnt; - - list_for_each_entry_safe(entry, n, &qcb->qos_list[index], - list) { - list_move_tail(&entry->list, &free_list); - } - spin_unlock_irqrestore(&qcb->qos_lock, flags); - free_qos_entry_list(&free_list); - } -} diff --git a/drivers/staging/gdm72xx/gdm_qos.h b/drivers/staging/gdm72xx/gdm_qos.h deleted file mode 100644 index bbc8aab338b5..000000000000 --- a/drivers/staging/gdm72xx/gdm_qos.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __GDM72XX_GDM_QOS_H__ -#define __GDM72XX_GDM_QOS_H__ - -#include <linux/types.h> -#include <linux/usb.h> -#include <linux/list.h> - -#define QOS_MAX 16 -#define IPTYPEOFSERVICE 0x8000 -#define PROTOCOL 0x4000 -#define IPMASKEDSRCADDRESS 0x2000 -#define IPMASKEDDSTADDRESS 0x1000 -#define PROTOCOLSRCPORTRANGE 0x800 -#define PROTOCOLDSTPORTRANGE 0x400 -#define DSTMACADDR 0x200 -#define SRCMACADDR 0x100 -#define ETHERTYPE 0x80 -#define IEEE802_1DUSERPRIORITY 0x40 -#define IEEE802_1QVLANID 0x10 - -struct gdm_wimax_csr_s { - bool enabled; - u32 sfid; - u8 qos_buf_count; - u16 classifier_rule_en; - u8 ip2s_lo; - u8 ip2s_hi; - u8 ip2s_mask; - u8 protocol; - u8 ipsrc_addr[16]; - u8 ipsrc_addrmask[16]; - u8 ipdst_addr[16]; - u8 ipdst_addrmask[16]; - u16 srcport_lo; - u16 srcport_hi; - u16 dstport_lo; - u16 dstport_hi; -}; - -struct qos_entry_s { - struct list_head list; - struct sk_buff *skb; - struct net_device *dev; - -}; - -struct qos_cb_s { - struct list_head qos_list[QOS_MAX]; - int qos_list_cnt; - int qos_null_idx; - struct gdm_wimax_csr_s csr[QOS_MAX]; - spinlock_t qos_lock; - int qos_limit_size; -}; - -void gdm_qos_init(void *nic_ptr); -void gdm_qos_release_list(void *nic_ptr); -int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev); -void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size); - -#endif /* __GDM72XX_GDM_QOS_H__ */ diff --git a/drivers/staging/gdm72xx/gdm_sdio.c b/drivers/staging/gdm72xx/gdm_sdio.c deleted file mode 100644 index 1f5a087723ba..000000000000 --- a/drivers/staging/gdm72xx/gdm_sdio.c +++ /dev/null @@ -1,700 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/kernel.h> - -#include <linux/mmc/core.h> -#include <linux/mmc/card.h> -#include <linux/mmc/sdio_func.h> -#include <linux/mmc/sdio_ids.h> - -#include "gdm_sdio.h" -#include "gdm_wimax.h" -#include "sdio_boot.h" -#include "hci.h" - -#define TYPE_A_HEADER_SIZE 4 -#define TYPE_A_LOOKAHEAD_SIZE 16 - -#define MAX_NR_RX_BUF 4 - -#define SDU_TX_BUF_SIZE 2048 -#define TX_BUF_SIZE 2048 -#define TX_CHUNK_SIZE (2048 - TYPE_A_HEADER_SIZE) -#define RX_BUF_SIZE (25*1024) - -#define TX_HZ 2000 -#define TX_INTERVAL (NSEC_PER_SEC/TX_HZ) - -static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx) -{ - struct sdio_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC); - - if (!t) - return NULL; - - t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC); - if (!t->buf) { - kfree(t); - return NULL; - } - - t->tx_cxt = tx; - - return t; -} - -static void free_tx_struct(struct sdio_tx *t) -{ - if (t) { - kfree(t->buf); - kfree(t); - } -} - -static struct sdio_rx *alloc_rx_struct(struct rx_cxt *rx) -{ - struct sdio_rx *r = kzalloc(sizeof(*r), GFP_ATOMIC); - - if (r) - r->rx_cxt = rx; - - return r; -} - -static void free_rx_struct(struct sdio_rx *r) -{ - kfree(r); -} - -/* Before this function is called, spin lock should be locked. */ -static struct sdio_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc) -{ - struct sdio_tx *t; - - if (list_empty(&tx->free_list)) - return NULL; - - t = list_entry(tx->free_list.prev, struct sdio_tx, list); - list_del(&t->list); - - *no_spc = list_empty(&tx->free_list) ? 1 : 0; - - return t; -} - -/* Before this function is called, spin lock should be locked. */ -static void put_tx_struct(struct tx_cxt *tx, struct sdio_tx *t) -{ - list_add_tail(&t->list, &tx->free_list); -} - -/* Before this function is called, spin lock should be locked. */ -static struct sdio_rx *get_rx_struct(struct rx_cxt *rx) -{ - struct sdio_rx *r; - - if (list_empty(&rx->free_list)) - return NULL; - - r = list_entry(rx->free_list.prev, struct sdio_rx, list); - list_del(&r->list); - - return r; -} - -/* Before this function is called, spin lock should be locked. 
*/ -static void put_rx_struct(struct rx_cxt *rx, struct sdio_rx *r) -{ - list_add_tail(&r->list, &rx->free_list); -} - -static void release_sdio(struct sdiowm_dev *sdev) -{ - struct tx_cxt *tx = &sdev->tx; - struct rx_cxt *rx = &sdev->rx; - struct sdio_tx *t, *t_next; - struct sdio_rx *r, *r_next; - - kfree(tx->sdu_buf); - - list_for_each_entry_safe(t, t_next, &tx->free_list, list) { - list_del(&t->list); - free_tx_struct(t); - } - - list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) { - list_del(&t->list); - free_tx_struct(t); - } - - list_for_each_entry_safe(t, t_next, &tx->hci_list, list) { - list_del(&t->list); - free_tx_struct(t); - } - - kfree(rx->rx_buf); - - list_for_each_entry_safe(r, r_next, &rx->free_list, list) { - list_del(&r->list); - free_rx_struct(r); - } - - list_for_each_entry_safe(r, r_next, &rx->req_list, list) { - list_del(&r->list); - free_rx_struct(r); - } -} - -static int init_sdio(struct sdiowm_dev *sdev) -{ - int ret = 0, i; - struct tx_cxt *tx = &sdev->tx; - struct rx_cxt *rx = &sdev->rx; - struct sdio_tx *t; - struct sdio_rx *r; - - INIT_LIST_HEAD(&tx->free_list); - INIT_LIST_HEAD(&tx->sdu_list); - INIT_LIST_HEAD(&tx->hci_list); - - spin_lock_init(&tx->lock); - - tx->sdu_buf = kmalloc(SDU_TX_BUF_SIZE, GFP_KERNEL); - if (!tx->sdu_buf) - goto fail; - - for (i = 0; i < MAX_NR_SDU_BUF; i++) { - t = alloc_tx_struct(tx); - if (!t) { - ret = -ENOMEM; - goto fail; - } - list_add(&t->list, &tx->free_list); - } - - INIT_LIST_HEAD(&rx->free_list); - INIT_LIST_HEAD(&rx->req_list); - - spin_lock_init(&rx->lock); - - for (i = 0; i < MAX_NR_RX_BUF; i++) { - r = alloc_rx_struct(rx); - if (!r) { - ret = -ENOMEM; - goto fail; - } - list_add(&r->list, &rx->free_list); - } - - rx->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL); - if (!rx->rx_buf) - goto fail; - - return 0; - -fail: - release_sdio(sdev); - return ret; -} - -static void send_sdio_pkt(struct sdio_func *func, u8 *data, int len) -{ - int n, blocks, ret, remain; - - sdio_claim_host(func); - - blocks = len / func->cur_blksize; - n = blocks * func->cur_blksize; - if (blocks) { - ret = sdio_memcpy_toio(func, 0, data, n); - if (ret < 0) { - if (ret != -ENOMEDIUM) - dev_err(&func->dev, - "gdmwms: error: ret = %d\n", ret); - goto end_io; - } - } - - remain = len - n; - remain = (remain + 3) & ~3; - - if (remain) { - ret = sdio_memcpy_toio(func, 0, data + n, remain); - if (ret < 0) { - if (ret != -ENOMEDIUM) - dev_err(&func->dev, - "gdmwms: error: ret = %d\n", ret); - goto end_io; - } - } - -end_io: - sdio_release_host(func); -} - -static void send_sdu(struct sdio_func *func, struct tx_cxt *tx) -{ - struct list_head *l, *next; - struct hci_s *hci; - struct sdio_tx *t; - int pos, len, i, estlen, aggr_num = 0, aggr_len; - u8 *buf; - unsigned long flags; - - spin_lock_irqsave(&tx->lock, flags); - - pos = TYPE_A_HEADER_SIZE + HCI_HEADER_SIZE; - list_for_each_entry(t, &tx->sdu_list, list) { - estlen = ((t->len + 3) & ~3) + 4; - if ((pos + estlen) > SDU_TX_BUF_SIZE) - break; - - aggr_num++; - memcpy(tx->sdu_buf + pos, t->buf, t->len); - memset(tx->sdu_buf + pos + t->len, 0, estlen - t->len); - pos += estlen; - } - aggr_len = pos; - - hci = (struct hci_s *)(tx->sdu_buf + TYPE_A_HEADER_SIZE); - hci->cmd_evt = cpu_to_be16(WIMAX_TX_SDU_AGGR); - hci->length = cpu_to_be16(aggr_len - TYPE_A_HEADER_SIZE - - HCI_HEADER_SIZE); - - spin_unlock_irqrestore(&tx->lock, flags); - - dev_dbg(&func->dev, "sdio_send: %*ph\n", aggr_len - TYPE_A_HEADER_SIZE, - tx->sdu_buf + TYPE_A_HEADER_SIZE); - - for (pos = TYPE_A_HEADER_SIZE; pos < aggr_len; pos += 
TX_CHUNK_SIZE) { - len = aggr_len - pos; - len = len > TX_CHUNK_SIZE ? TX_CHUNK_SIZE : len; - buf = tx->sdu_buf + pos - TYPE_A_HEADER_SIZE; - - buf[0] = len & 0xff; - buf[1] = (len >> 8) & 0xff; - buf[2] = (len >> 16) & 0xff; - buf[3] = (pos + len) >= aggr_len ? 0 : 1; - send_sdio_pkt(func, buf, len + TYPE_A_HEADER_SIZE); - } - - spin_lock_irqsave(&tx->lock, flags); - - for (l = tx->sdu_list.next, i = 0; i < aggr_num; i++, l = next) { - next = l->next; - t = list_entry(l, struct sdio_tx, list); - if (t->callback) - t->callback(t->cb_data); - - list_del(l); - put_tx_struct(t->tx_cxt, t); - } - - tx->sdu_stamp = ktime_get(); - spin_unlock_irqrestore(&tx->lock, flags); -} - -static void send_hci(struct sdio_func *func, struct tx_cxt *tx, - struct sdio_tx *t) -{ - unsigned long flags; - - dev_dbg(&func->dev, "sdio_send: %*ph\n", t->len - TYPE_A_HEADER_SIZE, - t->buf + TYPE_A_HEADER_SIZE); - - send_sdio_pkt(func, t->buf, t->len); - - spin_lock_irqsave(&tx->lock, flags); - if (t->callback) - t->callback(t->cb_data); - free_tx_struct(t); - spin_unlock_irqrestore(&tx->lock, flags); -} - -static void do_tx(struct work_struct *work) -{ - struct sdiowm_dev *sdev = container_of(work, struct sdiowm_dev, ws); - struct sdio_func *func = sdev->func; - struct tx_cxt *tx = &sdev->tx; - struct sdio_tx *t = NULL; - ktime_t now, before; - int is_sdu = 0; - long diff; - unsigned long flags; - - spin_lock_irqsave(&tx->lock, flags); - if (!tx->can_send) { - spin_unlock_irqrestore(&tx->lock, flags); - return; - } - - if (!list_empty(&tx->hci_list)) { - t = list_entry(tx->hci_list.next, struct sdio_tx, list); - list_del(&t->list); - is_sdu = 0; - } else if (!tx->stop_sdu_tx && !list_empty(&tx->sdu_list)) { - now = ktime_get(); - before = tx->sdu_stamp; - - diff = ktime_to_ns(ktime_sub(now, before)); - if (diff >= 0 && diff < TX_INTERVAL) { - schedule_work(&sdev->ws); - spin_unlock_irqrestore(&tx->lock, flags); - return; - } - is_sdu = 1; - } - - if (!is_sdu && !t) { - spin_unlock_irqrestore(&tx->lock, flags); - return; - } - - tx->can_send = 0; - - spin_unlock_irqrestore(&tx->lock, flags); - - if (is_sdu) - send_sdu(func, tx); - else - send_hci(func, tx, t); -} - -static int gdm_sdio_send(void *priv_dev, void *data, int len, - void (*cb)(void *data), void *cb_data) -{ - struct sdiowm_dev *sdev = priv_dev; - struct tx_cxt *tx = &sdev->tx; - struct sdio_tx *t; - u8 *pkt = data; - int no_spc = 0; - u16 cmd_evt; - unsigned long flags; - - if (len > TX_BUF_SIZE - TYPE_A_HEADER_SIZE) - return -EINVAL; - - spin_lock_irqsave(&tx->lock, flags); - - cmd_evt = (pkt[0] << 8) | pkt[1]; - if (cmd_evt == WIMAX_TX_SDU) { - t = get_tx_struct(tx, &no_spc); - if (!t) { - /* This case must not happen. */ - spin_unlock_irqrestore(&tx->lock, flags); - return -ENOSPC; - } - list_add_tail(&t->list, &tx->sdu_list); - - memcpy(t->buf, data, len); - - t->len = len; - t->callback = cb; - t->cb_data = cb_data; - } else { - t = alloc_tx_struct(tx); - if (!t) { - spin_unlock_irqrestore(&tx->lock, flags); - return -ENOMEM; - } - list_add_tail(&t->list, &tx->hci_list); - - t->buf[0] = len & 0xff; - t->buf[1] = (len >> 8) & 0xff; - t->buf[2] = (len >> 16) & 0xff; - t->buf[3] = 2; - memcpy(t->buf + TYPE_A_HEADER_SIZE, data, len); - - t->len = len + TYPE_A_HEADER_SIZE; - t->callback = cb; - t->cb_data = cb_data; - } - - if (tx->can_send) - schedule_work(&sdev->ws); - - spin_unlock_irqrestore(&tx->lock, flags); - - if (no_spc) - return -ENOSPC; - - return 0; -} - -/* Handle the HCI, WIMAX_SDU_TX_FLOW. 
*/ -static int control_sdu_tx_flow(struct sdiowm_dev *sdev, u8 *hci_data, int len) -{ - struct tx_cxt *tx = &sdev->tx; - u16 cmd_evt; - unsigned long flags; - - spin_lock_irqsave(&tx->lock, flags); - - cmd_evt = (hci_data[0] << 8) | (hci_data[1]); - if (cmd_evt != WIMAX_SDU_TX_FLOW) - goto out; - - if (hci_data[4] == 0) { - dev_dbg(&sdev->func->dev, "WIMAX ==> STOP SDU TX\n"); - tx->stop_sdu_tx = 1; - } else if (hci_data[4] == 1) { - dev_dbg(&sdev->func->dev, "WIMAX ==> START SDU TX\n"); - tx->stop_sdu_tx = 0; - if (tx->can_send) - schedule_work(&sdev->ws); - /* If free buffer for sdu tx doesn't exist, then tx queue - * should not be woken. For this reason, don't pass the command, - * START_SDU_TX. - */ - if (list_empty(&tx->free_list)) - len = 0; - } - -out: - spin_unlock_irqrestore(&tx->lock, flags); - return len; -} - -static void gdm_sdio_irq(struct sdio_func *func) -{ - struct phy_dev *phy_dev = sdio_get_drvdata(func); - struct sdiowm_dev *sdev = phy_dev->priv_dev; - struct tx_cxt *tx = &sdev->tx; - struct rx_cxt *rx = &sdev->rx; - struct sdio_rx *r; - unsigned long flags; - u8 val, hdr[TYPE_A_LOOKAHEAD_SIZE], *buf; - u32 len, blocks, n; - int ret, remain; - - /* Check interrupt */ - val = sdio_readb(func, 0x13, &ret); - if (val & 0x01) - sdio_writeb(func, 0x01, 0x13, &ret); /* clear interrupt */ - else - return; - - ret = sdio_memcpy_fromio(func, hdr, 0x0, TYPE_A_LOOKAHEAD_SIZE); - if (ret) { - dev_err(&func->dev, - "Cannot read from function %d\n", func->num); - goto done; - } - - len = (hdr[2] << 16) | (hdr[1] << 8) | hdr[0]; - if (len > (RX_BUF_SIZE - TYPE_A_HEADER_SIZE)) { - dev_err(&func->dev, "Too big Type-A size: %d\n", len); - goto done; - } - - if (hdr[3] == 1) { /* Ack */ - u32 *ack_seq = (u32 *)&hdr[4]; - - spin_lock_irqsave(&tx->lock, flags); - tx->can_send = 1; - - if (!list_empty(&tx->sdu_list) || !list_empty(&tx->hci_list)) - schedule_work(&sdev->ws); - spin_unlock_irqrestore(&tx->lock, flags); - dev_dbg(&func->dev, "Ack... 
%0x\n", ntohl(*ack_seq)); - goto done; - } - - memcpy(rx->rx_buf, hdr + TYPE_A_HEADER_SIZE, - TYPE_A_LOOKAHEAD_SIZE - TYPE_A_HEADER_SIZE); - - buf = rx->rx_buf + TYPE_A_LOOKAHEAD_SIZE - TYPE_A_HEADER_SIZE; - remain = len - TYPE_A_LOOKAHEAD_SIZE + TYPE_A_HEADER_SIZE; - if (remain <= 0) - goto end_io; - - blocks = remain / func->cur_blksize; - - if (blocks) { - n = blocks * func->cur_blksize; - ret = sdio_memcpy_fromio(func, buf, 0x0, n); - if (ret) { - dev_err(&func->dev, - "Cannot read from function %d\n", func->num); - goto done; - } - buf += n; - remain -= n; - } - - if (remain) { - ret = sdio_memcpy_fromio(func, buf, 0x0, remain); - if (ret) { - dev_err(&func->dev, - "Cannot read from function %d\n", func->num); - goto done; - } - } - -end_io: - dev_dbg(&func->dev, "sdio_receive: %*ph\n", len, rx->rx_buf); - - len = control_sdu_tx_flow(sdev, rx->rx_buf, len); - - spin_lock_irqsave(&rx->lock, flags); - - if (!list_empty(&rx->req_list)) { - r = list_entry(rx->req_list.next, struct sdio_rx, list); - spin_unlock_irqrestore(&rx->lock, flags); - if (r->callback) - r->callback(r->cb_data, rx->rx_buf, len); - spin_lock_irqsave(&rx->lock, flags); - list_del(&r->list); - put_rx_struct(rx, r); - } - - spin_unlock_irqrestore(&rx->lock, flags); - -done: - sdio_writeb(func, 0x00, 0x10, &ret); /* PCRRT */ - if (!phy_dev->netdev) - register_wimax_device(phy_dev, &func->dev); -} - -static int gdm_sdio_receive(void *priv_dev, - void (*cb)(void *cb_data, void *data, int len), - void *cb_data) -{ - struct sdiowm_dev *sdev = priv_dev; - struct rx_cxt *rx = &sdev->rx; - struct sdio_rx *r; - unsigned long flags; - - spin_lock_irqsave(&rx->lock, flags); - r = get_rx_struct(rx); - if (!r) { - spin_unlock_irqrestore(&rx->lock, flags); - return -ENOMEM; - } - - r->callback = cb; - r->cb_data = cb_data; - - list_add_tail(&r->list, &rx->req_list); - spin_unlock_irqrestore(&rx->lock, flags); - - return 0; -} - -static int sdio_wimax_probe(struct sdio_func *func, - const struct sdio_device_id *id) -{ - int ret; - struct phy_dev *phy_dev = NULL; - struct sdiowm_dev *sdev = NULL; - - dev_info(&func->dev, "Found GDM SDIO VID = 0x%04x PID = 0x%04x...\n", - func->vendor, func->device); - dev_info(&func->dev, "GCT WiMax driver version %s\n", DRIVER_VERSION); - - sdio_claim_host(func); - sdio_enable_func(func); - sdio_claim_irq(func, gdm_sdio_irq); - - ret = sdio_boot(func); - if (ret) - return ret; - - phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL); - if (!phy_dev) { - ret = -ENOMEM; - goto out; - } - sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); - if (!sdev) { - ret = -ENOMEM; - goto out; - } - - phy_dev->priv_dev = (void *)sdev; - phy_dev->send_func = gdm_sdio_send; - phy_dev->rcv_func = gdm_sdio_receive; - - ret = init_sdio(sdev); - if (ret < 0) - goto out; - - sdev->func = func; - - sdio_writeb(func, 1, 0x14, &ret); /* Enable interrupt */ - sdio_release_host(func); - - INIT_WORK(&sdev->ws, do_tx); - - sdio_set_drvdata(func, phy_dev); -out: - if (ret) { - kfree(phy_dev); - kfree(sdev); - } - - return ret; -} - -static void sdio_wimax_remove(struct sdio_func *func) -{ - struct phy_dev *phy_dev = sdio_get_drvdata(func); - struct sdiowm_dev *sdev = phy_dev->priv_dev; - - cancel_work_sync(&sdev->ws); - if (phy_dev->netdev) - unregister_wimax_device(phy_dev); - sdio_claim_host(func); - sdio_release_irq(func); - sdio_disable_func(func); - sdio_release_host(func); - release_sdio(sdev); - - kfree(sdev); - kfree(phy_dev); -} - -static const struct sdio_device_id sdio_wimax_ids[] = { - { SDIO_DEVICE(0x0296, 0x5347) }, - {0} -}; - 
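A note on the Type-A framing used throughout gdm_sdio.c above: send_sdu() slices the aggregated buffer into TX_CHUNK_SIZE pieces and prefixes each with a four-byte header carrying a 24-bit little-endian length plus a flag that is 0 only on the final chunk, and send_sdio_pkt() then pushes each piece out in block-sized writes. A standalone sketch of the header construction; the chunk size and total length below are made-up demo values.

#include <stdint.h>
#include <stdio.h>

#define TYPE_A_HEADER_SIZE 4
#define CHUNK 16 /* stand-in for TX_CHUNK_SIZE */

/* 24-bit little-endian length plus a "more chunks follow" flag. */
static void fill_type_a_header(uint8_t *hdr, uint32_t len, int more)
{
    hdr[0] = len & 0xff;
    hdr[1] = (len >> 8) & 0xff;
    hdr[2] = (len >> 16) & 0xff;
    hdr[3] = more ? 1 : 0;
}

int main(void)
{
    uint32_t total = 40; /* pretend aggregated SDU length */
    uint32_t pos;

    for (pos = 0; pos < total; pos += CHUNK) {
        uint32_t len = total - pos > CHUNK ? CHUNK : total - pos;
        uint8_t hdr[TYPE_A_HEADER_SIZE];

        fill_type_a_header(hdr, len, pos + len < total);
        printf("chunk @%2u: len=%2u more=%u\n",
               (unsigned)pos, (unsigned)len, (unsigned)hdr[3]);
    }
    return 0;
}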
-MODULE_DEVICE_TABLE(sdio, sdio_wimax_ids); - -static struct sdio_driver sdio_wimax_driver = { - .probe = sdio_wimax_probe, - .remove = sdio_wimax_remove, - .name = "sdio_wimax", - .id_table = sdio_wimax_ids, -}; - -static int __init sdio_gdm_wimax_init(void) -{ - return sdio_register_driver(&sdio_wimax_driver); -} - -static void __exit sdio_gdm_wimax_exit(void) -{ - sdio_unregister_driver(&sdio_wimax_driver); -} - -module_init(sdio_gdm_wimax_init); -module_exit(sdio_gdm_wimax_exit); - -MODULE_VERSION(DRIVER_VERSION); -MODULE_DESCRIPTION("GCT WiMax SDIO Device Driver"); -MODULE_AUTHOR("Ethan Park"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/gdm72xx/gdm_sdio.h b/drivers/staging/gdm72xx/gdm_sdio.h deleted file mode 100644 index aa7dad22a219..000000000000 --- a/drivers/staging/gdm72xx/gdm_sdio.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __GDM72XX_GDM_SDIO_H__ -#define __GDM72XX_GDM_SDIO_H__ - -#include <linux/types.h> -#include <linux/ktime.h> - -#define MAX_NR_SDU_BUF 64 - -struct sdio_tx { - struct list_head list; - struct tx_cxt *tx_cxt; - u8 *buf; - int len; - void (*callback)(void *cb_data); - void *cb_data; -}; - -struct tx_cxt { - struct list_head free_list; - struct list_head sdu_list; - struct list_head hci_list; - ktime_t sdu_stamp; - u8 *sdu_buf; - spinlock_t lock; - int can_send; - int stop_sdu_tx; -}; - -struct sdio_rx { - struct list_head list; - struct rx_cxt *rx_cxt; - void (*callback)(void *cb_data, void *data, int len); - void *cb_data; -}; - -struct rx_cxt { - struct list_head free_list; - struct list_head req_list; - u8 *rx_buf; - spinlock_t lock; -}; - -struct sdiowm_dev { - struct sdio_func *func; - struct tx_cxt tx; - struct rx_cxt rx; - struct work_struct ws; -}; - -#endif /* __GDM72XX_GDM_SDIO_H__ */ diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c deleted file mode 100644 index 16e497d9d0cf..000000000000 --- a/drivers/staging/gdm72xx/gdm_usb.c +++ /dev/null @@ -1,789 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/usb.h> -#include <asm/byteorder.h> -#include <linux/kthread.h> - -#include "gdm_usb.h" -#include "gdm_wimax.h" -#include "usb_boot.h" -#include "hci.h" - -#include "usb_ids.h" - -MODULE_DEVICE_TABLE(usb, id_table); - -#define TX_BUF_SIZE 2048 - -#if defined(CONFIG_WIMAX_GDM72XX_WIMAX2) -#define RX_BUF_SIZE (128*1024) /* For packet aggregation */ -#else -#define RX_BUF_SIZE 2048 -#endif - -#define GDM7205_PADDING 256 - -#define DOWNLOAD_CONF_VALUE 0x21 - -#ifdef CONFIG_WIMAX_GDM72XX_K_MODE - -static DECLARE_WAIT_QUEUE_HEAD(k_wait); -static LIST_HEAD(k_list); -static DEFINE_SPINLOCK(k_lock); -static int k_mode_stop; - -#define K_WAIT_TIME (2 * HZ / 100) - -#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */ - -static struct usb_tx *alloc_tx_struct(struct tx_cxt *tx) -{ - struct usb_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC); - - if (!t) - return NULL; - - t->urb = usb_alloc_urb(0, GFP_ATOMIC); - t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC); - if (!t->urb || !t->buf) { - usb_free_urb(t->urb); - kfree(t->buf); - kfree(t); - return NULL; - } - - t->tx_cxt = tx; - - return t; -} - -static void free_tx_struct(struct usb_tx *t) -{ - if (t) { - usb_free_urb(t->urb); - kfree(t->buf); - kfree(t); - } -} - -static struct usb_rx *alloc_rx_struct(struct rx_cxt *rx) -{ - struct usb_rx *r = kzalloc(sizeof(*r), GFP_ATOMIC); - - if (!r) - return NULL; - - r->urb = usb_alloc_urb(0, GFP_ATOMIC); - r->buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC); - if (!r->urb || !r->buf) { - usb_free_urb(r->urb); - kfree(r->buf); - kfree(r); - return NULL; - } - - r->rx_cxt = rx; - return r; -} - -static void free_rx_struct(struct usb_rx *r) -{ - if (r) { - usb_free_urb(r->urb); - kfree(r->buf); - kfree(r); - } -} - -/* Before this function is called, spin lock should be locked. */ -static struct usb_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc) -{ - struct usb_tx *t; - - if (list_empty(&tx->free_list)) { - *no_spc = 1; - return NULL; - } - - t = list_entry(tx->free_list.next, struct usb_tx, list); - list_del(&t->list); - - *no_spc = list_empty(&tx->free_list) ? 1 : 0; - - return t; -} - -/* Before this function is called, spin lock should be locked. */ -static void put_tx_struct(struct tx_cxt *tx, struct usb_tx *t) -{ - list_add_tail(&t->list, &tx->free_list); -} - -/* Before this function is called, spin lock should be locked. */ -static struct usb_rx *get_rx_struct(struct rx_cxt *rx) -{ - struct usb_rx *r; - - if (list_empty(&rx->free_list)) { - r = alloc_rx_struct(rx); - if (!r) - return NULL; - - list_add(&r->list, &rx->free_list); - } - - r = list_entry(rx->free_list.next, struct usb_rx, list); - list_move_tail(&r->list, &rx->used_list); - - return r; -} - -/* Before this function is called, spin lock should be locked. 
*/ -static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r) -{ - list_move(&r->list, &rx->free_list); -} - -static void release_usb(struct usbwm_dev *udev) -{ - struct tx_cxt *tx = &udev->tx; - struct rx_cxt *rx = &udev->rx; - struct usb_tx *t, *t_next; - struct usb_rx *r, *r_next; - unsigned long flags; - - spin_lock_irqsave(&tx->lock, flags); - - list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) { - list_del(&t->list); - free_tx_struct(t); - } - - list_for_each_entry_safe(t, t_next, &tx->hci_list, list) { - list_del(&t->list); - free_tx_struct(t); - } - - list_for_each_entry_safe(t, t_next, &tx->free_list, list) { - list_del(&t->list); - free_tx_struct(t); - } - - spin_unlock_irqrestore(&tx->lock, flags); - - spin_lock_irqsave(&rx->lock, flags); - - list_for_each_entry_safe(r, r_next, &rx->free_list, list) { - list_del(&r->list); - free_rx_struct(r); - } - - list_for_each_entry_safe(r, r_next, &rx->used_list, list) { - list_del(&r->list); - free_rx_struct(r); - } - - spin_unlock_irqrestore(&rx->lock, flags); -} - -static int init_usb(struct usbwm_dev *udev) -{ - int ret = 0, i; - struct tx_cxt *tx = &udev->tx; - struct rx_cxt *rx = &udev->rx; - struct usb_tx *t; - struct usb_rx *r; - unsigned long flags; - - INIT_LIST_HEAD(&tx->free_list); - INIT_LIST_HEAD(&tx->sdu_list); - INIT_LIST_HEAD(&tx->hci_list); -#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE) - INIT_LIST_HEAD(&tx->pending_list); -#endif - - INIT_LIST_HEAD(&rx->free_list); - INIT_LIST_HEAD(&rx->used_list); - - spin_lock_init(&tx->lock); - spin_lock_init(&rx->lock); - - spin_lock_irqsave(&tx->lock, flags); - for (i = 0; i < MAX_NR_SDU_BUF; i++) { - t = alloc_tx_struct(tx); - if (!t) { - spin_unlock_irqrestore(&tx->lock, flags); - ret = -ENOMEM; - goto fail; - } - list_add(&t->list, &tx->free_list); - } - spin_unlock_irqrestore(&tx->lock, flags); - - r = alloc_rx_struct(rx); - if (!r) { - ret = -ENOMEM; - goto fail; - } - - spin_lock_irqsave(&rx->lock, flags); - list_add(&r->list, &rx->free_list); - spin_unlock_irqrestore(&rx->lock, flags); - return ret; - -fail: - release_usb(udev); - return ret; -} - -static void __gdm_usb_send_complete(struct urb *urb) -{ - struct usb_tx *t = urb->context; - struct tx_cxt *tx = t->tx_cxt; - u8 *pkt = t->buf; - u16 cmd_evt; - - /* Completion by usb_unlink_urb */ - if (urb->status == -ECONNRESET) - return; - - if (t->callback) - t->callback(t->cb_data); - - /* Delete from sdu list or hci list. 
*/ - list_del(&t->list); - - cmd_evt = (pkt[0] << 8) | pkt[1]; - if (cmd_evt == WIMAX_TX_SDU) - put_tx_struct(tx, t); - else - free_tx_struct(t); -} - -static void gdm_usb_send_complete(struct urb *urb) -{ - struct usb_tx *t = urb->context; - struct tx_cxt *tx = t->tx_cxt; - unsigned long flags; - - spin_lock_irqsave(&tx->lock, flags); - __gdm_usb_send_complete(urb); - spin_unlock_irqrestore(&tx->lock, flags); -} - -static int gdm_usb_send(void *priv_dev, void *data, int len, - void (*cb)(void *data), void *cb_data) -{ - struct usbwm_dev *udev = priv_dev; - struct usb_device *usbdev = udev->usbdev; - struct tx_cxt *tx = &udev->tx; - struct usb_tx *t; - int padding = udev->padding; - int no_spc = 0, ret; - u8 *pkt = data; - u16 cmd_evt; - unsigned long flags; -#ifdef CONFIG_WIMAX_GDM72XX_K_MODE - unsigned long flags2; -#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */ - - if (!udev->usbdev) { - dev_err(&usbdev->dev, "%s: No such device\n", __func__); - return -ENODEV; - } - - if (len > TX_BUF_SIZE - padding - 1) - return -EINVAL; - - spin_lock_irqsave(&tx->lock, flags); - - cmd_evt = (pkt[0] << 8) | pkt[1]; - if (cmd_evt == WIMAX_TX_SDU) { - t = get_tx_struct(tx, &no_spc); - if (!t) { - /* This case must not happen. */ - spin_unlock_irqrestore(&tx->lock, flags); - return -ENOSPC; - } - list_add_tail(&t->list, &tx->sdu_list); - } else { - t = alloc_tx_struct(tx); - if (!t) { - spin_unlock_irqrestore(&tx->lock, flags); - return -ENOMEM; - } - list_add_tail(&t->list, &tx->hci_list); - } - - memcpy(t->buf + padding, data, len); - t->callback = cb; - t->cb_data = cb_data; - - /* In some cases, USB Module of WiMax is blocked when data size is - * the multiple of 512. So, increment length by one in that case. - */ - if ((len % 512) == 0) - len++; - - usb_fill_bulk_urb(t->urb, usbdev, usb_sndbulkpipe(usbdev, 1), t->buf, - len + padding, gdm_usb_send_complete, t); - - dev_dbg(&usbdev->dev, "usb_send: %*ph\n", len + padding, t->buf); - -#ifdef CONFIG_WIMAX_GDM72XX_USB_PM - if (usbdev->state & USB_STATE_SUSPENDED) { - list_add_tail(&t->p_list, &tx->pending_list); - schedule_work(&udev->pm_ws); - goto out; - } -#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ - -#ifdef CONFIG_WIMAX_GDM72XX_K_MODE - if (udev->bw_switch) { - list_add_tail(&t->p_list, &tx->pending_list); - goto out; - } else if (cmd_evt == WIMAX_SCAN) { - struct rx_cxt *rx; - struct usb_rx *r; - - rx = &udev->rx; - - spin_lock_irqsave(&rx->lock, flags2); - list_for_each_entry(r, &rx->used_list, list) - usb_unlink_urb(r->urb); - spin_unlock_irqrestore(&rx->lock, flags2); - - udev->bw_switch = 1; - - spin_lock_irqsave(&k_lock, flags2); - list_add_tail(&udev->list, &k_list); - spin_unlock_irqrestore(&k_lock, flags2); - - wake_up(&k_wait); - } -#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */ - - ret = usb_submit_urb(t->urb, GFP_ATOMIC); - if (ret) - goto send_fail; - -#ifdef CONFIG_WIMAX_GDM72XX_USB_PM - usb_mark_last_busy(usbdev); -#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ - -#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE) -out: -#endif - spin_unlock_irqrestore(&tx->lock, flags); - - if (no_spc) - return -ENOSPC; - - return 0; - -send_fail: - t->callback = NULL; - __gdm_usb_send_complete(t->urb); - spin_unlock_irqrestore(&tx->lock, flags); - return ret; -} - -static void gdm_usb_rcv_complete(struct urb *urb) -{ - struct usb_rx *r = urb->context; - struct rx_cxt *rx = r->rx_cxt; - struct usbwm_dev *udev = container_of(r->rx_cxt, struct usbwm_dev, rx); - struct tx_cxt *tx = &udev->tx; - struct usb_tx *t; - u16 cmd_evt; - unsigned long 
flags, flags2; - struct usb_device *dev = urb->dev; - - /* Completion by usb_unlink_urb */ - if (urb->status == -ECONNRESET) - return; - - spin_lock_irqsave(&tx->lock, flags); - - if (!urb->status) { - cmd_evt = (r->buf[0] << 8) | (r->buf[1]); - - dev_dbg(&dev->dev, "usb_receive: %*ph\n", urb->actual_length, - r->buf); - - if (cmd_evt == WIMAX_SDU_TX_FLOW) { - if (r->buf[4] == 0) { - dev_dbg(&dev->dev, "WIMAX ==> STOP SDU TX\n"); - list_for_each_entry(t, &tx->sdu_list, list) - usb_unlink_urb(t->urb); - } else if (r->buf[4] == 1) { - dev_dbg(&dev->dev, "WIMAX ==> START SDU TX\n"); - list_for_each_entry(t, &tx->sdu_list, list) { - usb_submit_urb(t->urb, GFP_ATOMIC); - } - /* If free buffer for sdu tx doesn't - * exist, then tx queue should not be - * woken. For this reason, don't pass - * the command, START_SDU_TX. - */ - if (list_empty(&tx->free_list)) - urb->actual_length = 0; - } - } - } - - if (!urb->status && r->callback) - r->callback(r->cb_data, r->buf, urb->actual_length); - - spin_lock_irqsave(&rx->lock, flags2); - put_rx_struct(rx, r); - spin_unlock_irqrestore(&rx->lock, flags2); - - spin_unlock_irqrestore(&tx->lock, flags); - -#ifdef CONFIG_WIMAX_GDM72XX_USB_PM - usb_mark_last_busy(dev); -#endif -} - -static int gdm_usb_receive(void *priv_dev, - void (*cb)(void *cb_data, void *data, int len), - void *cb_data) -{ - struct usbwm_dev *udev = priv_dev; - struct usb_device *usbdev = udev->usbdev; - struct rx_cxt *rx = &udev->rx; - struct usb_rx *r; - unsigned long flags; - - if (!udev->usbdev) { - dev_err(&usbdev->dev, "%s: No such device\n", __func__); - return -ENODEV; - } - - spin_lock_irqsave(&rx->lock, flags); - r = get_rx_struct(rx); - spin_unlock_irqrestore(&rx->lock, flags); - - if (!r) - return -ENOMEM; - - r->callback = cb; - r->cb_data = cb_data; - - usb_fill_bulk_urb(r->urb, usbdev, usb_rcvbulkpipe(usbdev, 0x82), r->buf, - RX_BUF_SIZE, gdm_usb_rcv_complete, r); - - return usb_submit_urb(r->urb, GFP_ATOMIC); -} - -#ifdef CONFIG_WIMAX_GDM72XX_USB_PM -static void do_pm_control(struct work_struct *work) -{ - struct usbwm_dev *udev = container_of(work, struct usbwm_dev, pm_ws); - struct tx_cxt *tx = &udev->tx; - int ret; - unsigned long flags; - - ret = usb_autopm_get_interface(udev->intf); - if (!ret) - usb_autopm_put_interface(udev->intf); - - spin_lock_irqsave(&tx->lock, flags); - if (!(udev->usbdev->state & USB_STATE_SUSPENDED) && - (!list_empty(&tx->hci_list) || !list_empty(&tx->sdu_list))) { - struct usb_tx *t, *temp; - - list_for_each_entry_safe(t, temp, &tx->pending_list, p_list) { - list_del(&t->p_list); - ret = usb_submit_urb(t->urb, GFP_ATOMIC); - - if (ret) { - t->callback = NULL; - __gdm_usb_send_complete(t->urb); - } - } - } - spin_unlock_irqrestore(&tx->lock, flags); -} -#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ - -static int gdm_usb_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - int ret = 0; - u8 bConfigurationValue; - struct phy_dev *phy_dev = NULL; - struct usbwm_dev *udev = NULL; - u16 idVendor, idProduct, bcdDevice; - - struct usb_device *usbdev = interface_to_usbdev(intf); - - usb_get_dev(usbdev); - bConfigurationValue = usbdev->actconfig->desc.bConfigurationValue; - - /*USB description is set up with Little-Endian*/ - idVendor = le16_to_cpu(usbdev->descriptor.idVendor); - idProduct = le16_to_cpu(usbdev->descriptor.idProduct); - bcdDevice = le16_to_cpu(usbdev->descriptor.bcdDevice); - - dev_info(&intf->dev, "Found GDM USB VID = 0x%04x PID = 0x%04x...\n", - idVendor, idProduct); - dev_info(&intf->dev, "GCT WiMax driver version %s\n", 
DRIVER_VERSION); - - - if (idProduct == EMERGENCY_PID) { - ret = usb_emergency(usbdev); - goto out; - } - - /* Support for EEPROM bootloader */ - if (bConfigurationValue == DOWNLOAD_CONF_VALUE || - idProduct & B_DOWNLOAD) { - ret = usb_boot(usbdev, bcdDevice); - goto out; - } - - phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL); - if (!phy_dev) { - ret = -ENOMEM; - goto out; - } - udev = kzalloc(sizeof(*udev), GFP_KERNEL); - if (!udev) { - ret = -ENOMEM; - goto out; - } - - if (idProduct == 0x7205 || idProduct == 0x7206) - udev->padding = GDM7205_PADDING; - else - udev->padding = 0; - - phy_dev->priv_dev = (void *)udev; - phy_dev->send_func = gdm_usb_send; - phy_dev->rcv_func = gdm_usb_receive; - - ret = init_usb(udev); - if (ret < 0) - goto out; - - udev->usbdev = usbdev; - -#ifdef CONFIG_WIMAX_GDM72XX_USB_PM - udev->intf = intf; - - intf->needs_remote_wakeup = 1; - device_init_wakeup(&intf->dev, 1); - - pm_runtime_set_autosuspend_delay(&usbdev->dev, 10 * 1000); /* msec */ - - INIT_WORK(&udev->pm_ws, do_pm_control); -#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ - - ret = register_wimax_device(phy_dev, &intf->dev); - if (ret) - release_usb(udev); - -out: - if (ret) { - kfree(phy_dev); - kfree(udev); - usb_put_dev(usbdev); - } else { - usb_set_intfdata(intf, phy_dev); - } - return ret; -} - -static void gdm_usb_disconnect(struct usb_interface *intf) -{ - u8 bConfigurationValue; - struct phy_dev *phy_dev; - struct usbwm_dev *udev; - u16 idProduct; - struct usb_device *usbdev = interface_to_usbdev(intf); - - bConfigurationValue = usbdev->actconfig->desc.bConfigurationValue; - phy_dev = usb_get_intfdata(intf); - - /*USB description is set up with Little-Endian*/ - idProduct = le16_to_cpu(usbdev->descriptor.idProduct); - - if (idProduct != EMERGENCY_PID && - bConfigurationValue != DOWNLOAD_CONF_VALUE && - (idProduct & B_DOWNLOAD) == 0) { - udev = phy_dev->priv_dev; - udev->usbdev = NULL; - - unregister_wimax_device(phy_dev); - release_usb(udev); - kfree(udev); - kfree(phy_dev); - } - - usb_put_dev(usbdev); -} - -#ifdef CONFIG_WIMAX_GDM72XX_USB_PM -static int gdm_suspend(struct usb_interface *intf, pm_message_t pm_msg) -{ - struct phy_dev *phy_dev; - struct usbwm_dev *udev; - struct rx_cxt *rx; - struct usb_rx *r; - unsigned long flags; - - phy_dev = usb_get_intfdata(intf); - if (!phy_dev) - return 0; - - udev = phy_dev->priv_dev; - rx = &udev->rx; - - spin_lock_irqsave(&rx->lock, flags); - - list_for_each_entry(r, &rx->used_list, list) - usb_unlink_urb(r->urb); - - spin_unlock_irqrestore(&rx->lock, flags); - - return 0; -} - -static int gdm_resume(struct usb_interface *intf) -{ - struct phy_dev *phy_dev; - struct usbwm_dev *udev; - struct rx_cxt *rx; - struct usb_rx *r; - unsigned long flags; - - phy_dev = usb_get_intfdata(intf); - if (!phy_dev) - return 0; - - udev = phy_dev->priv_dev; - rx = &udev->rx; - - spin_lock_irqsave(&rx->lock, flags); - - list_for_each_entry(r, &rx->used_list, list) - usb_submit_urb(r->urb, GFP_ATOMIC); - - spin_unlock_irqrestore(&rx->lock, flags); - - return 0; -} - -#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */ - -#ifdef CONFIG_WIMAX_GDM72XX_K_MODE -static int k_mode_thread(void *arg) -{ - struct usbwm_dev *udev; - struct tx_cxt *tx; - struct rx_cxt *rx; - struct usb_tx *t, *temp; - struct usb_rx *r; - unsigned long flags, flags2, expire; - int ret; - - while (!k_mode_stop) { - spin_lock_irqsave(&k_lock, flags2); - while (!list_empty(&k_list)) { - udev = list_entry(k_list.next, struct usbwm_dev, list); - tx = &udev->tx; - rx = &udev->rx; - - list_del(&udev->list); - 
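One quirk worth noting from gdm_usb_send() earlier in gdm_usb.c: per its in-line comment, the WiMAX USB module can stall when a bulk transfer length is an exact multiple of 512 bytes, so the driver bumps such lengths by one byte before filling the URB. A trivial standalone sketch of that adjustment (the sample lengths are arbitrary):

#include <stdio.h>

/* Keep a bulk-out length off an exact 512-byte boundary. */
static int adjust_tx_len(int len)
{
    return (len % 512) == 0 ? len + 1 : len;
}

int main(void)
{
    const int samples[] = { 100, 512, 1000, 1024 };
    unsigned i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%d -> %d\n", samples[i], adjust_tx_len(samples[i]));
    return 0;
}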
spin_unlock_irqrestore(&k_lock, flags2); - - expire = jiffies + K_WAIT_TIME; - while (time_before(jiffies, expire)) - schedule_timeout(K_WAIT_TIME); - - spin_lock_irqsave(&rx->lock, flags); - - list_for_each_entry(r, &rx->used_list, list) - usb_submit_urb(r->urb, GFP_ATOMIC); - - spin_unlock_irqrestore(&rx->lock, flags); - - spin_lock_irqsave(&tx->lock, flags); - - list_for_each_entry_safe(t, temp, &tx->pending_list, - p_list) { - list_del(&t->p_list); - ret = usb_submit_urb(t->urb, GFP_ATOMIC); - - if (ret) { - t->callback = NULL; - __gdm_usb_send_complete(t->urb); - } - } - - udev->bw_switch = 0; - spin_unlock_irqrestore(&tx->lock, flags); - - spin_lock_irqsave(&k_lock, flags2); - } - wait_event_interruptible_lock_irq(k_wait, - !list_empty(&k_list) || - k_mode_stop, k_lock); - spin_unlock_irqrestore(&k_lock, flags2); - } - return 0; -} -#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */ - -static struct usb_driver gdm_usb_driver = { - .name = "gdm_wimax", - .probe = gdm_usb_probe, - .disconnect = gdm_usb_disconnect, - .id_table = id_table, -#ifdef CONFIG_WIMAX_GDM72XX_USB_PM - .supports_autosuspend = 1, - .suspend = gdm_suspend, - .resume = gdm_resume, - .reset_resume = gdm_resume, -#endif -}; - -static int __init usb_gdm_wimax_init(void) -{ -#ifdef CONFIG_WIMAX_GDM72XX_K_MODE - kthread_run(k_mode_thread, NULL, "k_mode_wimax"); -#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */ - return usb_register(&gdm_usb_driver); -} - -static void __exit usb_gdm_wimax_exit(void) -{ -#ifdef CONFIG_WIMAX_GDM72XX_K_MODE - k_mode_stop = 1; - wake_up(&k_wait); -#endif - usb_deregister(&gdm_usb_driver); -} - -module_init(usb_gdm_wimax_init); -module_exit(usb_gdm_wimax_exit); - -MODULE_VERSION(DRIVER_VERSION); -MODULE_DESCRIPTION("GCT WiMax Device Driver"); -MODULE_AUTHOR("Ethan Park"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/gdm72xx/gdm_usb.h b/drivers/staging/gdm72xx/gdm_usb.h deleted file mode 100644 index 8e58a25e7143..000000000000 --- a/drivers/staging/gdm72xx/gdm_usb.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __GDM72XX_GDM_USB_H__ -#define __GDM72XX_GDM_USB_H__ - -#include <linux/types.h> -#include <linux/usb.h> -#include <linux/list.h> - -#define B_DIFF_DL_DRV (1 << 4) -#define B_DOWNLOAD (1 << 5) -#define MAX_NR_SDU_BUF 64 - -struct usb_tx { - struct list_head list; -#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE) - struct list_head p_list; -#endif - struct tx_cxt *tx_cxt; - struct urb *urb; - u8 *buf; - void (*callback)(void *cb_data); - void *cb_data; -}; - -struct tx_cxt { - struct list_head free_list; - struct list_head sdu_list; - struct list_head hci_list; -#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE) - struct list_head pending_list; -#endif - spinlock_t lock; -}; - -struct usb_rx { - struct list_head list; - struct rx_cxt *rx_cxt; - struct urb *urb; - u8 *buf; - void (*callback)(void *cb_data, void *data, int len); - void *cb_data; -}; - -struct rx_cxt { - struct list_head free_list; - struct list_head used_list; - spinlock_t lock; -}; - -struct usbwm_dev { - struct usb_device *usbdev; -#ifdef CONFIG_WIMAX_GDM72XX_USB_PM - struct work_struct pm_ws; - - struct usb_interface *intf; -#endif -#ifdef CONFIG_WIMAX_GDM72XX_K_MODE - int bw_switch; - struct list_head list; -#endif - struct tx_cxt tx; - struct rx_cxt rx; - int padding; -}; - -#endif /* __GDM72XX_GDM_USB_H__ */ diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c deleted file mode 100644 index ba03f9386567..000000000000 --- a/drivers/staging/gdm72xx/gdm_wimax.c +++ /dev/null @@ -1,815 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/etherdevice.h> -#include <asm/byteorder.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <linux/udp.h> -#include <linux/in.h> - -#include "gdm_wimax.h" -#include "hci.h" -#include "wm_ioctl.h" -#include "netlink_k.h" - -#define gdm_wimax_send(n, d, l) \ - (n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, NULL, NULL) -#define gdm_wimax_send_with_cb(n, d, l, c, b) \ - (n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, c, b) -#define gdm_wimax_rcv_with_cb(n, c, b) \ - (n->phy_dev->rcv_func)(n->phy_dev->priv_dev, c, b) - -#define EVT_MAX_SIZE 2048 - -struct evt_entry { - struct list_head list; - struct net_device *dev; - char evt_data[EVT_MAX_SIZE]; - int size; -}; - -static struct { - int ref_cnt; - struct sock *sock; - struct list_head evtq; - spinlock_t evt_lock; - struct list_head freeq; - struct work_struct ws; -} wm_event; - -static u8 gdm_wimax_macaddr[6] = {0x00, 0x0a, 0x3b, 0xf0, 0x01, 0x30}; - -static inline int gdm_wimax_header(struct sk_buff **pskb) -{ - u16 buf[HCI_HEADER_SIZE / sizeof(u16)]; - struct hci_s *hci = (struct hci_s *)buf; - struct sk_buff *skb = *pskb; - - if (unlikely(skb_headroom(skb) < HCI_HEADER_SIZE)) { - struct sk_buff *skb2; - - skb2 = skb_realloc_headroom(skb, HCI_HEADER_SIZE); - if (!skb2) - return -ENOMEM; - if (skb->sk) - skb_set_owner_w(skb2, skb->sk); - kfree_skb(skb); - skb = skb2; - } - - skb_push(skb, HCI_HEADER_SIZE); - hci->cmd_evt = cpu_to_be16(WIMAX_TX_SDU); - hci->length = cpu_to_be16(skb->len - HCI_HEADER_SIZE); - memcpy(skb->data, buf, HCI_HEADER_SIZE); - - *pskb = skb; - return 0; -} - -static inline struct evt_entry *alloc_event_entry(void) -{ - return kmalloc(sizeof(struct evt_entry), GFP_ATOMIC); -} - -static struct evt_entry *get_event_entry(void) -{ - struct evt_entry *e; - - if (list_empty(&wm_event.freeq)) { - e = alloc_event_entry(); - } else { - e = list_entry(wm_event.freeq.next, struct evt_entry, list); - list_del(&e->list); - } - - return e; -} - -static void put_event_entry(struct evt_entry *e) -{ - BUG_ON(!e); - - list_add_tail(&e->list, &wm_event.freeq); -} - -static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg, - int len) -{ - struct nic *nic = netdev_priv(dev); - - u8 *buf = msg; - u16 hci_cmd = (buf[0]<<8) | buf[1]; - u16 hci_len = (buf[2]<<8) | buf[3]; - - netdev_dbg(dev, "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len); - - gdm_wimax_send(nic, msg, len); -} - -static void __gdm_wimax_event_send(struct work_struct *work) -{ - int idx; - unsigned long flags; - struct evt_entry *e; - struct evt_entry *tmp; - - spin_lock_irqsave(&wm_event.evt_lock, flags); - - list_for_each_entry_safe(e, tmp, &wm_event.evtq, list) { - spin_unlock_irqrestore(&wm_event.evt_lock, flags); - - if (sscanf(e->dev->name, "wm%d", &idx) == 1) - netlink_send(wm_event.sock, idx, 0, e->evt_data, - e->size); - - spin_lock_irqsave(&wm_event.evt_lock, flags); - list_del(&e->list); - put_event_entry(e); - } - - spin_unlock_irqrestore(&wm_event.evt_lock, flags); -} - -static int gdm_wimax_event_init(void) -{ - if (!wm_event.ref_cnt) { - wm_event.sock = netlink_init(NETLINK_WIMAX, - gdm_wimax_event_rcv); - if (wm_event.sock) { - INIT_LIST_HEAD(&wm_event.evtq); - INIT_LIST_HEAD(&wm_event.freeq); - INIT_WORK(&wm_event.ws, __gdm_wimax_event_send); - spin_lock_init(&wm_event.evt_lock); - } - } - - if (wm_event.sock) { - wm_event.ref_cnt++; - return 0; - } - - pr_err("Creating WiMax Event netlink is failed\n"); - return -1; -} - -static void gdm_wimax_event_exit(void) 
-{ - if (wm_event.sock && --wm_event.ref_cnt == 0) { - struct evt_entry *e, *temp; - unsigned long flags; - - spin_lock_irqsave(&wm_event.evt_lock, flags); - - list_for_each_entry_safe(e, temp, &wm_event.evtq, list) { - list_del(&e->list); - kfree(e); - } - list_for_each_entry_safe(e, temp, &wm_event.freeq, list) { - list_del(&e->list); - kfree(e); - } - - spin_unlock_irqrestore(&wm_event.evt_lock, flags); - netlink_exit(wm_event.sock); - wm_event.sock = NULL; - } -} - -static int gdm_wimax_event_send(struct net_device *dev, char *buf, int size) -{ - struct evt_entry *e; - unsigned long flags; - - u16 hci_cmd = ((u8)buf[0]<<8) | (u8)buf[1]; - u16 hci_len = ((u8)buf[2]<<8) | (u8)buf[3]; - - netdev_dbg(dev, "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len); - - spin_lock_irqsave(&wm_event.evt_lock, flags); - - e = get_event_entry(); - if (!e) { - netdev_err(dev, "%s: No memory for event\n", __func__); - spin_unlock_irqrestore(&wm_event.evt_lock, flags); - return -ENOMEM; - } - - e->dev = dev; - e->size = size; - memcpy(e->evt_data, buf, size); - - list_add_tail(&e->list, &wm_event.evtq); - spin_unlock_irqrestore(&wm_event.evt_lock, flags); - - schedule_work(&wm_event.ws); - - return 0; -} - -static void tx_complete(void *arg) -{ - struct nic *nic = arg; - - if (netif_queue_stopped(nic->netdev)) - netif_wake_queue(nic->netdev); -} - -int gdm_wimax_send_tx(struct sk_buff *skb, struct net_device *dev) -{ - int ret = 0; - struct nic *nic = netdev_priv(dev); - - ret = gdm_wimax_send_with_cb(nic, skb->data, skb->len, tx_complete, - nic); - if (ret == -ENOSPC) { - netif_stop_queue(dev); - ret = 0; - } - - if (ret) { - skb_pull(skb, HCI_HEADER_SIZE); - return ret; - } - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len - HCI_HEADER_SIZE; - kfree_skb(skb); - return ret; -} - -static int gdm_wimax_tx(struct sk_buff *skb, struct net_device *dev) -{ - int ret = 0; - - ret = gdm_wimax_header(&skb); - if (ret < 0) { - skb_pull(skb, HCI_HEADER_SIZE); - return ret; - } - -#if defined(CONFIG_WIMAX_GDM72XX_QOS) - ret = gdm_qos_send_hci_pkt(skb, dev); -#else - ret = gdm_wimax_send_tx(skb, dev); -#endif - return ret; -} - -static int gdm_wimax_set_config(struct net_device *dev, struct ifmap *map) -{ - if (dev->flags & IFF_UP) - return -EBUSY; - - return 0; -} - -static void __gdm_wimax_set_mac_addr(struct net_device *dev, char *mac_addr) -{ - u16 hci_pkt_buf[32 / sizeof(u16)]; - struct hci_s *hci = (struct hci_s *)hci_pkt_buf; - struct nic *nic = netdev_priv(dev); - - /* Since dev is registered as a ethernet device, - * ether_setup has made dev->addr_len to be ETH_ALEN - */ - memcpy(dev->dev_addr, mac_addr, dev->addr_len); - - /* Let lower layer know of this change by sending - * SetInformation(MAC Address) - */ - hci->cmd_evt = cpu_to_be16(WIMAX_SET_INFO); - hci->length = cpu_to_be16(8); - hci->data[0] = 0; /* T */ - hci->data[1] = 6; /* L */ - memcpy(&hci->data[2], mac_addr, dev->addr_len); /* V */ - - gdm_wimax_send(nic, hci, HCI_HEADER_SIZE + 8); -} - -/* A driver function */ -static int gdm_wimax_set_mac_addr(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - - if (netif_running(dev)) - return -EBUSY; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - __gdm_wimax_set_mac_addr(dev, addr->sa_data); - - return 0; -} - -static void gdm_wimax_ind_if_updown(struct net_device *dev, int if_up) -{ - u16 buf[32 / sizeof(u16)]; - struct hci_s *hci = (struct hci_s *)buf; - unsigned char up_down; - - up_down = if_up ? 
WIMAX_IF_UP : WIMAX_IF_DOWN; - - /* Indicate updating fsm */ - hci->cmd_evt = cpu_to_be16(WIMAX_IF_UPDOWN); - hci->length = cpu_to_be16(sizeof(up_down)); - hci->data[0] = up_down; - - gdm_wimax_event_send(dev, (char *)hci, HCI_HEADER_SIZE+sizeof(up_down)); -} - -static int gdm_wimax_open(struct net_device *dev) -{ - struct nic *nic = netdev_priv(dev); - struct fsm_s *fsm = nic->sdk_data[SIOC_DATA_FSM].buf; - - netif_start_queue(dev); - - if (fsm && fsm->m_status != M_INIT) - gdm_wimax_ind_if_updown(dev, 1); - return 0; -} - -static int gdm_wimax_close(struct net_device *dev) -{ - struct nic *nic = netdev_priv(dev); - struct fsm_s *fsm = nic->sdk_data[SIOC_DATA_FSM].buf; - - netif_stop_queue(dev); - - if (fsm && fsm->m_status != M_INIT) - gdm_wimax_ind_if_updown(dev, 0); - return 0; -} - -static void kdelete(void **buf) -{ - if (buf && *buf) { - kfree(*buf); - *buf = NULL; - } -} - -static int gdm_wimax_ioctl_get_data(struct udata_s *dst, struct data_s *src) -{ - int size; - - size = dst->size < src->size ? dst->size : src->size; - - dst->size = size; - if (src->size) { - if (!dst->buf) - return -EINVAL; - if (copy_to_user(dst->buf, src->buf, size)) - return -EFAULT; - } - return 0; -} - -static int gdm_wimax_ioctl_set_data(struct data_s *dst, struct udata_s *src) -{ - if (!src->size) { - dst->size = 0; - return 0; - } - - if (!src->buf) - return -EINVAL; - - if (!(dst->buf && dst->size == src->size)) { - kdelete(&dst->buf); - dst->buf = kmalloc(src->size, GFP_KERNEL); - if (!dst->buf) - return -ENOMEM; - } - - if (copy_from_user(dst->buf, src->buf, src->size)) { - kdelete(&dst->buf); - return -EFAULT; - } - dst->size = src->size; - return 0; -} - -static void gdm_wimax_cleanup_ioctl(struct net_device *dev) -{ - struct nic *nic = netdev_priv(dev); - int i; - - for (i = 0; i < SIOC_DATA_MAX; i++) - kdelete(&nic->sdk_data[i].buf); -} - -static void gdm_wimax_ind_fsm_update(struct net_device *dev, struct fsm_s *fsm) -{ - u16 buf[32 / sizeof(u16)]; - struct hci_s *hci = (struct hci_s *)buf; - - /* Indicate updating fsm */ - hci->cmd_evt = cpu_to_be16(WIMAX_FSM_UPDATE); - hci->length = cpu_to_be16(sizeof(struct fsm_s)); - memcpy(&hci->data[0], fsm, sizeof(struct fsm_s)); - - gdm_wimax_event_send(dev, (char *)hci, - HCI_HEADER_SIZE + sizeof(struct fsm_s)); -} - -static void gdm_update_fsm(struct net_device *dev, struct fsm_s *new_fsm) -{ - struct nic *nic = netdev_priv(dev); - struct fsm_s *cur_fsm = - nic->sdk_data[SIOC_DATA_FSM].buf; - - if (!cur_fsm) - return; - - if (cur_fsm->m_status != new_fsm->m_status || - cur_fsm->c_status != new_fsm->c_status) { - if (new_fsm->m_status == M_CONNECTED) { - netif_carrier_on(dev); - } else if (cur_fsm->m_status == M_CONNECTED) { - netif_carrier_off(dev); - #if defined(CONFIG_WIMAX_GDM72XX_QOS) - gdm_qos_release_list(nic); - #endif - } - gdm_wimax_ind_fsm_update(dev, new_fsm); - } -} - -static int gdm_wimax_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct wm_req_s *req = (struct wm_req_s *)ifr; - struct nic *nic = netdev_priv(dev); - int ret; - struct fsm_s fsm_buf; - - if (cmd != SIOCWMIOCTL) - return -EOPNOTSUPP; - - switch (req->cmd) { - case SIOCG_DATA: - case SIOCS_DATA: - if (req->data_id >= SIOC_DATA_MAX) { - netdev_err(dev, "%s error: data-index(%d) is invalid!!\n", - __func__, req->data_id); - return -EOPNOTSUPP; - } - if (req->cmd == SIOCG_DATA) { - ret = gdm_wimax_ioctl_get_data( - &req->data, &nic->sdk_data[req->data_id]); - if (ret < 0) - return ret; - } else if (req->cmd == SIOCS_DATA) { - if (req->data_id == 
SIOC_DATA_FSM) { - /* NOTE: gdm_update_fsm should be called - * before gdm_wimax_ioctl_set_data is called. - */ - if (copy_from_user(&fsm_buf, req->data.buf, - sizeof(struct fsm_s))) - return -EFAULT; - - gdm_update_fsm(dev, &fsm_buf); - } - ret = gdm_wimax_ioctl_set_data( - &nic->sdk_data[req->data_id], &req->data); - if (ret < 0) - return ret; - } - break; - default: - netdev_err(dev, "%s: %x unknown ioctl\n", __func__, cmd); - return -EOPNOTSUPP; - } - - return 0; -} - -static void gdm_wimax_prepare_device(struct net_device *dev) -{ - struct nic *nic = netdev_priv(dev); - u16 buf[32 / sizeof(u16)]; - struct hci_s *hci = (struct hci_s *)buf; - u16 len = 0; - u32 val = 0; - __be32 val_be32; - - /* GetInformation mac address */ - len = 0; - hci->cmd_evt = cpu_to_be16(WIMAX_GET_INFO); - hci->data[len++] = TLV_T(T_MAC_ADDRESS); - hci->length = cpu_to_be16(len); - gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len); - - val = T_CAPABILITY_WIMAX | T_CAPABILITY_MULTI_CS; - #if defined(CONFIG_WIMAX_GDM72XX_QOS) - val |= T_CAPABILITY_QOS; - #endif - #if defined(CONFIG_WIMAX_GDM72XX_WIMAX2) - val |= T_CAPABILITY_AGGREGATION; - #endif - - /* Set capability */ - len = 0; - hci->cmd_evt = cpu_to_be16(WIMAX_SET_INFO); - hci->data[len++] = TLV_T(T_CAPABILITY); - hci->data[len++] = TLV_L(T_CAPABILITY); - val_be32 = cpu_to_be32(val); - memcpy(&hci->data[len], &val_be32, TLV_L(T_CAPABILITY)); - len += TLV_L(T_CAPABILITY); - hci->length = cpu_to_be16(len); - gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len); - - netdev_info(dev, "GDM WiMax Set CAPABILITY: 0x%08X\n", val); -} - -static int gdm_wimax_hci_get_tlv(u8 *buf, u8 *T, u16 *L, u8 **V) -{ - #define __U82U16(b) ((u16)((u8 *)(b))[0] | ((u16)((u8 *)(b))[1] << 8)) - int next_pos; - - *T = buf[0]; - if (buf[1] == 0x82) { - *L = be16_to_cpu(__U82U16(&buf[2])); - next_pos = 1/*type*/+3/*len*/; - } else { - *L = buf[1]; - next_pos = 1/*type*/+1/*len*/; - } - *V = &buf[next_pos]; - - next_pos += *L/*length of val*/; - return next_pos; -} - -static int gdm_wimax_get_prepared_info(struct net_device *dev, char *buf, - int len) -{ - u8 T, *V; - u16 L; - u16 cmd_evt, cmd_len; - int pos = HCI_HEADER_SIZE; - - cmd_evt = be16_to_cpup((const __be16 *)&buf[0]); - cmd_len = be16_to_cpup((const __be16 *)&buf[2]); - - if (len < cmd_len + HCI_HEADER_SIZE) { - netdev_err(dev, "%s: invalid length [%d/%d]\n", __func__, - cmd_len + HCI_HEADER_SIZE, len); - return -1; - } - - if (cmd_evt == WIMAX_GET_INFO_RESULT) { - if (cmd_len < 2) { - netdev_err(dev, "%s: len is too short [%x/%d]\n", - __func__, cmd_evt, len); - return -1; - } - - pos += gdm_wimax_hci_get_tlv(&buf[pos], &T, &L, &V); - if (TLV_T(T_MAC_ADDRESS) == T) { - if (dev->addr_len != L) { - netdev_err(dev, - "%s Invalid information result T/L [%x/%d]\n", - __func__, T, L); - return -1; - } - netdev_info(dev, "MAC change [%pM]->[%pM]\n", - dev->dev_addr, V); - memcpy(dev->dev_addr, V, dev->addr_len); - return 1; - } - } - - gdm_wimax_event_send(dev, buf, len); - return 0; -} - -static void gdm_wimax_netif_rx(struct net_device *dev, char *buf, int len) -{ - struct sk_buff *skb; - int ret; - - skb = dev_alloc_skb(len + 2); - if (!skb) - return; - skb_reserve(skb, 2); - - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; - - memcpy(skb_put(skb, len), buf, len); - - skb->dev = dev; - skb->protocol = eth_type_trans(skb, dev); /* what will happen? */ - - ret = in_interrupt() ? 
netif_rx(skb) : netif_rx_ni(skb); - if (ret == NET_RX_DROP) - netdev_err(dev, "%s skb dropped\n", __func__); -} - -static void gdm_wimax_transmit_aggr_pkt(struct net_device *dev, char *buf, - int len) -{ - #define HCI_PADDING_BYTE 4 - #define HCI_RESERVED_BYTE 4 - struct hci_s *hci; - int length; - - while (len > 0) { - hci = (struct hci_s *)buf; - - if (hci->cmd_evt != cpu_to_be16(WIMAX_RX_SDU)) { - netdev_err(dev, "Wrong cmd_evt(0x%04X)\n", - be16_to_cpu(hci->cmd_evt)); - break; - } - - length = be16_to_cpu(hci->length); - gdm_wimax_netif_rx(dev, hci->data, length); - - if (length & 0x3) { - /* Add padding size */ - length += HCI_PADDING_BYTE - (length & 0x3); - } - - length += HCI_HEADER_SIZE + HCI_RESERVED_BYTE; - len -= length; - buf += length; - } -} - -static void gdm_wimax_transmit_pkt(struct net_device *dev, char *buf, int len) -{ - #if defined(CONFIG_WIMAX_GDM72XX_QOS) - struct nic *nic = netdev_priv(dev); - #endif - u16 cmd_evt, cmd_len; - - /* This code is added for certain rx packet to be ignored. */ - if (len == 0) - return; - - cmd_evt = be16_to_cpup((const __be16 *)&buf[0]); - cmd_len = be16_to_cpup((const __be16 *)&buf[2]); - - if (len < cmd_len + HCI_HEADER_SIZE) { - if (len) - netdev_err(dev, "%s: invalid length [%d/%d]\n", - __func__, cmd_len + HCI_HEADER_SIZE, len); - return; - } - - switch (cmd_evt) { - case WIMAX_RX_SDU_AGGR: - gdm_wimax_transmit_aggr_pkt(dev, &buf[HCI_HEADER_SIZE], - cmd_len); - break; - case WIMAX_RX_SDU: - gdm_wimax_netif_rx(dev, &buf[HCI_HEADER_SIZE], cmd_len); - break; - #if defined(CONFIG_WIMAX_GDM72XX_QOS) - case WIMAX_EVT_MODEM_REPORT: - gdm_recv_qos_hci_packet(nic, buf, len); - break; - #endif - case WIMAX_SDU_TX_FLOW: - if (buf[4] == 0) { - if (!netif_queue_stopped(dev)) - netif_stop_queue(dev); - } else if (buf[4] == 1) { - if (netif_queue_stopped(dev)) - netif_wake_queue(dev); - } - break; - default: - gdm_wimax_event_send(dev, buf, len); - break; - } -} - -static void rx_complete(void *arg, void *data, int len) -{ - struct nic *nic = arg; - - gdm_wimax_transmit_pkt(nic->netdev, data, len); - gdm_wimax_rcv_with_cb(nic, rx_complete, nic); -} - -static void prepare_rx_complete(void *arg, void *data, int len) -{ - struct nic *nic = arg; - int ret; - - ret = gdm_wimax_get_prepared_info(nic->netdev, data, len); - if (ret == 1) { - gdm_wimax_rcv_with_cb(nic, rx_complete, nic); - } else { - if (ret < 0) - netdev_err(nic->netdev, - "get_prepared_info failed(%d)\n", ret); - gdm_wimax_rcv_with_cb(nic, prepare_rx_complete, nic); - } -} - -static void start_rx_proc(struct nic *nic) -{ - gdm_wimax_rcv_with_cb(nic, prepare_rx_complete, nic); -} - -static struct net_device_ops gdm_netdev_ops = { - .ndo_open = gdm_wimax_open, - .ndo_stop = gdm_wimax_close, - .ndo_set_config = gdm_wimax_set_config, - .ndo_start_xmit = gdm_wimax_tx, - .ndo_set_mac_address = gdm_wimax_set_mac_addr, - .ndo_do_ioctl = gdm_wimax_ioctl, -}; - -int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev) -{ - struct nic *nic = NULL; - struct net_device *dev; - int ret; - - dev = alloc_netdev(sizeof(*nic), "wm%d", NET_NAME_UNKNOWN, - ether_setup); - - if (!dev) { - pr_err("alloc_etherdev failed\n"); - return -ENOMEM; - } - - SET_NETDEV_DEV(dev, pdev); - dev->mtu = 1400; - dev->netdev_ops = &gdm_netdev_ops; - dev->flags &= ~IFF_MULTICAST; - memcpy(dev->dev_addr, gdm_wimax_macaddr, sizeof(gdm_wimax_macaddr)); - - nic = netdev_priv(dev); - nic->netdev = dev; - nic->phy_dev = phy_dev; - phy_dev->netdev = dev; - - /* event socket init */ - ret = gdm_wimax_event_init(); - if 
(ret < 0) { - pr_err("Cannot create event.\n"); - goto cleanup; - } - - ret = register_netdev(dev); - if (ret) - goto cleanup; - - netif_carrier_off(dev); - -#ifdef CONFIG_WIMAX_GDM72XX_QOS - gdm_qos_init(nic); -#endif - - start_rx_proc(nic); - - /* Prepare WiMax device */ - gdm_wimax_prepare_device(dev); - - return 0; - -cleanup: - pr_err("register_netdev failed\n"); - free_netdev(dev); - return ret; -} - -void unregister_wimax_device(struct phy_dev *phy_dev) -{ - struct nic *nic = netdev_priv(phy_dev->netdev); - struct fsm_s *fsm = nic->sdk_data[SIOC_DATA_FSM].buf; - - if (fsm) - fsm->m_status = M_INIT; - unregister_netdev(nic->netdev); - - gdm_wimax_event_exit(); - -#if defined(CONFIG_WIMAX_GDM72XX_QOS) - gdm_qos_release_list(nic); -#endif - - gdm_wimax_cleanup_ioctl(phy_dev->netdev); - - free_netdev(nic->netdev); -} diff --git a/drivers/staging/gdm72xx/gdm_wimax.h b/drivers/staging/gdm72xx/gdm_wimax.h deleted file mode 100644 index 3330cd798c69..000000000000 --- a/drivers/staging/gdm72xx/gdm_wimax.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __GDM72XX_GDM_WIMAX_H__ -#define __GDM72XX_GDM_WIMAX_H__ - -#include <linux/netdevice.h> -#include <linux/types.h> -#include "wm_ioctl.h" -#if defined(CONFIG_WIMAX_GDM72XX_QOS) -#include "gdm_qos.h" -#endif - -#define DRIVER_VERSION "3.2.3" - -struct phy_dev { - void *priv_dev; - struct net_device *netdev; - int (*send_func)(void *priv_dev, void *data, int len, - void (*cb)(void *cb_data), void *cb_data); - int (*rcv_func)(void *priv_dev, - void (*cb)(void *cb_data, void *data, int len), - void *cb_data); -}; - -struct nic { - struct net_device *netdev; - struct phy_dev *phy_dev; - struct data_s sdk_data[SIOC_DATA_MAX]; -#if defined(CONFIG_WIMAX_GDM72XX_QOS) - struct qos_cb_s qos; -#endif -}; - -int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev); -int gdm_wimax_send_tx(struct sk_buff *skb, struct net_device *dev); -void unregister_wimax_device(struct phy_dev *phy_dev); - -#endif /* __GDM72XX_GDM_WIMAX_H__ */ diff --git a/drivers/staging/gdm72xx/hci.h b/drivers/staging/gdm72xx/hci.h deleted file mode 100644 index 10a6bfa6e998..000000000000 --- a/drivers/staging/gdm72xx/hci.h +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __GDM72XX_HCI_H__ -#define __GDM72XX_HCI_H__ - -#define HCI_HEADER_SIZE 4 -#define HCI_VALUE_OFFS (HCI_HEADER_SIZE) -#define HCI_MAX_PACKET 2048 -#define HCI_MAX_PARAM (HCI_MAX_PACKET-HCI_HEADER_SIZE) -#define HCI_MAX_TLV 32 - -/* CMD-EVT */ - -/* Category 0 */ -#define WIMAX_RESET 0x0000 -#define WIMAX_SET_INFO 0x0001 -#define WIMAX_GET_INFO 0x0002 -#define WIMAX_GET_INFO_RESULT 0x8003 -#define WIMAX_RADIO_OFF 0x0004 -#define WIMAX_RADIO_ON 0x0006 -#define WIMAX_WIMAX_RESET 0x0007 /* Is this still here */ - -/* Category 1 */ -#define WIMAX_NET_ENTRY 0x0100 -#define WIMAX_NET_DISCONN 0x0102 -#define WIMAX_ENTER_SLEEP 0x0103 -#define WIMAX_EXIT_SLEEP 0x0104 -#define WIMAX_ENTER_IDLE 0x0105 -#define WIMAX_EXIT_IDLE 0x0106 -#define WIMAX_MODE_CHANGE 0x8108 -#define WIMAX_HANDOVER 0x8109 /* obsolete */ -#define WIMAX_SCAN 0x010d -#define WIMAX_SCAN_COMPLETE 0x810e -#define WIMAX_SCAN_RESULT 0x810f -#define WIMAX_CONNECT 0x0110 -#define WIMAX_CONNECT_START 0x8111 -#define WIMAX_CONNECT_COMPLETE 0x8112 -#define WIMAX_ASSOC_START 0x8113 -#define WIMAX_ASSOC_COMPLETE 0x8114 -#define WIMAX_DISCONN_IND 0x8115 -#define WIMAX_ENTRY_IND 0x8116 -#define WIMAX_HO_START 0x8117 -#define WIMAX_HO_COMPLETE 0x8118 -#define WIMAX_RADIO_STATE_IND 0x8119 -#define WIMAX_IP_RENEW_IND 0x811a -#define WIMAX_DISCOVER_NSP 0x011d -#define WIMAX_DISCOVER_NSP_RESULT 0x811e -#define WIMAX_SDU_TX_FLOW 0x8125 - -/* Category 2 */ -#define WIMAX_TX_EAP 0x0200 -#define WIMAX_RX_EAP 0x8201 -#define WIMAX_TX_SDU 0x0202 -#define WIMAX_RX_SDU 0x8203 -#define WIMAX_RX_SDU_AGGR 0x8204 -#define WIMAX_TX_SDU_AGGR 0x0205 - -/* Category 3 */ -#define WIMAX_DM_CMD 0x030a -#define WIMAX_DM_RSP 0x830b - -#define WIMAX_CLI_CMD 0x030c -#define WIMAX_CLI_RSP 0x830d - -#define WIMAX_DL_IMAGE 0x0310 -#define WIMAX_DL_IMAGE_STATUS 0x8311 -#define WIMAX_UL_IMAGE 0x0312 -#define WIMAX_UL_IMAGE_RESULT 0x8313 -#define WIMAX_UL_IMAGE_STATUS 0x0314 -#define WIMAX_EVT_MODEM_REPORT 0x8325 - -/* Category 0xF */ -#define WIMAX_FSM_UPDATE 0x8F01 -#define WIMAX_IF_UPDOWN 0x8F02 -#define WIMAX_IF_UP 1 -#define WIMAX_IF_DOWN 2 - -/* WIMAX mode */ -#define W_NULL 0 -#define W_STANDBY 1 -#define W_OOZ 2 -#define W_AWAKE 3 -#define W_IDLE 4 -#define W_SLEEP 5 -#define W_WAIT 6 - -#define W_NET_ENTRY_RNG 0x80 -#define W_NET_ENTRY_SBC 0x81 -#define W_NET_ENTRY_PKM 0x82 -#define W_NET_ENTRY_REG 0x83 -#define W_NET_ENTRY_DSX 0x84 - -#define W_NET_ENTRY_RNG_FAIL 0x1100100 -#define W_NET_ENTRY_SBC_FAIL 0x1100200 -#define W_NET_ENTRY_PKM_FAIL 0x1102000 -#define W_NET_ENTRY_REG_FAIL 0x1103000 -#define W_NET_ENTRY_DSX_FAIL 0x1104000 - -/* Scan Type */ -#define W_SCAN_ALL_CHANNEL 0 -#define W_SCAN_ALL_SUBSCRIPTION 1 -#define W_SCAN_SPECIFIED_SUBSCRIPTION 2 - -/* TLV - * - * [31:31] indicates the type is composite. - * [30:16] is the length of the type. 0 length means length is variable. - * [15:0] is the actual type. 
- */ -#define TLV_L(x) (((x) >> 16) & 0xff) -#define TLV_T(x) ((x) & 0xff) -#define TLV_COMPOSITE(x) ((x) >> 31) - -/* GENERAL */ -#define T_MAC_ADDRESS (0x00 | (6 << 16)) -#define T_BSID (0x01 | (6 << 16)) -#define T_MSK (0x02 | (64 << 16)) -#define T_RSSI_THRSHLD (0x03 | (1 << 16)) -#define T_FREQUENCY (0x04 | (4 << 16)) -#define T_CONN_CS_TYPE (0x05 | (1 << 16)) -#define T_HOST_IP_VER (0x06 | (1 << 16)) -#define T_STBY_SCAN_INTERVAL (0x07 | (4 << 16)) -#define T_OOZ_SCAN_INTERVAL (0x08 | (4 << 16)) -#define T_IMEI (0x09 | (8 << 16)) -#define T_PID (0x0a | (12 << 16)) -#define T_CAPABILITY (0x1a | (4 << 16)) -#define T_RELEASE_NUMBER (0x1b | (4 << 16)) -#define T_DRIVER_REVISION (0x1c | (4 << 16)) -#define T_FW_REVISION (0x1d | (4 << 16)) -#define T_MAC_HW_REVISION (0x1e | (4 << 16)) -#define T_PHY_HW_REVISION (0x1f | (4 << 16)) - -/* HANDOVER */ -#define T_SCAN_INTERVAL (0x20 | (1 << 16)) -#define T_RSC_RETAIN_TIME (0x2f | (2 << 16)) - -/* SLEEP */ -#define T_TYPE1_ISW (0x40 | (1 << 16)) -#define T_SLP_START_TO (0x4a | (2 << 16)) - -/* IDLE */ -#define T_IDLE_MODE_TO (0x50 | (2 << 16)) -#define T_IDLE_START_TO (0x54 | (2 << 16)) - -/* MONITOR */ -#define T_RSSI (0x60 | (1 << 16)) -#define T_CINR (0x61 | (1 << 16)) -#define T_TX_POWER (0x6a | (1 << 16)) -#define T_CUR_FREQ (0x7f | (4 << 16)) - - -/* WIMAX */ -#define T_MAX_SUBSCRIPTION (0xa1 | (1 << 16)) -#define T_MAX_SF (0xa2 | (1 << 16)) -#define T_PHY_TYPE (0xa3 | (1 << 16)) -#define T_PKM (0xa4 | (1 << 16)) -#define T_AUTH_POLICY (0xa5 | (1 << 16)) -#define T_CS_TYPE (0xa6 | (2 << 16)) -#define T_VENDOR_NAME (0xa7 | (0 << 16)) -#define T_MOD_NAME (0xa8 | (0 << 16)) -#define T_PACKET_FILTER (0xa9 | (1 << 16)) -#define T_NSP_CHANGE_COUNT (0xaa | (4 << 16)) -#define T_RADIO_STATE (0xab | (1 << 16)) -#define T_URI_CONTACT_TYPE (0xac | (1 << 16)) -#define T_URI_TEXT (0xad | (0 << 16)) -#define T_URI (0xae | (0 << 16)) -#define T_ENABLE_AUTH (0xaf | (1 << 16)) -#define T_TIMEOUT (0xb0 | (2 << 16)) -#define T_RUN_MODE (0xb1 | (1 << 16)) -#define T_OMADMT_VER (0xb2 | (4 << 16)) -/* This is measured in seconds from 00:00:00 GMT January 1, 1970. */ -#define T_RTC_TIME (0xb3 | (4 << 16)) -#define T_CERT_STATUS (0xb4 | (4 << 16)) -#define T_CERT_MASK (0xb5 | (4 << 16)) -#define T_EMSK (0xb6 | (64 << 16)) - -/* Subscription TLV */ -#define T_SUBSCRIPTION_LIST (0xd1 | (0 << 16) | (1 << 31)) -#define T_H_NSPID (0xd2 | (3 << 16)) -#define T_NSP_NAME (0xd3 | (0 << 16)) -#define T_SUBSCRIPTION_NAME (0xd4 | (0 << 16)) -#define T_SUBSCRIPTION_FLAG (0xd5 | (2 << 16)) -#define T_V_NSPID (0xd6 | (3 << 16)) -#define T_NAP_ID (0xd7 | (3 << 16)) -#define T_PREAMBLES (0xd8 | (15 << 16)) -#define T_BW (0xd9 | (4 << 16)) -#define T_FFTSIZE (0xda | (4 << 16)) -#define T_DUPLEX_MODE (0xdb | (4 << 16)) - -/* T_CAPABILITY */ -#define T_CAPABILITY_MULTI_CS (1 << 0) -#define T_CAPABILITY_WIMAX (1 << 1) -#define T_CAPABILITY_QOS (1 << 2) -#define T_CAPABILITY_AGGREGATION (1 << 3) - -struct hci_s { - __be16 cmd_evt; - __be16 length; - u8 data[0]; -} __packed; - -#endif /* __GDM72XX_HCI_H__ */ diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c deleted file mode 100644 index f3cdaa6c468c..000000000000 --- a/drivers/staging/gdm72xx/netlink_k.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. 
- * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/etherdevice.h> -#include <net/netlink.h> -#include <asm/byteorder.h> -#include <net/sock.h> -#include "netlink_k.h" - -#if !defined(NLMSG_HDRLEN) -#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) -#endif - -#define ND_MAX_GROUP 30 -#define ND_IFINDEX_LEN sizeof(int) -#define ND_NLMSG_SPACE(len) (nlmsg_total_size(len) + ND_IFINDEX_LEN) -#define ND_NLMSG_DATA(nlh) \ - ((void *)((char *)nlmsg_data(nlh) + ND_IFINDEX_LEN)) -#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN) -#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN) -#define ND_NLMSG_IFIDX(nlh) nlmsg_data(nlh) -#define ND_MAX_MSG_LEN 8096 - -#if defined(DEFINE_MUTEX) -static DEFINE_MUTEX(netlink_mutex); -#else -static struct semaphore netlink_mutex; -#define mutex_lock(x) down(x) -#define mutex_unlock(x) up(x) -#endif - -static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len); - -static void netlink_rcv_cb(struct sk_buff *skb) -{ - struct nlmsghdr *nlh; - struct net_device *dev; - u32 mlen; - void *msg; - int ifindex; - - if (skb->len >= NLMSG_HDRLEN) { - nlh = (struct nlmsghdr *)skb->data; - - if (skb->len < nlh->nlmsg_len || - nlh->nlmsg_len > ND_MAX_MSG_LEN) { - netdev_err(skb->dev, "Invalid length (%d,%d)\n", - skb->len, nlh->nlmsg_len); - return; - } - - memcpy(&ifindex, ND_NLMSG_IFIDX(nlh), ND_IFINDEX_LEN); - msg = ND_NLMSG_DATA(nlh); - mlen = ND_NLMSG_R_LEN(nlh); - - if (rcv_cb) { - dev = dev_get_by_index(&init_net, ifindex); - if (dev) { - rcv_cb(dev, nlh->nlmsg_type, msg, mlen); - dev_put(dev); - } else - netdev_err(skb->dev, - "dev_get_by_index(%d) is not found.\n", - ifindex); - } else { - netdev_err(skb->dev, "Unregistered Callback\n"); - } - } -} - -static void netlink_rcv(struct sk_buff *skb) -{ - mutex_lock(&netlink_mutex); - netlink_rcv_cb(skb); - mutex_unlock(&netlink_mutex); -} - -struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type, - void *msg, int len)) -{ - struct sock *sock; - struct netlink_kernel_cfg cfg = { - .input = netlink_rcv, - }; - -#if !defined(DEFINE_MUTEX) - init_MUTEX(&netlink_mutex); -#endif - - sock = netlink_kernel_create(&init_net, unit, &cfg); - - if (sock) - rcv_cb = cb; - - return sock; -} - -void netlink_exit(struct sock *sock) -{ - netlink_kernel_release(sock); -} - -int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) -{ - static u32 seq; - struct sk_buff *skb = NULL; - struct nlmsghdr *nlh; - int ret = 0; - - if (group > ND_MAX_GROUP) { - pr_err("Group %d is invalid.\n", group); - pr_err("Valid group is 0 ~ %d.\n", ND_MAX_GROUP); - return -EINVAL; - } - - skb = nlmsg_new(len, GFP_ATOMIC); - if (!skb) { - pr_err("netlink_broadcast ret=%d\n", ret); - return -ENOMEM; - } - - seq++; - nlh = nlmsg_put(skb, 0, seq, type, len, 0); - if (!nlh) { - kfree_skb(skb); - return -EMSGSIZE; - } - memcpy(nlmsg_data(nlh), msg, len); - - NETLINK_CB(skb).portid = 0; - NETLINK_CB(skb).dst_group = 0; - - ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC); - 
- if (!ret) - return len; - if (ret != -ESRCH) { - pr_err("netlink_broadcast g=%d, t=%d, l=%d, r=%d\n", - group, type, len, ret); - } - ret = 0; - return ret; -} diff --git a/drivers/staging/gdm72xx/netlink_k.h b/drivers/staging/gdm72xx/netlink_k.h deleted file mode 100644 index 1fe7198d539e..000000000000 --- a/drivers/staging/gdm72xx/netlink_k.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __GDM72XX_NETLINK_K_H__ -#define __GDM72XX_NETLINK_K_H__ - -#include <linux/netdevice.h> -#include <net/sock.h> - -struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type, - void *msg, int len)); -void netlink_exit(struct sock *sock); -int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len); - -#endif /* __GDM72XX_NETLINK_K_H__ */ diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c deleted file mode 100644 index ba94b5f13bb2..000000000000 --- a/drivers/staging/gdm72xx/sdio_boot.c +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/uaccess.h> -#include <linux/fs.h> -#include <linux/sched.h> -#include <linux/slab.h> - -#include <linux/mmc/core.h> -#include <linux/mmc/card.h> -#include <linux/mmc/sdio_func.h> - -#include <linux/firmware.h> - -#include "gdm_sdio.h" -#include "sdio_boot.h" - -#define TYPE_A_HEADER_SIZE 4 -#define TYPE_A_LOOKAHEAD_SIZE 16 -#define YMEM0_SIZE 0x8000 /* 32kbytes */ -#define DOWNLOAD_SIZE (YMEM0_SIZE - TYPE_A_HEADER_SIZE) - -#define FW_DIR "gdm72xx/" -#define FW_KRN "gdmskrn.bin" -#define FW_RFS "gdmsrfs.bin" - -static u8 *tx_buf; - -static int ack_ready(struct sdio_func *func) -{ - unsigned long wait = jiffies + HZ; - u8 val; - int ret; - - while (time_before(jiffies, wait)) { - val = sdio_readb(func, 0x13, &ret); - if (val & 0x01) - return 1; - schedule(); - } - - return 0; -} - -static int download_image(struct sdio_func *func, const char *img_name) -{ - int ret = 0, len, pno; - u8 *buf = tx_buf; - loff_t pos = 0; - int img_len; - const struct firmware *firm; - - ret = request_firmware(&firm, img_name, &func->dev); - if (ret < 0) { - dev_err(&func->dev, - "requesting firmware %s failed with error %d\n", - img_name, ret); - return ret; - } - - buf = kmalloc(DOWNLOAD_SIZE + TYPE_A_HEADER_SIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - img_len = firm->size; - - if (img_len <= 0) { - ret = -1; - goto out; - } - - pno = 0; - while (img_len > 0) { - if (img_len > DOWNLOAD_SIZE) { - len = DOWNLOAD_SIZE; - buf[3] = 0; - } else { - len = img_len; /* the last packet */ - buf[3] = 2; - } - - buf[0] = len & 0xff; - buf[1] = (len >> 8) & 0xff; - buf[2] = (len >> 16) & 0xff; - - memcpy(buf+TYPE_A_HEADER_SIZE, firm->data + pos, len); - ret = sdio_memcpy_toio(func, 0, buf, len + TYPE_A_HEADER_SIZE); - if (ret < 0) { - dev_err(&func->dev, - "send image error: packet number = %d ret = %d\n", - pno, ret); - goto out; - } - - if (buf[3] == 2) /* The last packet */ - break; - if (!ack_ready(func)) { - ret = -EIO; - dev_err(&func->dev, "Ack is not ready.\n"); - goto out; - } - ret = sdio_memcpy_fromio(func, buf, 0, TYPE_A_LOOKAHEAD_SIZE); - if (ret < 0) { - dev_err(&func->dev, - "receive ack error: packet number = %d ret = %d\n", - pno, ret); - goto out; - } - sdio_writeb(func, 0x01, 0x13, &ret); - sdio_writeb(func, 0x00, 0x10, &ret); /* PCRRT */ - - img_len -= DOWNLOAD_SIZE; - pos += DOWNLOAD_SIZE; - pno++; - } - -out: - kfree(buf); - return ret; -} - -int sdio_boot(struct sdio_func *func) -{ - int ret; - const char *krn_name = FW_DIR FW_KRN; - const char *rfs_name = FW_DIR FW_RFS; - - tx_buf = kmalloc(YMEM0_SIZE, GFP_KERNEL); - if (!tx_buf) - return -ENOMEM; - - ret = download_image(func, krn_name); - if (ret) - goto restore_fs; - dev_info(&func->dev, "GCT: Kernel download success.\n"); - - ret = download_image(func, rfs_name); - if (ret) - goto restore_fs; - dev_info(&func->dev, "GCT: Filesystem download success.\n"); - -restore_fs: - kfree(tx_buf); - return ret; -} diff --git a/drivers/staging/gdm72xx/sdio_boot.h b/drivers/staging/gdm72xx/sdio_boot.h deleted file mode 100644 index e0800c6fe2fd..000000000000 --- a/drivers/staging/gdm72xx/sdio_boot.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __GDM72XX_SDIO_BOOT_H__ -#define __GDM72XX_SDIO_BOOT_H__ - -struct sdio_func; - -int sdio_boot(struct sdio_func *func); - -#endif /* __GDM72XX_SDIO_BOOT_H__ */ diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c deleted file mode 100644 index 39ca34031a6b..000000000000 --- a/drivers/staging/gdm72xx/usb_boot.c +++ /dev/null @@ -1,363 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/uaccess.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/usb.h> -#include <linux/unistd.h> -#include <linux/slab.h> -#include <linux/firmware.h> - -#include <asm/byteorder.h> -#include "gdm_usb.h" -#include "usb_boot.h" - -#define DN_KERNEL_MAGIC_NUMBER 0x10760001 -#define DN_ROOTFS_MAGIC_NUMBER 0x10760002 - -#define DOWNLOAD_SIZE 1024 - -#define MAX_IMG_CNT 16 -#define FW_DIR "gdm72xx/" -#define FW_UIMG "gdmuimg.bin" -#define FW_KERN "zImage" -#define FW_FS "ramdisk.jffs2" - -struct dn_header { - __be32 magic_num; - __be32 file_size; -}; - -struct img_header { - u32 magic_code; - u32 count; - u32 len; - u32 offset[MAX_IMG_CNT]; - char hostname[32]; - char date[32]; -}; - -struct fw_info { - u32 id; - u32 len; - u32 kernel_len; - u32 rootfs_len; - u32 kernel_offset; - u32 rootfs_offset; - u32 fw_ver; - u32 mac_ver; - char hostname[32]; - char userid[16]; - char date[32]; - char user_desc[128]; -}; - -static void array_le32_to_cpu(u32 *arr, int num) -{ - int i; - - for (i = 0; i < num; i++, arr++) - le32_to_cpus(arr); -} - -static u8 *tx_buf; - -static int gdm_wibro_send(struct usb_device *usbdev, void *data, int len) -{ - int ret; - int actual; - - ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), data, len, - &actual, 1000); - - if (ret < 0) { - dev_err(&usbdev->dev, "Error : usb_bulk_msg ( result = %d )\n", - ret); - return ret; - } - return 0; -} - -static int gdm_wibro_recv(struct usb_device *usbdev, void *data, int len) -{ - int ret; - int actual; - - ret = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, 2), data, len, - &actual, 5000); - - if (ret < 0) { - dev_err(&usbdev->dev, - "Error : usb_bulk_msg(recv) ( result = %d )\n", ret); - return ret; - } - return 0; -} - -static int download_image(struct usb_device *usbdev, - const struct firmware *firm, - loff_t pos, u32 img_len, u32 magic_num) -{ - struct dn_header h; - int ret = 0; - u32 size; - - size = ALIGN(img_len, DOWNLOAD_SIZE); - h.magic_num = cpu_to_be32(magic_num); - h.file_size = cpu_to_be32(size); - - ret = gdm_wibro_send(usbdev, &h, sizeof(h)); - if (ret < 0) - return ret; - - while (img_len > 0) { - if (img_len > DOWNLOAD_SIZE) - size = DOWNLOAD_SIZE; - else - size = img_len; /* the last chunk of data */ - - memcpy(tx_buf, firm->data + pos, size); - ret = gdm_wibro_send(usbdev, tx_buf, 
size); - - if (ret < 0) - return ret; - - img_len -= size; - pos += size; - } - - return ret; -} - -int usb_boot(struct usb_device *usbdev, u16 pid) -{ - int i, ret = 0; - struct img_header hdr; - struct fw_info fw_info; - loff_t pos = 0; - char *img_name = FW_DIR FW_UIMG; - const struct firmware *firm; - - ret = request_firmware(&firm, img_name, &usbdev->dev); - if (ret < 0) { - dev_err(&usbdev->dev, - "requesting firmware %s failed with error %d\n", - img_name, ret); - return ret; - } - - tx_buf = kmalloc(DOWNLOAD_SIZE, GFP_KERNEL); - if (!tx_buf) { - release_firmware(firm); - return -ENOMEM; - } - - if (firm->size < sizeof(hdr)) { - dev_err(&usbdev->dev, "Cannot read the image info.\n"); - ret = -EIO; - goto out; - } - memcpy(&hdr, firm->data, sizeof(hdr)); - - array_le32_to_cpu((u32 *)&hdr, 19); - - if (hdr.count > MAX_IMG_CNT) { - dev_err(&usbdev->dev, "Too many images. %d\n", hdr.count); - ret = -EINVAL; - goto out; - } - - for (i = 0; i < hdr.count; i++) { - if (hdr.offset[i] > hdr.len) { - dev_err(&usbdev->dev, - "Invalid offset. Entry = %d Offset = 0x%08x Image length = 0x%08x\n", - i, hdr.offset[i], hdr.len); - ret = -EINVAL; - goto out; - } - - pos = hdr.offset[i]; - if (firm->size < sizeof(fw_info) + pos) { - dev_err(&usbdev->dev, "Cannot read the FW info.\n"); - ret = -EIO; - goto out; - } - memcpy(&fw_info, firm->data + pos, sizeof(fw_info)); - - array_le32_to_cpu((u32 *)&fw_info, 8); - - if ((fw_info.id & 0xffff) != pid) - continue; - - pos = hdr.offset[i] + fw_info.kernel_offset; - if (firm->size < fw_info.kernel_len + pos) { - dev_err(&usbdev->dev, "Kernel FW is too small.\n"); - goto out; - } - - ret = download_image(usbdev, firm, pos, fw_info.kernel_len, - DN_KERNEL_MAGIC_NUMBER); - if (ret < 0) - goto out; - dev_info(&usbdev->dev, "GCT: Kernel download success.\n"); - - pos = hdr.offset[i] + fw_info.rootfs_offset; - if (firm->size < fw_info.rootfs_len + pos) { - dev_err(&usbdev->dev, "Filesystem FW is too small.\n"); - goto out; - } - ret = download_image(usbdev, firm, pos, fw_info.rootfs_len, - DN_ROOTFS_MAGIC_NUMBER); - if (ret < 0) - goto out; - dev_info(&usbdev->dev, "GCT: Filesystem download success.\n"); - - break; - } - - if (i == hdr.count) { - dev_err(&usbdev->dev, "Firmware for gsk%x is not installed.\n", - pid); - ret = -EINVAL; - } -out: - release_firmware(firm); - kfree(tx_buf); - return ret; -} - -/*#define GDM7205_PADDING 256 */ -#define DOWNLOAD_CHUCK 2048 -#define KERNEL_TYPE_STRING "linux" -#define FS_TYPE_STRING "rootfs" - -static int em_wait_ack(struct usb_device *usbdev, int send_zlp) -{ - int ack; - int ret = -1; - - if (send_zlp) { - /*Send ZLP*/ - ret = gdm_wibro_send(usbdev, NULL, 0); - if (ret < 0) - goto out; - } - - /*Wait for ACK*/ - ret = gdm_wibro_recv(usbdev, &ack, sizeof(ack)); - if (ret < 0) - goto out; -out: - return ret; -} - -static int em_download_image(struct usb_device *usbdev, const char *img_name, - char *type_string) -{ - char *buf = NULL; - loff_t pos = 0; - int ret = 0; - int len; - int img_len; - const struct firmware *firm; - #if defined(GDM7205_PADDING) - const int pad_size = GDM7205_PADDING; - #else - const int pad_size = 0; - #endif - - ret = request_firmware(&firm, img_name, &usbdev->dev); - if (ret < 0) { - dev_err(&usbdev->dev, - "requesting firmware %s failed with error %d\n", - img_name, ret); - return ret; - } - - buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL); - if (!buf) { - release_firmware(firm); - return -ENOMEM; - } - - strcpy(buf+pad_size, type_string); - ret = gdm_wibro_send(usbdev, buf, 
strlen(type_string)+pad_size); - if (ret < 0) - goto out; - - img_len = firm->size; - - if (img_len <= 0) { - ret = -1; - goto out; - } - - while (img_len > 0) { - if (img_len > DOWNLOAD_CHUCK) - len = DOWNLOAD_CHUCK; - else - len = img_len; /* the last chunk of data */ - - memcpy(buf+pad_size, firm->data + pos, len); - ret = gdm_wibro_send(usbdev, buf, len+pad_size); - - if (ret < 0) - goto out; - - img_len -= DOWNLOAD_CHUCK; - pos += DOWNLOAD_CHUCK; - - ret = em_wait_ack(usbdev, ((len+pad_size) % 512 == 0)); - if (ret < 0) - goto out; - } - - ret = em_wait_ack(usbdev, 1); - if (ret < 0) - goto out; - -out: - release_firmware(firm); - kfree(buf); - - return ret; -} - -static int em_fw_reset(struct usb_device *usbdev) -{ - /*Send ZLP*/ - return gdm_wibro_send(usbdev, NULL, 0); -} - -int usb_emergency(struct usb_device *usbdev) -{ - int ret; - const char *kern_name = FW_DIR FW_KERN; - const char *fs_name = FW_DIR FW_FS; - - ret = em_download_image(usbdev, kern_name, KERNEL_TYPE_STRING); - if (ret < 0) - return ret; - dev_err(&usbdev->dev, "GCT Emergency: Kernel download success.\n"); - - ret = em_download_image(usbdev, fs_name, FS_TYPE_STRING); - if (ret < 0) - return ret; - dev_info(&usbdev->dev, "GCT Emergency: Filesystem download success.\n"); - - ret = em_fw_reset(usbdev); - - return ret; -} diff --git a/drivers/staging/gdm72xx/usb_boot.h b/drivers/staging/gdm72xx/usb_boot.h deleted file mode 100644 index 5bf7190377e2..000000000000 --- a/drivers/staging/gdm72xx/usb_boot.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __GDM72XX_USB_BOOT_H__ -#define __GDM72XX_USB_BOOT_H__ - -struct usb_device; - -int usb_boot(struct usb_device *usbdev, u16 pid); -int usb_emergency(struct usb_device *usbdev); - -#endif /* __GDM72XX_USB_BOOT_H__ */ diff --git a/drivers/staging/gdm72xx/usb_ids.h b/drivers/staging/gdm72xx/usb_ids.h deleted file mode 100644 index 7afb9ba5fdba..000000000000 --- a/drivers/staging/gdm72xx/usb_ids.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __GDM72XX_USB_IDS_H__ -#define __GDM72XX_USB_IDS_H__ - -/*You can replace vendor-ID as yours.*/ -#define GCT_VID 0x1076 - -/*You can replace product-ID as yours.*/ -#define GCT_PID1 0x7e00 -#define GCT_PID2 0x7f00 - -#define USB_DEVICE_ID_MATCH_DEVICE_INTERFACE \ - (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS) - -#define USB_DEVICE_INTF(vend, prod, intf) \ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE_INTERFACE, \ - .idVendor = (vend), .idProduct = (prod), .bInterfaceClass = (intf) - -#define EMERGENCY_PID 0x720f -#define BL_PID_MASK 0xffc0 - -#define USB_DEVICE_BOOTLOADER(vid, pid) \ - {USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD)} - -#define USB_DEVICE_BOOTLOADER_DRV(vid, pid) \ - {USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD|B_DIFF_DL_DRV)} - -#define USB_DEVICE_CDC_DATA(vid, pid) \ - {USB_DEVICE_INTF((vid), (pid), USB_CLASS_CDC_DATA)} - -static const struct usb_device_id id_table[] = { - USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID1), - USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID1), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x1), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x2), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x3), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x4), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x5), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x6), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x7), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x8), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x9), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xa), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xb), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xc), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xd), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xe), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xf), - - USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID2), - USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID2), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x1), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x2), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x3), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x4), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x5), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x6), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x7), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x8), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x9), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xa), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xb), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xc), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xd), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xe), - USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xf), - - {USB_DEVICE(GCT_VID, EMERGENCY_PID)}, - { } -}; - -#endif /* __GDM72XX_USB_IDS_H__ */ diff --git a/drivers/staging/gdm72xx/wm_ioctl.h b/drivers/staging/gdm72xx/wm_ioctl.h deleted file mode 100644 index 631cb1d23c7e..000000000000 --- a/drivers/staging/gdm72xx/wm_ioctl.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __GDM72XX_WM_IOCTL_H__ -#define __GDM72XX_WM_IOCTL_H__ - -#if !defined(__KERNEL__) -#include <net/if.h> -#endif - -#define NETLINK_WIMAX 31 - -#define SIOCWMIOCTL SIOCDEVPRIVATE - -#define SIOCG_DATA 0x8D10 -#define SIOCS_DATA 0x8D11 - -enum { - SIOC_DATA_FSM, - SIOC_DATA_NETLIST, - SIOC_DATA_CONNNSP, - SIOC_DATA_CONNCOMP, - SIOC_DATA_PROFILEID, - - SIOC_DATA_END -}; - -#define SIOC_DATA_MAX 16 - -/* FSM */ -enum { - M_INIT = 0, - M_OPEN_OFF, - M_OPEN_ON, - M_SCAN, - M_CONNECTING, - M_CONNECTED, - M_FSM_END, - - C_INIT = 0, - C_CONNSTART, - C_ASSOCSTART, - C_RNG, - C_SBC, - C_AUTH, - C_REG, - C_DSX, - C_ASSOCCOMPLETE, - C_CONNCOMPLETE, - C_FSM_END, - - D_INIT = 0, - D_READY, - D_LISTEN, - D_IPACQUISITION, - - END_FSM -}; - -struct fsm_s { - int m_status; /*main status*/ - int c_status; /*connection status*/ - int d_status; /*oma-dm status*/ -}; - -struct data_s { - int size; - void *buf; -}; - -struct udata_s { - int size; - void __user *buf; -}; - -struct wm_req_s { - union { - char ifrn_name[IFNAMSIZ]; - } ifr_ifrn; - unsigned short cmd; - unsigned short data_id; - struct udata_s data; - -/* NOTE: sizeof(struct wm_req_s) must be less than sizeof(struct ifreq). */ -}; - -#ifndef ifr_name -#define ifr_name ifr_ifrn.ifrn_name -#endif - -#endif /* __GDM72XX_WM_IOCTL_H__ */ diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c index 364fdcdd3a06..bd559956f199 100644 --- a/drivers/staging/goldfish/goldfish_audio.c +++ b/drivers/staging/goldfish/goldfish_audio.c @@ -26,6 +26,7 @@ #include <linux/sched.h> #include <linux/dma-mapping.h> #include <linux/uaccess.h> +#include <linux/slab.h> #include <linux/goldfish.h> MODULE_AUTHOR("Google, Inc."); @@ -344,11 +345,18 @@ static int goldfish_audio_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_audio_of_match[] = { + { .compatible = "google,goldfish-audio", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_audio_of_match); + static struct platform_driver goldfish_audio_driver = { .probe = goldfish_audio_probe, .remove = goldfish_audio_remove, .driver = { - .name = "goldfish_audio" + .name = "goldfish_audio", + .of_match_table = goldfish_audio_of_match, } }; diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c index a3a10f9a2a2b..7b7c9786c162 100644 --- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c +++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c @@ -34,7 +34,7 @@ #define DEVICE_NAME "device" #define CLASS_NAME "fpgaboot" -static uint8_t bits_magic[] = { +static u8 bits_magic[] = { 0x0, 0x9, 0xf, 0xf0, 0xf, 0xf0, 0xf, 0xf0, 0xf, 0xf0, 0x0, 0x0, 0x1}; @@ -54,7 +54,7 @@ static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize) static void readinfo_bitstream(char *bitdata, char *buf, int *offset) { char tbuf[64]; - int32_t len; + s32 len; /* read section char */ read_bitstream(bitdata, tbuf, offset, 1); @@ -281,17 +281,12 @@ static int init_driver(void) return PTR_ERR_OR_ZERO(firmware_pdev); } -static void finish_driver(void) -{ - platform_device_unregister(firmware_pdev); -} - static int gs_fpgaboot(void) { int err; struct fpgaimage *fimage; - fimage = kmalloc(sizeof(struct fpgaimage), GFP_KERNEL); + fimage = kmalloc(sizeof(*fimage), GFP_KERNEL); if (!fimage) return -ENOMEM; @@ -370,14 +365,14 @@ static int __init gs_fpgaboot_init(void) return 0; errout: - finish_driver(); + platform_device_unregister(firmware_pdev); return err; } static void __exit gs_fpgaboot_exit(void) { - finish_driver(); 
+ platform_device_unregister(firmware_pdev); pr_info("FPGA image download module removed\n"); } diff --git a/Documentation/isdn/README.act2000 b/drivers/staging/i4l/Documentation/README.act2000 index ce7115e7f4ce..ce7115e7f4ce 100644 --- a/Documentation/isdn/README.act2000 +++ b/drivers/staging/i4l/Documentation/README.act2000 diff --git a/Documentation/isdn/README.icn b/drivers/staging/i4l/Documentation/README.icn index 13f833d4e910..13f833d4e910 100644 --- a/Documentation/isdn/README.icn +++ b/drivers/staging/i4l/Documentation/README.icn diff --git a/Documentation/isdn/README.pcbit b/drivers/staging/i4l/Documentation/README.pcbit index 5125002282e5..5125002282e5 100644 --- a/Documentation/isdn/README.pcbit +++ b/drivers/staging/i4l/Documentation/README.pcbit diff --git a/Documentation/isdn/README.sc b/drivers/staging/i4l/Documentation/README.sc index 1153cd926059..1153cd926059 100644 --- a/Documentation/isdn/README.sc +++ b/drivers/staging/i4l/Documentation/README.sc diff --git a/drivers/staging/i4l/Kconfig b/drivers/staging/i4l/Kconfig new file mode 100644 index 000000000000..920216e88de7 --- /dev/null +++ b/drivers/staging/i4l/Kconfig @@ -0,0 +1,13 @@ +# +# Old ISDN4Linux config +# +menu "Old ISDN4Linux (deprecated)" + depends on ISDN_I4L + +source "drivers/staging/i4l/icn/Kconfig" + +source "drivers/staging/i4l/pcbit/Kconfig" + +source "drivers/staging/i4l/act2000/Kconfig" + +endmenu diff --git a/drivers/staging/i4l/Makefile b/drivers/staging/i4l/Makefile new file mode 100644 index 000000000000..158b87093db5 --- /dev/null +++ b/drivers/staging/i4l/Makefile @@ -0,0 +1,5 @@ +# Makefile for the old ISDN I4L subsystem and device drivers. + +obj-$(CONFIG_ISDN_DRV_ICN) += icn/ +obj-$(CONFIG_ISDN_DRV_PCBIT) += pcbit/ +obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/ diff --git a/drivers/staging/i4l/TODO b/drivers/staging/i4l/TODO new file mode 100644 index 000000000000..6fe2c08bec7a --- /dev/null +++ b/drivers/staging/i4l/TODO @@ -0,0 +1,3 @@ +* The icn, pcbit and act2000 drivers are dead, remove them in 2017 + after another longterm kernel has been released, just in the + unlikely case someone still has this hardware. 
diff --git a/drivers/isdn/act2000/Kconfig b/drivers/staging/i4l/act2000/Kconfig index fa2673fc69c2..fa2673fc69c2 100644 --- a/drivers/isdn/act2000/Kconfig +++ b/drivers/staging/i4l/act2000/Kconfig diff --git a/drivers/isdn/act2000/Makefile b/drivers/staging/i4l/act2000/Makefile index 05e582fb5c00..05e582fb5c00 100644 --- a/drivers/isdn/act2000/Makefile +++ b/drivers/staging/i4l/act2000/Makefile diff --git a/drivers/isdn/act2000/act2000.h b/drivers/staging/i4l/act2000/act2000.h index 321d437f579e..321d437f579e 100644 --- a/drivers/isdn/act2000/act2000.h +++ b/drivers/staging/i4l/act2000/act2000.h diff --git a/drivers/isdn/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c index b5fad29a9ba6..b5fad29a9ba6 100644 --- a/drivers/isdn/act2000/act2000_isa.c +++ b/drivers/staging/i4l/act2000/act2000_isa.c diff --git a/drivers/isdn/act2000/act2000_isa.h b/drivers/staging/i4l/act2000/act2000_isa.h index 1a728984ede1..1a728984ede1 100644 --- a/drivers/isdn/act2000/act2000_isa.h +++ b/drivers/staging/i4l/act2000/act2000_isa.h diff --git a/drivers/isdn/act2000/capi.c b/drivers/staging/i4l/act2000/capi.c index 3f66ca20b5e5..3f66ca20b5e5 100644 --- a/drivers/isdn/act2000/capi.c +++ b/drivers/staging/i4l/act2000/capi.c diff --git a/drivers/isdn/act2000/capi.h b/drivers/staging/i4l/act2000/capi.h index 01ccdecd43f7..01ccdecd43f7 100644 --- a/drivers/isdn/act2000/capi.h +++ b/drivers/staging/i4l/act2000/capi.h diff --git a/drivers/isdn/act2000/module.c b/drivers/staging/i4l/act2000/module.c index 68073d0da0e3..68073d0da0e3 100644 --- a/drivers/isdn/act2000/module.c +++ b/drivers/staging/i4l/act2000/module.c diff --git a/drivers/isdn/icn/Kconfig b/drivers/staging/i4l/icn/Kconfig index 4534f21a1852..4534f21a1852 100644 --- a/drivers/isdn/icn/Kconfig +++ b/drivers/staging/i4l/icn/Kconfig diff --git a/drivers/isdn/icn/Makefile b/drivers/staging/i4l/icn/Makefile index d9b476fcf384..d9b476fcf384 100644 --- a/drivers/isdn/icn/Makefile +++ b/drivers/staging/i4l/icn/Makefile diff --git a/drivers/isdn/icn/icn.c b/drivers/staging/i4l/icn/icn.c index 358a574d9e8b..46d957c34be1 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/staging/i4l/icn/icn.c @@ -718,7 +718,7 @@ icn_sendbuf(int channel, int ack, struct sk_buff *skb, icn_card *card) return 0; if (card->sndcount[channel] > ICN_MAX_SQUEUE) return 0; -#warning TODO test headroom or use skb->nb to flag ACK + /* TODO test headroom or use skb->nb to flag ACK */ nskb = skb_clone(skb, GFP_ATOMIC); if (nskb) { /* Push ACK flag as one diff --git a/drivers/isdn/icn/icn.h b/drivers/staging/i4l/icn/icn.h index f8f2e76d34bf..f8f2e76d34bf 100644 --- a/drivers/isdn/icn/icn.h +++ b/drivers/staging/i4l/icn/icn.h diff --git a/drivers/isdn/pcbit/Kconfig b/drivers/staging/i4l/pcbit/Kconfig index e9b2dd85d410..e9b2dd85d410 100644 --- a/drivers/isdn/pcbit/Kconfig +++ b/drivers/staging/i4l/pcbit/Kconfig diff --git a/drivers/isdn/pcbit/Makefile b/drivers/staging/i4l/pcbit/Makefile index 2d026c3242e8..2d026c3242e8 100644 --- a/drivers/isdn/pcbit/Makefile +++ b/drivers/staging/i4l/pcbit/Makefile diff --git a/drivers/isdn/pcbit/callbacks.c b/drivers/staging/i4l/pcbit/callbacks.c index efb6d6a3639a..efb6d6a3639a 100644 --- a/drivers/isdn/pcbit/callbacks.c +++ b/drivers/staging/i4l/pcbit/callbacks.c diff --git a/drivers/isdn/pcbit/callbacks.h b/drivers/staging/i4l/pcbit/callbacks.h index a036b4a7ffad..a036b4a7ffad 100644 --- a/drivers/isdn/pcbit/callbacks.h +++ b/drivers/staging/i4l/pcbit/callbacks.h diff --git a/drivers/isdn/pcbit/capi.c b/drivers/staging/i4l/pcbit/capi.c index 
4e3cbf857d60..4e3cbf857d60 100644 --- a/drivers/isdn/pcbit/capi.c +++ b/drivers/staging/i4l/pcbit/capi.c diff --git a/drivers/isdn/pcbit/capi.h b/drivers/staging/i4l/pcbit/capi.h index 635f63476944..635f63476944 100644 --- a/drivers/isdn/pcbit/capi.h +++ b/drivers/staging/i4l/pcbit/capi.h diff --git a/drivers/isdn/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c index 4172e22ae7ed..4172e22ae7ed 100644 --- a/drivers/isdn/pcbit/drv.c +++ b/drivers/staging/i4l/pcbit/drv.c diff --git a/drivers/isdn/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c index b2262ba6f0c9..b2262ba6f0c9 100644 --- a/drivers/isdn/pcbit/edss1.c +++ b/drivers/staging/i4l/pcbit/edss1.c diff --git a/drivers/isdn/pcbit/edss1.h b/drivers/staging/i4l/pcbit/edss1.h index 2f6b3a8edfba..2f6b3a8edfba 100644 --- a/drivers/isdn/pcbit/edss1.h +++ b/drivers/staging/i4l/pcbit/edss1.h diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/staging/i4l/pcbit/layer2.c index 46e1240ae074..46e1240ae074 100644 --- a/drivers/isdn/pcbit/layer2.c +++ b/drivers/staging/i4l/pcbit/layer2.c diff --git a/drivers/isdn/pcbit/layer2.h b/drivers/staging/i4l/pcbit/layer2.h index be1327bc162a..be1327bc162a 100644 --- a/drivers/isdn/pcbit/layer2.h +++ b/drivers/staging/i4l/pcbit/layer2.h diff --git a/drivers/isdn/pcbit/module.c b/drivers/staging/i4l/pcbit/module.c index 0a59bd0b8210..0a59bd0b8210 100644 --- a/drivers/isdn/pcbit/module.c +++ b/drivers/staging/i4l/pcbit/module.c diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/staging/i4l/pcbit/pcbit.h index 0a5a99440a80..0a5a99440a80 100644 --- a/drivers/isdn/pcbit/pcbit.h +++ b/drivers/staging/i4l/pcbit/pcbit.h diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light b/drivers/staging/iio/Documentation/sysfs-bus-iio-light index 17e5c9c515d4..7c7cd8456060 100644 --- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light +++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-light @@ -1,31 +1,3 @@ - -What: /sys/bus/iio/devices/device[n]/range -KernelVersion: 2.6.37 -Contact: linux-iio@vger.kernel.org -Description: - Hardware dependent ADC Full Scale Range used for some ambient - light sensors in calculating lux. - -What: /sys/bus/iio/devices/device[n]/range_available -KernelVersion: 2.6.37 -Contact: linux-iio@vger.kernel.org -Description: - Hardware dependent supported vales for ADC Full Scale Range. - -What: /sys/bus/iio/devices/device[n]/adc_resolution -KernelVersion: 2.6.37 -Contact: linux-iio@vger.kernel.org -Description: - Hardware dependent ADC resolution of the ambient light sensor - used in calculating the lux. - -What: /sys/bus/iio/devices/device[n]/adc_resolution_available -KernelVersion: 2.6.37 -Contact: linux-iio@vger.kernel.org -Description: - Hardware dependent list of possible values supported for the - adc_resolution of the given sensor. 
- What: /sys/bus/iio/devices/device[n]/in_illuminance0[_input|_raw] KernelVersion: 2.6.35 Contact: linux-iio@vger.kernel.org diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig index 0e044cb0def8..8abc1ab3c0c7 100644 --- a/drivers/staging/iio/Kconfig +++ b/drivers/staging/iio/Kconfig @@ -12,7 +12,6 @@ source "drivers/staging/iio/frequency/Kconfig" source "drivers/staging/iio/gyro/Kconfig" source "drivers/staging/iio/impedance-analyzer/Kconfig" source "drivers/staging/iio/light/Kconfig" -source "drivers/staging/iio/magnetometer/Kconfig" source "drivers/staging/iio/meter/Kconfig" source "drivers/staging/iio/resolver/Kconfig" source "drivers/staging/iio/trigger/Kconfig" diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile index 3e616b4437f5..0cfd05d5bf49 100644 --- a/drivers/staging/iio/Makefile +++ b/drivers/staging/iio/Makefile @@ -10,7 +10,6 @@ obj-y += frequency/ obj-y += gyro/ obj-y += impedance-analyzer/ obj-y += light/ -obj-y += magnetometer/ obj-y += meter/ obj-y += resolver/ obj-y += trigger/ diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO index c22a0edd1528..93a896883e37 100644 --- a/drivers/staging/iio/TODO +++ b/drivers/staging/iio/TODO @@ -58,14 +58,6 @@ different requirements. This one suits mid range frequencies (100Hz - 4kHz). 2) Lots of testing -Periodic Timer trigger -1) Move to a more general hardware periodic timer request -subsystem. Current approach is abusing purpose of RTC. -Initial discussions have taken place, but no actual code -is in place as yet. This topic will be reopened on lkml -shortly. I don't really envision this patch being merged -in anything like its current form. - GPIO trigger 1) Add control over the type of interrupt etc. This will necessitate a header that is also visible from arch board diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h index 3f24c629be6f..6bd3d4d5bc9d 100644 --- a/drivers/staging/iio/accel/lis3l02dq.h +++ b/drivers/staging/iio/accel/lis3l02dq.h @@ -67,7 +67,8 @@ #define LIS3L02DQ_REG_CTRL_2_THREE_WIRE_SPI_MODE 0x02 /* Data alignment, default is 12 bit right justified - * - option for 16 bit left justified */ + * - option for 16 bit left justified + */ #define LIS3L02DQ_REG_CTRL_2_DATA_ALIGNMENT_16_BIT_LEFT_JUSTIFIED 0x01 /* Interrupt related stuff */ @@ -77,7 +78,8 @@ #define LIS3L02DQ_REG_WAKE_UP_CFG_BOOLEAN_AND 0x80 /* Latch interrupt request, - * if on ack must be given by reading the ack register */ + * if on ack must be given by reading the ack register + */ #define LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC 0x40 /* Z Interrupt on High (above threshold) */ @@ -94,7 +96,8 @@ #define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW 0x01 /* Register that gives description of what caused interrupt - * - latched if set in CFG_ADDRES */ + * - latched if set in CFG_ADDRES + */ #define LIS3L02DQ_REG_WAKE_UP_SRC_ADDR 0x24 /* top bit ignored */ /* Interrupt Active */ @@ -123,7 +126,8 @@ #define LIS3L02DQ_REG_STATUS_X_NEW_DATA 0x01 /* The accelerometer readings - low and high bytes. - * Form of high byte dependent on justification set in ctrl reg */ + * Form of high byte dependent on justification set in ctrl reg + */ #define LIS3L02DQ_REG_OUT_X_L_ADDR 0x28 #define LIS3L02DQ_REG_OUT_X_H_ADDR 0x29 #define LIS3L02DQ_REG_OUT_Y_L_ADDR 0x2A @@ -132,7 +136,8 @@ #define LIS3L02DQ_REG_OUT_Z_H_ADDR 0x2D /* Threshold values for all axes and both above and below thresholds - * - i.e. there is only one value */ + * - i.e. 
there is only one value + */ #define LIS3L02DQ_REG_THS_L_ADDR 0x2E #define LIS3L02DQ_REG_THS_H_ADDR 0x2F diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c index 7939ae6378d7..7a6fed3f2d3f 100644 --- a/drivers/staging/iio/accel/lis3l02dq_core.c +++ b/drivers/staging/iio/accel/lis3l02dq_core.c @@ -567,7 +567,7 @@ static int lis3l02dq_read_event_config(struct iio_dev *indio_dev, { u8 val; int ret; - u8 mask = (1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING))); + u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING)); ret = lis3l02dq_spi_read_reg_8(indio_dev, LIS3L02DQ_REG_WAKE_UP_CFG_ADDR, @@ -622,7 +622,7 @@ static int lis3l02dq_write_event_config(struct iio_dev *indio_dev, u8 val, control; u8 currentlyset; bool changed = false; - u8 mask = (1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING))); + u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING)); mutex_lock(&indio_dev->mlock); /* read current control */ diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c index 02e930c55570..a8f533af9eca 100644 --- a/drivers/staging/iio/accel/sca3000_core.c +++ b/drivers/staging/iio/accel/sca3000_core.c @@ -216,8 +216,7 @@ static int sca3000_read_ctrl_reg(struct sca3000_state *st, ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1); if (ret) goto error_ret; - else - return st->rx[0]; + return st->rx[0]; error_ret: return ret; } diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c index 1920dc60cf3d..d1cb9b9cf22b 100644 --- a/drivers/staging/iio/accel/sca3000_ring.c +++ b/drivers/staging/iio/accel/sca3000_ring.c @@ -99,8 +99,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r, ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1); if (ret) goto error_ret; - else - num_available = st->rx[0]; + num_available = st->rx[0]; /* * num_available is the total number of samples available * i.e. number of time points * number of channels. diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig index b9519be90fda..deff89973d53 100644 --- a/drivers/staging/iio/adc/Kconfig +++ b/drivers/staging/iio/adc/Kconfig @@ -59,12 +59,12 @@ config AD7816 temperature sensors and ADC. config AD7192 - tristate "Analog Devices AD7190 AD7192 AD7195 ADC driver" + tristate "Analog Devices AD7190 AD7192 AD7193 AD7195 ADC driver" depends on SPI select AD_SIGMA_DELTA help Say yes here to build support for Analog Devices AD7190, - AD7192 or AD7195 SPI analog to digital converters (ADC). + AD7192, AD7193 or AD7195 SPI analog to digital converters (ADC). If unsure, say N (but it's safe to say "Y"). To compile this driver as a module, choose M here: the @@ -92,20 +92,6 @@ config LPC32XX_ADC activate only one via device tree selection. Provides direct access via sysfs. -config MXS_LRADC - tristate "Freescale i.MX23/i.MX28 LRADC" - depends on (ARCH_MXS || COMPILE_TEST) && HAS_IOMEM - depends on INPUT - select STMP_DEVICE - select IIO_BUFFER - select IIO_TRIGGERED_BUFFER - help - Say yes here to build support for i.MX23/i.MX28 LRADC convertor - built into these chips. - - To compile this driver as a module, choose M here: the - module will be called mxs-lradc. 
- config SPEAR_ADC tristate "ST SPEAr ADC" depends on PLAT_SPEAR || COMPILE_TEST diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile index 0c87ce3530f8..3cdd83ccec8e 100644 --- a/drivers/staging/iio/adc/Makefile +++ b/drivers/staging/iio/adc/Makefile @@ -12,5 +12,4 @@ obj-$(CONFIG_AD7816) += ad7816.o obj-$(CONFIG_AD7192) += ad7192.o obj-$(CONFIG_AD7280) += ad7280a.o obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o -obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o obj-$(CONFIG_SPEAR_ADC) += spear_adc.o diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index 92211039ffa9..f843f19cf675 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c @@ -1,7 +1,7 @@ /* - * AD7190 AD7192 AD7195 SPI ADC driver + * AD7190 AD7192 AD7193 AD7195 SPI ADC driver * - * Copyright 2011-2012 Analog Devices Inc. + * Copyright 2011-2015 Analog Devices Inc. * * Licensed under the GPL-2. */ @@ -92,26 +92,43 @@ #define AD7192_CONF_CHOP BIT(23) /* CHOP enable */ #define AD7192_CONF_REFSEL BIT(20) /* REFIN1/REFIN2 Reference Select */ -#define AD7192_CONF_CHAN(x) (((1 << (x)) & 0xFF) << 8) /* Channel select */ -#define AD7192_CONF_CHAN_MASK (0xFF << 8) /* Channel select mask */ +#define AD7192_CONF_CHAN(x) ((x) << 8) /* Channel select */ +#define AD7192_CONF_CHAN_MASK (0x7FF << 8) /* Channel select mask */ #define AD7192_CONF_BURN BIT(7) /* Burnout current enable */ #define AD7192_CONF_REFDET BIT(6) /* Reference detect enable */ #define AD7192_CONF_BUF BIT(4) /* Buffered Mode Enable */ #define AD7192_CONF_UNIPOLAR BIT(3) /* Unipolar/Bipolar Enable */ #define AD7192_CONF_GAIN(x) ((x) & 0x7) /* Gain Select */ -#define AD7192_CH_AIN1P_AIN2M 0 /* AIN1(+) - AIN2(-) */ -#define AD7192_CH_AIN3P_AIN4M 1 /* AIN3(+) - AIN4(-) */ -#define AD7192_CH_TEMP 2 /* Temp Sensor */ -#define AD7192_CH_AIN2P_AIN2M 3 /* AIN2(+) - AIN2(-) */ -#define AD7192_CH_AIN1 4 /* AIN1 - AINCOM */ -#define AD7192_CH_AIN2 5 /* AIN2 - AINCOM */ -#define AD7192_CH_AIN3 6 /* AIN3 - AINCOM */ -#define AD7192_CH_AIN4 7 /* AIN4 - AINCOM */ +#define AD7192_CH_AIN1P_AIN2M BIT(0) /* AIN1(+) - AIN2(-) */ +#define AD7192_CH_AIN3P_AIN4M BIT(1) /* AIN3(+) - AIN4(-) */ +#define AD7192_CH_TEMP BIT(2) /* Temp Sensor */ +#define AD7192_CH_AIN2P_AIN2M BIT(3) /* AIN2(+) - AIN2(-) */ +#define AD7192_CH_AIN1 BIT(4) /* AIN1 - AINCOM */ +#define AD7192_CH_AIN2 BIT(5) /* AIN2 - AINCOM */ +#define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */ +#define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */ + +#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */ +#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */ +#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */ +#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */ +#define AD7193_CH_TEMP 0x100 /* Temp sensor */ +#define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */ +#define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */ +#define AD7193_CH_AIN2 0x402 /* AIN2 - AINCOM */ +#define AD7193_CH_AIN3 0x404 /* AIN3 - AINCOM */ +#define AD7193_CH_AIN4 0x408 /* AIN4 - AINCOM */ +#define AD7193_CH_AIN5 0x410 /* AIN5 - AINCOM */ +#define AD7193_CH_AIN6 0x420 /* AIN6 - AINCOM */ +#define AD7193_CH_AIN7 0x440 /* AIN7 - AINCOM */ +#define AD7193_CH_AIN8 0x480 /* AIN8 - AINCOM */ +#define AD7193_CH_AINCOM 0x600 /* AINCOM - AINCOM */ /* ID Register Bit Designations (AD7192_REG_ID) */ #define ID_AD7190 0x4 #define ID_AD7192 0x0 +#define ID_AD7193 0x2 #define ID_AD7195 0x6 #define AD7192_ID_MASK 0x0F @@ -236,7 +253,7 @@ static int ad7192_setup(struct ad7192_state
*st, st->mclk = pdata->ext_clk_hz; else st->mclk = AD7192_INT_FREQ_MHZ; - break; + break; default: ret = -EINVAL; goto out; @@ -607,6 +624,24 @@ static const struct iio_chan_spec ad7192_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(8), }; +static const struct iio_chan_spec ad7193_channels[] = { + AD_SD_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M, 24, 32, 0), + AD_SD_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M, 24, 32, 0), + AD_SD_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M, 24, 32, 0), + AD_SD_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M, 24, 32, 0), + AD_SD_TEMP_CHANNEL(4, AD7193_CH_TEMP, 24, 32, 0), + AD_SD_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M, 24, 32, 0), + AD_SD_CHANNEL(6, 1, AD7193_CH_AIN1, 24, 32, 0), + AD_SD_CHANNEL(7, 2, AD7193_CH_AIN2, 24, 32, 0), + AD_SD_CHANNEL(8, 3, AD7193_CH_AIN3, 24, 32, 0), + AD_SD_CHANNEL(9, 4, AD7193_CH_AIN4, 24, 32, 0), + AD_SD_CHANNEL(10, 5, AD7193_CH_AIN5, 24, 32, 0), + AD_SD_CHANNEL(11, 6, AD7193_CH_AIN6, 24, 32, 0), + AD_SD_CHANNEL(12, 7, AD7193_CH_AIN7, 24, 32, 0), + AD_SD_CHANNEL(13, 8, AD7193_CH_AIN8, 24, 32, 0), + IIO_CHAN_SOFT_TIMESTAMP(14), +}; + static int ad7192_probe(struct spi_device *spi) { const struct ad7192_platform_data *pdata = dev_get_platdata(&spi->dev); @@ -651,8 +686,18 @@ static int ad7192_probe(struct spi_device *spi) indio_dev->dev.parent = &spi->dev; indio_dev->name = spi_get_device_id(spi)->name; indio_dev->modes = INDIO_DIRECT_MODE; - indio_dev->channels = ad7192_channels; - indio_dev->num_channels = ARRAY_SIZE(ad7192_channels); + + switch (st->devid) { + case ID_AD7193: + indio_dev->channels = ad7193_channels; + indio_dev->num_channels = ARRAY_SIZE(ad7193_channels); + break; + default: + indio_dev->channels = ad7192_channels; + indio_dev->num_channels = ARRAY_SIZE(ad7192_channels); + break; + } + if (st->devid == ID_AD7195) indio_dev->info = &ad7195_info; else @@ -699,6 +744,7 @@ static int ad7192_remove(struct spi_device *spi) static const struct spi_device_id ad7192_id[] = { {"ad7190", ID_AD7190}, {"ad7192", ID_AD7192}, + {"ad7193", ID_AD7193}, {"ad7195", ID_AD7195}, {} }; @@ -715,5 +761,5 @@ static struct spi_driver ad7192_driver = { module_spi_driver(ad7192_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); -MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7195 ADC"); +MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7193, AD7195 ADC"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index f45ebedb7a05..62e5ecacf634 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -214,8 +214,8 @@ static int __ad7280_read32(struct ad7280_state *st, unsigned *val) static int ad7280_write(struct ad7280_state *st, unsigned devaddr, unsigned addr, bool all, unsigned val) { - unsigned reg = (devaddr << 27 | addr << 21 | - (val & 0xFF) << 13 | all << 12); + unsigned reg = devaddr << 27 | addr << 21 | + (val & 0xFF) << 13 | all << 12; reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2; st->buf[0] = cpu_to_be32(reg); diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h index ec89d055cf58..cca946924c58 100644 --- a/drivers/staging/iio/adc/ad7606.h +++ b/drivers/staging/iio/adc/ad7606.h @@ -85,8 +85,6 @@ struct ad7606_bus_ops { int (*read_block)(struct device *, int, void *); }; -void ad7606_suspend(struct iio_dev *indio_dev); -void ad7606_resume(struct iio_dev *indio_dev); struct iio_dev *ad7606_probe(struct device *dev, int irq, void __iomem *base_address, unsigned id, const 
struct ad7606_bus_ops *bops); @@ -101,4 +99,12 @@ enum ad7606_supported_device_ids { int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev); void ad7606_ring_cleanup(struct iio_dev *indio_dev); + +#ifdef CONFIG_PM_SLEEP +extern const struct dev_pm_ops ad7606_pm_ops; +#define AD7606_PM_OPS (&ad7606_pm_ops) +#else +#define AD7606_PM_OPS NULL +#endif + #endif /* IIO_ADC_AD7606_H_ */ diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c index 2c9d8b7de9f5..fe6caeee0843 100644 --- a/drivers/staging/iio/adc/ad7606_core.c +++ b/drivers/staging/iio/adc/ad7606_core.c @@ -250,7 +250,8 @@ static const struct attribute_group ad7606_attribute_group_range = { }, \ } -static const struct iio_chan_spec ad7606_8_channels[] = { +static const struct iio_chan_spec ad7606_channels[] = { + IIO_CHAN_SOFT_TIMESTAMP(8), AD7606_CHANNEL(0), AD7606_CHANNEL(1), AD7606_CHANNEL(2), @@ -259,25 +260,6 @@ static const struct iio_chan_spec ad7606_8_channels[] = { AD7606_CHANNEL(5), AD7606_CHANNEL(6), AD7606_CHANNEL(7), - IIO_CHAN_SOFT_TIMESTAMP(8), -}; - -static const struct iio_chan_spec ad7606_6_channels[] = { - AD7606_CHANNEL(0), - AD7606_CHANNEL(1), - AD7606_CHANNEL(2), - AD7606_CHANNEL(3), - AD7606_CHANNEL(4), - AD7606_CHANNEL(5), - IIO_CHAN_SOFT_TIMESTAMP(6), -}; - -static const struct iio_chan_spec ad7606_4_channels[] = { - AD7606_CHANNEL(0), - AD7606_CHANNEL(1), - AD7606_CHANNEL(2), - AD7606_CHANNEL(3), - IIO_CHAN_SOFT_TIMESTAMP(4), }; static const struct ad7606_chip_info ad7606_chip_info_tbl[] = { @@ -287,20 +269,20 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = { [ID_AD7606_8] = { .name = "ad7606", .int_vref_mv = 2500, - .channels = ad7606_8_channels, - .num_channels = 8, + .channels = ad7606_channels, + .num_channels = 9, }, [ID_AD7606_6] = { .name = "ad7606-6", .int_vref_mv = 2500, - .channels = ad7606_6_channels, - .num_channels = 6, + .channels = ad7606_channels, + .num_channels = 7, }, [ID_AD7606_4] = { .name = "ad7606-4", .int_vref_mv = 2500, - .channels = ad7606_4_channels, - .num_channels = 4, + .channels = ad7606_channels, + .num_channels = 5, }, }; @@ -578,8 +560,11 @@ int ad7606_remove(struct iio_dev *indio_dev, int irq) } EXPORT_SYMBOL_GPL(ad7606_remove); -void ad7606_suspend(struct iio_dev *indio_dev) +#ifdef CONFIG_PM_SLEEP + +static int ad7606_suspend(struct device *dev) { + struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); if (gpio_is_valid(st->pdata->gpio_stby)) { @@ -587,11 +572,13 @@ void ad7606_suspend(struct iio_dev *indio_dev) gpio_set_value(st->pdata->gpio_range, 1); gpio_set_value(st->pdata->gpio_stby, 0); } + + return 0; } -EXPORT_SYMBOL_GPL(ad7606_suspend); -void ad7606_resume(struct iio_dev *indio_dev) +static int ad7606_resume(struct device *dev) { + struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); if (gpio_is_valid(st->pdata->gpio_stby)) { @@ -602,8 +589,14 @@ void ad7606_resume(struct iio_dev *indio_dev) gpio_set_value(st->pdata->gpio_stby, 1); ad7606_reset(st); } + + return 0; } -EXPORT_SYMBOL_GPL(ad7606_resume); + +SIMPLE_DEV_PM_OPS(ad7606_pm_ops, ad7606_suspend, ad7606_resume); +EXPORT_SYMBOL_GPL(ad7606_pm_ops); + +#endif MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Analog Devices AD7606 ADC"); diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c index adc370ee8632..84d23930fdde 100644 --- a/drivers/staging/iio/adc/ad7606_par.c +++ 
b/drivers/staging/iio/adc/ad7606_par.c @@ -90,36 +90,6 @@ static int ad7606_par_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int ad7606_par_suspend(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - - ad7606_suspend(indio_dev); - - return 0; -} - -static int ad7606_par_resume(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - - ad7606_resume(indio_dev); - - return 0; -} - -static const struct dev_pm_ops ad7606_pm_ops = { - .suspend = ad7606_par_suspend, - .resume = ad7606_par_resume, -}; - -#define AD7606_PAR_PM_OPS (&ad7606_pm_ops) - -#else -#define AD7606_PAR_PM_OPS NULL -#endif /* CONFIG_PM */ - static const struct platform_device_id ad7606_driver_ids[] = { { .name = "ad7606-8", @@ -142,7 +112,7 @@ static struct platform_driver ad7606_driver = { .id_table = ad7606_driver_ids, .driver = { .name = "ad7606", - .pm = AD7606_PAR_PM_OPS, + .pm = AD7606_PM_OPS, }, }; diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c index cbb36317200e..d873a5164595 100644 --- a/drivers/staging/iio/adc/ad7606_spi.c +++ b/drivers/staging/iio/adc/ad7606_spi.c @@ -62,36 +62,6 @@ static int ad7606_spi_remove(struct spi_device *spi) return ad7606_remove(indio_dev, spi->irq); } -#ifdef CONFIG_PM -static int ad7606_spi_suspend(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - - ad7606_suspend(indio_dev); - - return 0; -} - -static int ad7606_spi_resume(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - - ad7606_resume(indio_dev); - - return 0; -} - -static const struct dev_pm_ops ad7606_pm_ops = { - .suspend = ad7606_spi_suspend, - .resume = ad7606_spi_resume, -}; - -#define AD7606_SPI_PM_OPS (&ad7606_pm_ops) - -#else -#define AD7606_SPI_PM_OPS NULL -#endif - static const struct spi_device_id ad7606_id[] = { {"ad7606-8", ID_AD7606_8}, {"ad7606-6", ID_AD7606_6}, @@ -103,7 +73,7 @@ MODULE_DEVICE_TABLE(spi, ad7606_id); static struct spi_driver ad7606_driver = { .driver = { .name = "ad7606", - .pm = AD7606_SPI_PM_OPS, + .pm = AD7606_PM_OPS, }, .probe = ad7606_spi_probe, .remove = ad7606_spi_remove, diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c index 22260512cf01..ac3735c7f4a9 100644 --- a/drivers/staging/iio/adc/ad7816.c +++ b/drivers/staging/iio/adc/ad7816.c @@ -296,14 +296,14 @@ static inline ssize_t ad7816_set_oti(struct device *dev, dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id); return -EINVAL; } else if (chip->channel_id == 0) { - if (ret || value < AD7816_BOUND_VALUE_MIN || + if (value < AD7816_BOUND_VALUE_MIN || value > AD7816_BOUND_VALUE_MAX) return -EINVAL; data = (u8)(value - AD7816_BOUND_VALUE_MIN + AD7816_BOUND_VALUE_BASE); } else { - if (ret || value < AD7816_BOUND_VALUE_BASE || value > 255) + if (value < AD7816_BOUND_VALUE_BASE || value > 255) return -EINVAL; data = (u8)value; diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c index 712cae0e8608..5dd61f6a57b9 100644 --- a/drivers/staging/iio/adc/spear_adc.c +++ b/drivers/staging/iio/adc/spear_adc.c @@ -262,6 +262,7 @@ static int spear_adc_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct spear_adc_state *st; + struct resource *res; struct iio_dev *indio_dev = NULL; int ret = -ENODEV; int irq; @@ -280,45 +281,45 @@ static int spear_adc_probe(struct platform_device *pdev) * (e.g. SPEAr3xx). 
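The ad7606 hunks above fold the duplicated parallel-bus and SPI-bus suspend/resume wrappers into a single dev_pm_ops table exported from the core driver and guarded by CONFIG_PM_SLEEP. A minimal sketch of that arrangement follows; the foo_* names are placeholders for illustration only, not the real ad7606 symbols, and <linux/pm.h>, <linux/iio/iio.h> and <linux/spi/spi.h> are assumed to be included.

/* shared header: the bus glue only ever sees the FOO_PM_OPS macro */
#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops foo_pm_ops;
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

/* core driver: one implementation serves every bus front end */
#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	dev_dbg(&indio_dev->dev, "entering standby\n");
	/* drive the standby/range GPIOs from iio_priv(indio_dev) here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	dev_dbg(&indio_dev->dev, "leaving standby\n");
	return 0;
}

SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
EXPORT_SYMBOL_GPL(foo_pm_ops);
#endif

/* bus driver (SPI shown): no per-bus wrapper functions are needed any more */
static struct spi_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= FOO_PM_OPS,
	},
};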
Let's provide two register base addresses * to support multi-arch kernels. */ - st->adc_base_spear6xx = of_iomap(np, 0); - if (!st->adc_base_spear6xx) { - dev_err(dev, "failed mapping memory\n"); - return -ENOMEM; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + st->adc_base_spear6xx = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(st->adc_base_spear6xx)) + return PTR_ERR(st->adc_base_spear6xx); + st->adc_base_spear3xx = (struct adc_regs_spear3xx __iomem *)st->adc_base_spear6xx; - st->clk = clk_get(dev, NULL); + st->clk = devm_clk_get(dev, NULL); if (IS_ERR(st->clk)) { dev_err(dev, "failed getting clock\n"); - goto errout1; + return PTR_ERR(st->clk); } ret = clk_prepare_enable(st->clk); if (ret) { dev_err(dev, "failed enabling clock\n"); - goto errout2; + return ret; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(dev, "failed getting interrupt resource\n"); ret = -EINVAL; - goto errout3; + goto errout2; } ret = devm_request_irq(dev, irq, spear_adc_isr, 0, SPEAR_ADC_MOD_NAME, st); if (ret < 0) { dev_err(dev, "failed requesting interrupt\n"); - goto errout3; + goto errout2; } if (of_property_read_u32(np, "sampling-frequency", &st->sampling_freq)) { dev_err(dev, "sampling-frequency missing in DT\n"); ret = -EINVAL; - goto errout3; + goto errout2; } /* @@ -348,18 +349,14 @@ static int spear_adc_probe(struct platform_device *pdev) ret = iio_device_register(indio_dev); if (ret) - goto errout3; + goto errout2; dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq); return 0; -errout3: - clk_disable_unprepare(st->clk); errout2: - clk_put(st->clk); -errout1: - iounmap(st->adc_base_spear6xx); + clk_disable_unprepare(st->clk); return ret; } @@ -370,8 +367,6 @@ static int spear_adc_remove(struct platform_device *pdev) iio_device_unregister(indio_dev); clk_disable_unprepare(st->clk); - clk_put(st->clk); - iounmap(st->adc_base_spear6xx); return 0; } diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c index 78fe0b557280..0ccf192b9a03 100644 --- a/drivers/staging/iio/addac/adt7316-i2c.c +++ b/drivers/staging/iio/addac/adt7316-i2c.c @@ -21,7 +21,7 @@ static int adt7316_i2c_read(void *client, u8 reg, u8 *data) { struct i2c_client *cl = client; - int ret = 0; + int ret; ret = i2c_smbus_write_byte(cl, reg); if (ret < 0) { diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c index 3adc4516918c..a10e7d8e6002 100644 --- a/drivers/staging/iio/addac/adt7316.c +++ b/drivers/staging/iio/addac/adt7316.c @@ -465,9 +465,8 @@ static ssize_t adt7316_show_all_ad_channels(struct device *dev, return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n" "2 - External Temperature or AIN1\n" "3 - AIN2\n4 - AIN3\n5 - AIN4\n"); - else - return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n" - "2 - External Temperature\n"); + return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n" + "2 - External Temperature\n"); } static IIO_DEVICE_ATTR(all_ad_channels, S_IRUGO, @@ -637,7 +636,7 @@ static ssize_t adt7316_show_da_high_resolution(struct device *dev, if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) { if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516) return sprintf(buf, "1 (12 bits)\n"); - else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517) + if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517) return sprintf(buf, "1 (10 bits)\n"); } @@ -919,8 +918,7 @@ static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev, "1 - auto at MSB DAC AB and CD writing\n" "2 - auto at MSB DAC ABCD writing\n" "3 - 
manual\n"); - else - return sprintf(buf, "manual\n"); + return sprintf(buf, "manual\n"); } static IIO_DEVICE_ATTR(all_DAC_update_modes, S_IRUGO, @@ -1068,9 +1066,8 @@ static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev, return sprintf(buf, "0x%x\n", (chip->dac_config & ADT7516_DAC_IN_VREF_MASK) >> ADT7516_DAC_IN_VREF_OFFSET); - else - return sprintf(buf, "%d\n", - !!(chip->dac_config & ADT7316_DAC_IN_VREF)); + return sprintf(buf, "%d\n", + !!(chip->dac_config & ADT7316_DAC_IN_VREF)); } static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev, diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c index e8d0ff2d5c9b..f6b9a10326ea 100644 --- a/drivers/staging/iio/cdc/ad7150.c +++ b/drivers/staging/iio/cdc/ad7150.c @@ -21,8 +21,8 @@ */ #define AD7150_STATUS 0 -#define AD7150_STATUS_OUT1 (1 << 3) -#define AD7150_STATUS_OUT2 (1 << 5) +#define AD7150_STATUS_OUT1 BIT(3) +#define AD7150_STATUS_OUT2 BIT(5) #define AD7150_CH1_DATA_HIGH 1 #define AD7150_CH2_DATA_HIGH 3 #define AD7150_CH1_AVG_HIGH 5 @@ -36,7 +36,7 @@ #define AD7150_CH2_TIMEOUT 13 #define AD7150_CH2_SETUP 14 #define AD7150_CFG 15 -#define AD7150_CFG_FIX (1 << 7) +#define AD7150_CFG_FIX BIT(7) #define AD7150_PD_TIMER 16 #define AD7150_CH1_CAPDAC 17 #define AD7150_CH2_CAPDAC 18 @@ -160,8 +160,9 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev, /* lock should be held */ static int ad7150_write_event_params(struct iio_dev *indio_dev, - unsigned int chan, enum iio_event_type type, - enum iio_event_direction dir) + unsigned int chan, + enum iio_event_type type, + enum iio_event_direction dir) { int ret; u16 value; @@ -209,8 +210,9 @@ static int ad7150_write_event_params(struct iio_dev *indio_dev, } static int ad7150_write_event_config(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, enum iio_event_type type, - enum iio_event_direction dir, int state) + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, int state) { u8 thresh_type, cfg, adaptive; int ret; @@ -302,11 +304,11 @@ static int ad7150_read_event_value(struct iio_dev *indio_dev, } static int ad7150_write_event_value(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - enum iio_event_type type, - enum iio_event_direction dir, - enum iio_event_info info, - int val, int val2) + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int val, int val2) { int ret; struct ad7150_chip_info *chip = iio_priv(indio_dev); @@ -365,9 +367,9 @@ static ssize_t ad7150_show_timeout(struct device *dev, } static ssize_t ad7150_store_timeout(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) + struct device_attribute *attr, + const char *buf, + size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7150_chip_info *chip = iio_priv(indio_dev); @@ -580,7 +582,7 @@ static const struct iio_info ad7150_info = { */ static int ad7150_probe(struct i2c_client *client, - const struct i2c_device_id *id) + const struct i2c_device_id *id) { int ret; struct ad7150_chip_info *chip; diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c index 2c5d27784ed3..5771d4ee8ef1 100644 --- a/drivers/staging/iio/cdc/ad7746.c +++ b/drivers/staging/iio/cdc/ad7746.c @@ -529,8 +529,8 @@ static int ad7746_write_raw(struct iio_dev *indio_dev, val /= 338646; - chip->capdac[chan->channel][chan->differential] = (val > 0 ? 
- AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0); + chip->capdac[chan->channel][chan->differential] = val > 0 ? + AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0; ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CAPDACA, diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 10c43dda0f5a..d1218d896725 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -558,7 +558,7 @@ out: } static const struct iio_info ad5933_info = { - .read_raw = &ad5933_read_raw, + .read_raw = ad5933_read_raw, .attrs = &ad5933_attribute_group, .driver_module = THIS_MODULE, }; @@ -616,9 +616,9 @@ static int ad5933_ring_postdisable(struct iio_dev *indio_dev) } static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = { - .preenable = &ad5933_ring_preenable, - .postenable = &ad5933_ring_postenable, - .postdisable = &ad5933_ring_postdisable, + .preenable = ad5933_ring_preenable, + .postenable = ad5933_ring_postenable, + .postdisable = ad5933_ring_postdisable, }; static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev) diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c index bbf7e35cbc7d..76d9f74e7dcb 100644 --- a/drivers/staging/iio/light/isl29018.c +++ b/drivers/staging/iio/light/isl29018.c @@ -100,7 +100,6 @@ static const struct isl29018_scale { }; struct isl29018_chip { - struct device *dev; struct regmap *regmap; struct mutex lock; int type; @@ -180,30 +179,31 @@ static int isl29018_read_sensor_input(struct isl29018_chip *chip, int mode) int status; unsigned int lsb; unsigned int msb; + struct device *dev = regmap_get_device(chip->regmap); /* Set mode */ status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, - mode << COMMMAND1_OPMODE_SHIFT); + mode << COMMMAND1_OPMODE_SHIFT); if (status) { - dev_err(chip->dev, + dev_err(dev, "Error in setting operating mode err %d\n", status); return status; } msleep(CONVERSION_TIME_MS); status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_LSB, &lsb); if (status < 0) { - dev_err(chip->dev, + dev_err(dev, "Error in reading LSB DATA with err %d\n", status); return status; } status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_MSB, &msb); if (status < 0) { - dev_err(chip->dev, + dev_err(dev, "Error in reading MSB DATA with error %d\n", status); return status; } - dev_vdbg(chip->dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb); + dev_vdbg(dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb); return (msb << 8) | lsb; } @@ -241,23 +241,24 @@ static int isl29018_read_ir(struct isl29018_chip *chip, int *ir) } static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme, - int *near_ir) + int *near_ir) { int status; int prox_data = -1; int ir_data = -1; + struct device *dev = regmap_get_device(chip->regmap); /* Do proximity sensing with required scheme */ status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII, - COMMANDII_SCHEME_MASK, - scheme << COMMANDII_SCHEME_SHIFT); + COMMANDII_SCHEME_MASK, + scheme << COMMANDII_SCHEME_SHIFT); if (status) { - dev_err(chip->dev, "Error in setting operating mode\n"); + dev_err(dev, "Error in setting operating mode\n"); return status; } prox_data = isl29018_read_sensor_input(chip, - COMMMAND1_OPMODE_PROX_ONCE); + COMMMAND1_OPMODE_PROX_ONCE); if (prox_data < 0) return prox_data; @@ -280,7 +281,7 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme, } static ssize_t show_scale_available(struct device 
*dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); @@ -297,7 +298,7 @@ static ssize_t show_scale_available(struct device *dev, } static ssize_t show_int_time_available(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); @@ -314,18 +315,22 @@ static ssize_t show_int_time_available(struct device *dev, /* proximity scheme */ static ssize_t show_prox_infrared_suppression(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); - /* return the "proximity scheme" i.e. if the chip does on chip - infrared suppression (1 means perform on chip suppression) */ + /* + * return the "proximity scheme" i.e. if the chip does on chip + * infrared suppression (1 means perform on chip suppression) + */ return sprintf(buf, "%d\n", chip->prox_scheme); } static ssize_t store_prox_infrared_suppression(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) + struct device_attribute *attr, + const char *buf, size_t count) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct isl29018_chip *chip = iio_priv(indio_dev); @@ -338,8 +343,10 @@ static ssize_t store_prox_infrared_suppression(struct device *dev, return -EINVAL; } - /* get the "proximity scheme" i.e. if the chip does on chip - infrared suppression (1 means perform on chip suppression) */ + /* + * get the "proximity scheme" i.e. if the chip does on chip + * infrared suppression (1 means perform on chip suppression) + */ mutex_lock(&chip->lock); chip->prox_scheme = val; mutex_unlock(&chip->lock); @@ -413,7 +420,8 @@ static int isl29018_read_raw(struct iio_dev *indio_dev, break; case IIO_PROXIMITY: ret = isl29018_read_proximity_ir(chip, - chip->prox_scheme, val); + chip->prox_scheme, + val); break; default: break; @@ -518,10 +526,11 @@ static int isl29035_detect(struct isl29018_chip *chip) { int status; unsigned int id; + struct device *dev = regmap_get_device(chip->regmap); status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id); if (status < 0) { - dev_err(chip->dev, + dev_err(dev, "Error reading ID register with error %d\n", status); return status; @@ -546,6 +555,7 @@ enum { static int isl29018_chip_init(struct isl29018_chip *chip) { int status; + struct device *dev = regmap_get_device(chip->regmap); if (chip->type == isl29035) { status = isl29035_detect(chip); @@ -575,7 +585,7 @@ static int isl29018_chip_init(struct isl29018_chip *chip) */ status = regmap_write(chip->regmap, ISL29018_REG_TEST, 0x0); if (status < 0) { - dev_err(chip->dev, "Failed to clear isl29018 TEST reg.(%d)\n", + dev_err(dev, "Failed to clear isl29018 TEST reg.(%d)\n", status); return status; } @@ -586,7 +596,7 @@ static int isl29018_chip_init(struct isl29018_chip *chip) */ status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, 0); if (status < 0) { - dev_err(chip->dev, "Failed to clear isl29018 CMD1 reg.(%d)\n", + dev_err(dev, "Failed to clear isl29018 CMD1 reg.(%d)\n", status); return status; } @@ -597,14 +607,14 @@ static int isl29018_chip_init(struct isl29018_chip *chip) status = isl29018_set_scale(chip, chip->scale.scale, chip->scale.uscale); if (status < 0) { - dev_err(chip->dev, "Init 
of isl29018 fails\n"); + dev_err(dev, "Init of isl29018 fails\n"); return status; } status = isl29018_set_integration_time(chip, isl29018_int_utimes[chip->type][chip->int_time]); if (status < 0) { - dev_err(chip->dev, "Init of isl29018 fails\n"); + dev_err(dev, "Init of isl29018 fails\n"); return status; } @@ -614,15 +624,15 @@ static int isl29018_chip_init(struct isl29018_chip *chip) static const struct iio_info isl29018_info = { .attrs = &isl29018_group, .driver_module = THIS_MODULE, - .read_raw = &isl29018_read_raw, - .write_raw = &isl29018_write_raw, + .read_raw = isl29018_read_raw, + .write_raw = isl29018_write_raw, }; static const struct iio_info isl29023_info = { .attrs = &isl29023_group, .driver_module = THIS_MODULE, - .read_raw = &isl29018_read_raw, - .write_raw = &isl29018_write_raw, + .read_raw = isl29018_read_raw, + .write_raw = isl29018_write_raw, }; static bool is_volatile_reg(struct device *dev, unsigned int reg) @@ -699,13 +709,13 @@ static const char *isl29018_match_acpi_device(struct device *dev, int *data) if (!id) return NULL; - *data = (int) id->driver_data; + *data = (int)id->driver_data; return dev_name(dev); } static int isl29018_probe(struct i2c_client *client, - const struct i2c_device_id *id) + const struct i2c_device_id *id) { struct isl29018_chip *chip; struct iio_dev *indio_dev; @@ -721,7 +731,6 @@ static int isl29018_probe(struct i2c_client *client, chip = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); - chip->dev = &client->dev; if (id) { name = id->name; @@ -744,7 +753,7 @@ static int isl29018_probe(struct i2c_client *client, chip_info_tbl[dev_id].regmap_cfg); if (IS_ERR(chip->regmap)) { err = PTR_ERR(chip->regmap); - dev_err(chip->dev, "regmap initialization failed: %d\n", err); + dev_err(&client->dev, "regmap initialization fails: %d\n", err); return err; } diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c index 32ae1127da33..6e2ba458c24d 100644 --- a/drivers/staging/iio/light/isl29028.c +++ b/drivers/staging/iio/light/isl29028.c @@ -81,7 +81,7 @@ struct isl29028_chip { }; static int isl29028_set_proxim_sampling(struct isl29028_chip *chip, - unsigned int sampling) + unsigned int sampling) { static unsigned int prox_period[] = {800, 400, 200, 100, 75, 50, 12, 0}; int sel; @@ -103,7 +103,7 @@ static int isl29028_enable_proximity(struct isl29028_chip *chip, bool enable) if (enable) val = CONFIGURE_PROX_EN; ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE, - CONFIGURE_PROX_EN_MASK, val); + CONFIGURE_PROX_EN_MASK, val); if (ret < 0) return ret; @@ -122,24 +122,27 @@ static int isl29028_set_als_scale(struct isl29028_chip *chip, int lux_scale) } static int isl29028_set_als_ir_mode(struct isl29028_chip *chip, - enum als_ir_mode mode) + enum als_ir_mode mode) { int ret = 0; switch (mode) { case MODE_ALS: ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE, - CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_ALS); + CONFIGURE_ALS_IR_MODE_MASK, + CONFIGURE_ALS_IR_MODE_ALS); if (ret < 0) return ret; ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE, - CONFIGURE_ALS_RANGE_MASK, CONFIGURE_ALS_RANGE_HIGH_LUX); + CONFIGURE_ALS_RANGE_MASK, + CONFIGURE_ALS_RANGE_HIGH_LUX); break; case MODE_IR: ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE, - CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_IR); + CONFIGURE_ALS_IR_MODE_MASK, + CONFIGURE_ALS_IR_MODE_IR); break; case MODE_NONE: @@ -152,7 +155,7 @@ static int isl29028_set_als_ir_mode(struct isl29028_chip *chip, /* Enable the 
ALS/IR */ ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE, - CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN); + CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN); if (ret < 0) return ret; @@ -193,7 +196,7 @@ static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox) ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data); if (ret < 0) { dev_err(chip->dev, "Error in reading register %d, error %d\n", - ISL29028_REG_PROX_DATA, ret); + ISL29028_REG_PROX_DATA, ret); return ret; } *prox = data; @@ -264,7 +267,8 @@ static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data) /* Channel IO */ static int isl29028_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, int val, int val2, long mask) + struct iio_chan_spec const *chan, + int val, int val2, long mask) { struct isl29028_chip *chip = iio_priv(indio_dev); int ret = -EINVAL; @@ -323,7 +327,8 @@ static int isl29028_write_raw(struct iio_dev *indio_dev, } static int isl29028_read_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, int *val, int *val2, long mask) + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) { struct isl29028_chip *chip = iio_priv(indio_dev); int ret = -EINVAL; @@ -406,8 +411,8 @@ static const struct iio_chan_spec isl29028_channels[] = { static const struct iio_info isl29028_info = { .attrs = &isl29108_group, .driver_module = THIS_MODULE, - .read_raw = &isl29028_read_raw, - .write_raw = &isl29028_write_raw, + .read_raw = isl29028_read_raw, + .write_raw = isl29028_write_raw, }; static int isl29028_chip_init(struct isl29028_chip *chip) @@ -476,7 +481,7 @@ static const struct regmap_config isl29028_regmap_config = { }; static int isl29028_probe(struct i2c_client *client, - const struct i2c_device_id *id) + const struct i2c_device_id *id) { struct isl29028_chip *chip; struct iio_dev *indio_dev; diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c index 3100d960fe2c..05b4ad4e941c 100644 --- a/drivers/staging/iio/light/tsl2583.c +++ b/drivers/staging/iio/light/tsl2583.c @@ -240,8 +240,10 @@ static int taos_get_lux(struct iio_dev *indio_dev) } } - /* clear status, really interrupt status (interrupts are off), but - * we use the bit anyway - don't forget 0x80 - this is a command*/ + /* + * clear status, really interrupt status (interrupts are off), but + * we use the bit anyway - don't forget 0x80 - this is a command + */ ret = i2c_smbus_write_byte(chip->client, (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN | TSL258X_CMD_ALS_INT_CLR)); @@ -265,13 +267,14 @@ static int taos_get_lux(struct iio_dev *indio_dev) if (!ch0) { /* have no data, so return LAST VALUE */ - ret = chip->als_cur_info.lux = 0; + ret = 0; + chip->als_cur_info.lux = 0; goto out_unlock; } /* calculate ratio */ ratio = (ch1 << 15) / ch0; /* convert to unscaled lux using the pointer to the table */ - for (p = (struct taos_lux *) taos_device_lux; + for (p = (struct taos_lux *)taos_device_lux; p->ratio != 0 && p->ratio < ratio; p++) ; @@ -290,7 +293,8 @@ static int taos_get_lux(struct iio_dev *indio_dev) /* note: lux is 31 bit max at this point */ if (ch1lux > ch0lux) { dev_dbg(&chip->client->dev, "No Data - Return last value\n"); - ret = chip->als_cur_info.lux = 0; + ret = 0; + chip->als_cur_info.lux = 0; goto out_unlock; } @@ -378,7 +382,7 @@ static int taos_als_calibrate(struct iio_dev *indio_dev) dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n"); return lux_val; } - gain_trim_val = (unsigned int) (((chip->taos_settings.als_cal_target) + 
gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target) * chip->taos_settings.als_gain_trim) / lux_val); if ((gain_trim_val < 250) || (gain_trim_val > 4000)) { @@ -387,9 +391,9 @@ static int taos_als_calibrate(struct iio_dev *indio_dev) gain_trim_val); return -ENODATA; } - chip->taos_settings.als_gain_trim = (int) gain_trim_val; + chip->taos_settings.als_gain_trim = (int)gain_trim_val; - return (int) gain_trim_val; + return (int)gain_trim_val; } /* @@ -429,8 +433,10 @@ static int taos_chip_on(struct iio_dev *indio_dev) chip->als_saturation = als_count * 922; /* 90% of full scale */ chip->als_time_scale = (als_time + 25) / 50; - /* TSL258x Specific power-on / adc enable sequence - * Power on the device 1st. */ + /* + * TSL258x Specific power-on / adc enable sequence + * Power on the device 1st. + */ utmp = TSL258X_CNTL_PWR_ON; ret = i2c_smbus_write_byte_data(chip->client, TSL258X_CMD_REG | TSL258X_CNTRL, utmp); @@ -439,8 +445,10 @@ static int taos_chip_on(struct iio_dev *indio_dev) return ret; } - /* Use the following shadow copy for our delay before enabling ADC. - * Write all the registers. */ + /* + * Use the following shadow copy for our delay before enabling ADC. + * Write all the registers. + */ for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) { ret = i2c_smbus_write_byte_data(chip->client, TSL258X_CMD_REG + i, @@ -453,8 +461,10 @@ static int taos_chip_on(struct iio_dev *indio_dev) } usleep_range(3000, 3500); - /* NOW enable the ADC - * initialize the desired mode of operation */ + /* + * NOW enable the ADC + * initialize the desired mode of operation + */ utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL; ret = i2c_smbus_write_byte_data(chip->client, TSL258X_CMD_REG | TSL258X_CNTRL, @@ -482,7 +492,7 @@ static int taos_chip_off(struct iio_dev *indio_dev) /* Sysfs Interface Functions */ static ssize_t taos_power_state_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -491,7 +501,8 @@ static ssize_t taos_power_state_show(struct device *dev, } static ssize_t taos_power_state_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); int value; @@ -508,7 +519,7 @@ static ssize_t taos_power_state_store(struct device *dev, } static ssize_t taos_gain_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -533,7 +544,8 @@ static ssize_t taos_gain_show(struct device *dev, } static ssize_t taos_gain_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -564,13 +576,14 @@ static ssize_t taos_gain_store(struct device *dev, } static ssize_t taos_gain_available_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { return sprintf(buf, "%s\n", "1 8 16 111"); } static ssize_t taos_als_time_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); 
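The isl29018 hunks earlier in this series stop caching a struct device pointer in the chip state and instead recover it on demand with regmap_get_device(), which shrinks the state structure without losing dev_err() context. A rough sketch of that pattern, using hypothetical bar_* names (assumes <linux/regmap.h> and <linux/device.h>):

struct bar_chip {
	struct regmap *regmap;		/* no separate struct device * is kept */
};

static int bar_read_sensor(struct bar_chip *chip, unsigned int reg)
{
	struct device *dev = regmap_get_device(chip->regmap);
	unsigned int val;
	int ret;

	ret = regmap_read(chip->regmap, reg, &val);
	if (ret < 0) {
		dev_err(dev, "reading register 0x%x failed: %d\n", reg, ret);
		return ret;
	}

	return val;
}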
struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -579,7 +592,8 @@ static ssize_t taos_als_time_show(struct device *dev, } static ssize_t taos_als_time_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -600,14 +614,15 @@ static ssize_t taos_als_time_store(struct device *dev, } static ssize_t taos_als_time_available_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { return sprintf(buf, "%s\n", "50 100 150 200 250 300 350 400 450 500 550 600 650"); } static ssize_t taos_als_trim_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -616,7 +631,8 @@ static ssize_t taos_als_trim_show(struct device *dev, } static ssize_t taos_als_trim_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -632,7 +648,8 @@ static ssize_t taos_als_trim_store(struct device *dev, } static ssize_t taos_als_cal_target_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -641,7 +658,8 @@ static ssize_t taos_als_cal_target_show(struct device *dev, } static ssize_t taos_als_cal_target_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); @@ -657,7 +675,7 @@ static ssize_t taos_als_cal_target_store(struct device *dev, } static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr, - char *buf) + char *buf) { int ret; @@ -669,7 +687,8 @@ static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr, } static ssize_t taos_do_calibrate(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); int value; @@ -684,7 +703,7 @@ static ssize_t taos_do_calibrate(struct device *dev, } static ssize_t taos_luxtable_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { int i; int offset = 0; @@ -695,8 +714,10 @@ static ssize_t taos_luxtable_show(struct device *dev, taos_device_lux[i].ch0, taos_device_lux[i].ch1); if (taos_device_lux[i].ratio == 0) { - /* We just printed the first "0" entry. - * Now get rid of the extra "," and break. */ + /* + * We just printed the first "0" entry. + * Now get rid of the extra "," and break. 
+ */ offset--; break; } @@ -707,11 +728,12 @@ static ssize_t taos_luxtable_show(struct device *dev, } static ssize_t taos_luxtable_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct tsl2583_chip *chip = iio_priv(indio_dev); - int value[ARRAY_SIZE(taos_device_lux)*3 + 1]; + int value[ARRAY_SIZE(taos_device_lux) * 3 + 1]; int n; get_options(buf, ARRAY_SIZE(value), value); @@ -809,7 +831,7 @@ static int taos_probe(struct i2c_client *clientp, struct iio_dev *indio_dev; if (!i2c_check_functionality(clientp->adapter, - I2C_FUNC_SMBUS_BYTE_DATA)) { + I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n"); return -EOPNOTSUPP; } @@ -846,7 +868,7 @@ static int taos_probe(struct i2c_client *clientp, if (!taos_tsl258x_device(buf)) { dev_info(&clientp->dev, - "i2c device found but does not match expected id in taos_probe()\n"); + "i2c device found but does not match expected id in taos_probe()\n"); return -EINVAL; } diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c index 5b1c1650a0e4..5f308bae41b9 100644 --- a/drivers/staging/iio/light/tsl2x7x_core.c +++ b/drivers/staging/iio/light/tsl2x7x_core.c @@ -296,7 +296,7 @@ static const u8 device_channel_config[] = { static int tsl2x7x_i2c_read(struct i2c_client *client, u8 reg, u8 *val) { - int ret = 0; + int ret; /* select register to write */ ret = i2c_smbus_write_byte(client, (TSL2X7X_CMD_REG | reg)); @@ -687,9 +687,9 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev) /* Set the gain based on tsl2x7x_settings struct */ chip->tsl2x7x_config[TSL2X7X_GAIN] = - (chip->tsl2x7x_settings.als_gain | + chip->tsl2x7x_settings.als_gain | (TSL2X7X_mA100 | TSL2X7X_DIODE1) - | ((chip->tsl2x7x_settings.prox_gain) << 2)); + | ((chip->tsl2x7x_settings.prox_gain) << 2); /* set chip struct re scaling and saturation */ chip->als_saturation = als_count * 922; /* 90% of full scale */ @@ -983,7 +983,7 @@ static ssize_t tsl2x7x_als_time_store(struct device *dev, result.fract /= 3; chip->tsl2x7x_settings.als_time = - (TSL2X7X_MAX_TIMER_CNT - (u8)result.fract); + TSL2X7X_MAX_TIMER_CNT - (u8)result.fract; dev_info(&chip->client->dev, "%s: als time = %d", __func__, chip->tsl2x7x_settings.als_time); diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig deleted file mode 100644 index dec814a7a073..000000000000 --- a/drivers/staging/iio/magnetometer/Kconfig +++ /dev/null @@ -1,40 +0,0 @@ -# -# Magnetometer sensors -# -menu "Magnetometer sensors" - -config SENSORS_HMC5843 - tristate - select IIO_BUFFER - select IIO_TRIGGERED_BUFFER - -config SENSORS_HMC5843_I2C - tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer (I2C)" - depends on I2C - select SENSORS_HMC5843 - select REGMAP_I2C - help - Say Y here to add support for the Honeywell HMC5843, HMC5883 and - HMC5883L 3-Axis Magnetometer (digital compass). - - This driver can also be compiled as a set of modules. - If so, these modules will be created: - - hmc5843_core (core functions) - - hmc5843_i2c (support for HMC5843, HMC5883, HMC5883L and HMC5983) - -config SENSORS_HMC5843_SPI - tristate "Honeywell HMC5983 3-Axis Magnetometer (SPI)" - depends on SPI_MASTER - select SENSORS_HMC5843 - select REGMAP_SPI - help - Say Y here to add support for the Honeywell HMC5983 3-Axis Magnetometer - (digital compass). 
- - This driver can also be compiled as a set of modules. - If so, these modules will be created: - - hmc5843_core (core functions) - - hmc5843_spi (support for HMC5983) - - -endmenu diff --git a/drivers/staging/iio/magnetometer/Makefile b/drivers/staging/iio/magnetometer/Makefile deleted file mode 100644 index 33761a19a956..000000000000 --- a/drivers/staging/iio/magnetometer/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for industrial I/O Magnetometer sensors -# - -obj-$(CONFIG_SENSORS_HMC5843) += hmc5843_core.o -obj-$(CONFIG_SENSORS_HMC5843_I2C) += hmc5843_i2c.o -obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c index 1e950685e12f..f4188e17d30b 100644 --- a/drivers/staging/iio/meter/ade7754.c +++ b/drivers/staging/iio/meter/ade7754.c @@ -347,7 +347,7 @@ static int ade7754_set_irq(struct device *dev, bool enable) ret = ade7754_spi_read_reg_16(dev, ADE7754_IRQEN, &irqen); if (ret) - goto error_ret; + return ret; if (enable) irqen |= BIT(14); /* Enables an interrupt when a data is @@ -356,10 +356,7 @@ static int ade7754_set_irq(struct device *dev, bool enable) irqen &= ~BIT(14); ret = ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen); - if (ret) - goto error_ret; -error_ret: return ret; } diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c index 0db23e4d1852..40f5afaa984b 100644 --- a/drivers/staging/iio/meter/ade7758_core.c +++ b/drivers/staging/iio/meter/ade7758_core.c @@ -423,7 +423,7 @@ int ade7758_set_irq(struct device *dev, bool enable) ret = ade7758_spi_read_reg_24(dev, ADE7758_MASK, &irqen); if (ret) - goto error_ret; + return ret; if (enable) irqen |= BIT(16); /* Enables an interrupt when a data is @@ -432,10 +432,7 @@ int ade7758_set_irq(struct device *dev, bool enable) irqen &= ~BIT(16); ret = ade7758_spi_write_reg_24(dev, ADE7758_MASK, irqen); - if (ret) - goto error_ret; -error_ret: return ret; } diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c index 07cfe28b24e2..8106f8cceeab 100644 --- a/drivers/staging/iio/meter/ade7854-i2c.c +++ b/drivers/staging/iio/meter/ade7854-i2c.c @@ -227,11 +227,6 @@ static int ade7854_i2c_probe(struct i2c_client *client, return ade7854_probe(indio_dev, &client->dev); } -static int ade7854_i2c_remove(struct i2c_client *client) -{ - return ade7854_remove(i2c_get_clientdata(client)); -} - static const struct i2c_device_id ade7854_id[] = { { "ade7854", 0 }, { "ade7858", 0 }, @@ -246,7 +241,6 @@ static struct i2c_driver ade7854_i2c_driver = { .name = "ade7854", }, .probe = ade7854_i2c_probe, - .remove = ade7854_i2c_remove, .id_table = ade7854_id, }; module_i2c_driver(ade7854_i2c_driver); diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c index 2413052c5bfb..63e200ffd1f2 100644 --- a/drivers/staging/iio/meter/ade7854-spi.c +++ b/drivers/staging/iio/meter/ade7854-spi.c @@ -296,12 +296,6 @@ static int ade7854_spi_probe(struct spi_device *spi) return ade7854_probe(indio_dev, &spi->dev); } -static int ade7854_spi_remove(struct spi_device *spi) -{ - ade7854_remove(spi_get_drvdata(spi)); - - return 0; -} static const struct spi_device_id ade7854_id[] = { { "ade7854", 0 }, { "ade7858", 0 }, @@ -316,7 +310,6 @@ static struct spi_driver ade7854_driver = { .name = "ade7854", }, .probe = ade7854_spi_probe, - .remove = ade7854_spi_remove, .id_table = ade7854_id, }; module_spi_driver(ade7854_driver); diff --git 
a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c index a83883596dbc..9e439af7100d 100644 --- a/drivers/staging/iio/meter/ade7854.c +++ b/drivers/staging/iio/meter/ade7854.c @@ -417,7 +417,7 @@ static int ade7854_set_irq(struct device *dev, bool enable) ret = st->read_reg_32(dev, ADE7854_MASK0, &irqen); if (ret) - goto error_ret; + return ret; if (enable) irqen |= BIT(17); /* 1: interrupt enabled when all periodical @@ -426,10 +426,7 @@ static int ade7854_set_irq(struct device *dev, bool enable) irqen &= ~BIT(17); ret = st->write_reg_32(dev, ADE7854_MASK0, irqen); - if (ret) - goto error_ret; -error_ret: return ret; } @@ -548,31 +545,15 @@ int ade7854_probe(struct iio_dev *indio_dev, struct device *dev) indio_dev->info = &ade7854_info; indio_dev->modes = INDIO_DIRECT_MODE; - ret = iio_device_register(indio_dev); + ret = devm_iio_device_register(dev, indio_dev); if (ret) return ret; /* Get the device into a sane initial state */ - ret = ade7854_initial_setup(indio_dev); - if (ret) - goto error_unreg_dev; - - return 0; - -error_unreg_dev: - iio_device_unregister(indio_dev); - return ret; + return ade7854_initial_setup(indio_dev); } EXPORT_SYMBOL(ade7854_probe); -int ade7854_remove(struct iio_dev *indio_dev) -{ - iio_device_unregister(indio_dev); - - return 0; -} -EXPORT_SYMBOL(ade7854_remove); - MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Energy Meter"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c index 595e711d35a6..82b2d88ca942 100644 --- a/drivers/staging/iio/resolver/ad2s1200.c +++ b/drivers/staging/iio/resolver/ad2s1200.c @@ -31,7 +31,7 @@ /* input clock on serial interface */ #define AD2S1200_HZ 8192000 /* clock period in nano second */ -#define AD2S1200_TSCLK (1000000000/AD2S1200_HZ) +#define AD2S1200_TSCLK (1000000000 / AD2S1200_HZ) struct ad2s1200_state { struct mutex lock; @@ -42,10 +42,10 @@ struct ad2s1200_state { }; static int ad2s1200_read_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int *val, - int *val2, - long m) + struct iio_chan_spec const *chan, + int *val, + int *val2, + long m) { int ret = 0; s16 vel; @@ -113,7 +113,7 @@ static int ad2s1200_probe(struct spi_device *spi) DRV_NAME); if (ret) { dev_err(&spi->dev, "request gpio pin %d failed\n", - pins[pn]); + pins[pn]); return ret; } } diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index d97aa2827412..6b992634f009 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c @@ -67,7 +67,7 @@ /* default input clock on serial interface */ #define AD2S1210_DEF_CLKIN 8192000 /* clock period in nano second */ -#define AD2S1210_DEF_TCK (1000000000/AD2S1210_DEF_CLKIN) +#define AD2S1210_DEF_TCK (1000000000 / AD2S1210_DEF_CLKIN) #define AD2S1210_DEF_EXCIT 10000 enum ad2s1210_mode { @@ -98,6 +98,7 @@ static const int ad2s1210_mode_vals[4][2] = { [MOD_VEL] = { 0, 1 }, [MOD_CONFIG] = { 1, 0 }, }; + static inline void ad2s1210_set_mode(enum ad2s1210_mode mode, struct ad2s1210_state *st) { @@ -123,7 +124,7 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data) /* read value from one of the registers */ static int ad2s1210_config_read(struct ad2s1210_state *st, - unsigned char address) + unsigned char address) { struct spi_transfer xfer = { .len = 2, @@ -176,9 +177,9 @@ static const int ad2s1210_res_pins[4][2] = { static inline void 
ad2s1210_set_resolution_pin(struct ad2s1210_state *st) { gpio_set_value(st->pdata->res[0], - ad2s1210_res_pins[(st->resolution - 10)/2][0]); + ad2s1210_res_pins[(st->resolution - 10) / 2][0]); gpio_set_value(st->pdata->res[1], - ad2s1210_res_pins[(st->resolution - 10)/2][1]); + ad2s1210_res_pins[(st->resolution - 10) / 2][1]); } static inline int ad2s1210_soft_reset(struct ad2s1210_state *st) @@ -282,8 +283,8 @@ static ssize_t ad2s1210_show_control(struct device *dev, } static ssize_t ad2s1210_store_control(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); unsigned char udata; @@ -318,9 +319,9 @@ static ssize_t ad2s1210_store_control(struct device *dev, data = ad2s1210_read_resolution_pin(st); if (data != st->resolution) dev_warn(dev, "ad2s1210: resolution settings not match\n"); - } else + } else { ad2s1210_set_resolution_pin(st); - + } ret = len; st->hysteresis = !!(data & AD2S1210_ENABLE_HYSTERESIS); @@ -330,7 +331,8 @@ error_ret: } static ssize_t ad2s1210_show_resolution(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); @@ -338,8 +340,8 @@ static ssize_t ad2s1210_show_resolution(struct device *dev, } static ssize_t ad2s1210_store_resolution(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); unsigned char data; @@ -379,8 +381,9 @@ static ssize_t ad2s1210_store_resolution(struct device *dev, data = ad2s1210_read_resolution_pin(st); if (data != st->resolution) dev_warn(dev, "ad2s1210: resolution settings not match\n"); - } else + } else { ad2s1210_set_resolution_pin(st); + } ret = len; error_ret: mutex_unlock(&st->lock); @@ -389,7 +392,7 @@ error_ret: /* read the fault register since last sample */ static ssize_t ad2s1210_show_fault(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); int ret; @@ -441,7 +444,8 @@ static ssize_t ad2s1210_show_reg(struct device *dev, } static ssize_t ad2s1210_store_reg(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev)); unsigned char data; @@ -497,7 +501,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev, switch (chan->type) { case IIO_ANGL: - pos = be16_to_cpup((__be16 *) st->rx); + pos = be16_to_cpup((__be16 *)st->rx); if (st->hysteresis) pos >>= 16 - st->resolution; *val = pos; @@ -505,7 +509,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev, break; case IIO_ANGL_VEL: negative = st->rx[0] & 0x80; - vel = be16_to_cpup((__be16 *) st->rx); + vel = be16_to_cpup((__be16 *)st->rx); vel >>= 16 - st->resolution; if (vel & 0x8000) { negative = (0xffff >> st->resolution) << st->resolution; @@ -560,7 +564,6 @@ static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR, ad2s1210_show_reg, ad2s1210_store_reg, AD2S1210_REG_LOT_LOW_THRD); - static const struct iio_chan_spec ad2s1210_channels[] = { { .type = IIO_ANGL, @@ -672,7 +675,7 @@ static int ad2s1210_probe(struct spi_device *spi) struct ad2s1210_state *st; int ret; - if (spi->dev.platform_data == NULL) 
+ if (!spi->dev.platform_data) return -EINVAL; indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig index 710a2f3e787e..0b01d24cea51 100644 --- a/drivers/staging/iio/trigger/Kconfig +++ b/drivers/staging/iio/trigger/Kconfig @@ -5,16 +5,6 @@ comment "Triggers - standalone" if IIO_TRIGGER -config IIO_PERIODIC_RTC_TRIGGER - tristate "Periodic RTC triggers" - depends on RTC_CLASS - help - Provides support for using periodic capable real time - clocks as IIO triggers. - - To compile this driver as a module, choose M here: the - module will be called iio-trig-periodic-rtc. - config IIO_BFIN_TMR_TRIGGER tristate "Blackfin TIMER trigger" depends on BLACKFIN diff --git a/drivers/staging/iio/trigger/Makefile b/drivers/staging/iio/trigger/Makefile index 238481b78e72..1300a21363db 100644 --- a/drivers/staging/iio/trigger/Makefile +++ b/drivers/staging/iio/trigger/Makefile @@ -2,5 +2,4 @@ # Makefile for triggers not associated with iio-devices # -obj-$(CONFIG_IIO_PERIODIC_RTC_TRIGGER) += iio-trig-periodic-rtc.o obj-$(CONFIG_IIO_BFIN_TMR_TRIGGER) += iio-trig-bfin-timer.o diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c deleted file mode 100644 index 00d139331261..000000000000 --- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c +++ /dev/null @@ -1,216 +0,0 @@ -/* The industrial I/O periodic RTC trigger driver - * - * Copyright (c) 2008 Jonathan Cameron - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This is a heavily rewritten version of the periodic timer system in - * earlier version of industrialio. It supplies the same functionality - * but via a trigger rather than a specific periodic timer system. 
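Two of the earlier hunks (spear_adc and ade7854) lean on managed devm_* helpers so that probe error paths shrink and, where every resource is managed, the remove() callback can go away entirely. A condensed, hypothetical probe built the same way, with baz_* names standing in for a real driver (assumes <linux/iio/iio.h>, <linux/platform_device.h> and <linux/io.h>):

struct baz_state {
	void __iomem *base;
};

static int baz_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct baz_state *st;
	struct resource *res;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;
	st = iio_priv(indio_dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	st->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(st->base))
		return PTR_ERR(st->base);	/* unmapped automatically, no iounmap() */

	indio_dev->name = "baz";
	indio_dev->modes = INDIO_DIRECT_MODE;
	/* indio_dev->info and ->channels would be filled in here */

	/* unregistered automatically on detach, so no remove() callback */
	return devm_iio_device_register(&pdev->dev, indio_dev);
}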
- */ - -#include <linux/platform_device.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/rtc.h> -#include <linux/iio/iio.h> -#include <linux/iio/trigger.h> - -static LIST_HEAD(iio_prtc_trigger_list); -static DEFINE_MUTEX(iio_prtc_trigger_list_lock); - -struct iio_prtc_trigger_info { - struct rtc_device *rtc; - unsigned int frequency; - struct rtc_task task; - bool state; -}; - -static int iio_trig_periodic_rtc_set_state(struct iio_trigger *trig, bool state) -{ - struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig); - int ret; - - if (trig_info->frequency == 0 && state) - return -EINVAL; - dev_dbg(&trig_info->rtc->dev, "trigger frequency is %u\n", - trig_info->frequency); - ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, state); - if (!ret) - trig_info->state = state; - - return ret; -} - -static ssize_t iio_trig_periodic_read_freq(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_trigger *trig = to_iio_trigger(dev); - struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig); - - return sprintf(buf, "%u\n", trig_info->frequency); -} - -static ssize_t iio_trig_periodic_write_freq(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t len) -{ - struct iio_trigger *trig = to_iio_trigger(dev); - struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig); - unsigned int val; - int ret; - - ret = kstrtouint(buf, 10, &val); - if (ret) - goto error_ret; - - if (val > 0) { - ret = rtc_irq_set_freq(trig_info->rtc, &trig_info->task, val); - if (ret == 0 && trig_info->state && trig_info->frequency == 0) - ret = rtc_irq_set_state(trig_info->rtc, - &trig_info->task, 1); - } else { - ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 0); - } - if (ret) - goto error_ret; - - trig_info->frequency = val; - - return len; - -error_ret: - return ret; -} - -static DEVICE_ATTR(frequency, S_IRUGO | S_IWUSR, - iio_trig_periodic_read_freq, - iio_trig_periodic_write_freq); - -static struct attribute *iio_trig_prtc_attrs[] = { - &dev_attr_frequency.attr, - NULL, -}; - -static const struct attribute_group iio_trig_prtc_attr_group = { - .attrs = iio_trig_prtc_attrs, -}; - -static const struct attribute_group *iio_trig_prtc_attr_groups[] = { - &iio_trig_prtc_attr_group, - NULL -}; - -static void iio_prtc_trigger_poll(void *private_data) -{ - iio_trigger_poll(private_data); -} - -static const struct iio_trigger_ops iio_prtc_trigger_ops = { - .owner = THIS_MODULE, - .set_trigger_state = &iio_trig_periodic_rtc_set_state, -}; - -static int iio_trig_periodic_rtc_probe(struct platform_device *dev) -{ - char **pdata = dev->dev.platform_data; - struct iio_prtc_trigger_info *trig_info; - struct iio_trigger *trig, *trig2; - - int i, ret; - - for (i = 0;; i++) { - if (!pdata[i]) - break; - trig = iio_trigger_alloc("periodic%s", pdata[i]); - if (!trig) { - ret = -ENOMEM; - goto error_free_completed_registrations; - } - list_add(&trig->alloc_list, &iio_prtc_trigger_list); - - trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL); - if (!trig_info) { - ret = -ENOMEM; - goto error_put_trigger_and_remove_from_list; - } - iio_trigger_set_drvdata(trig, trig_info); - trig->ops = &iio_prtc_trigger_ops; - /* RTC access */ - trig_info->rtc = rtc_class_open(pdata[i]); - if (!trig_info->rtc) { - ret = -EINVAL; - goto error_free_trig_info; - } - trig_info->task.func = iio_prtc_trigger_poll; - trig_info->task.private_data = trig; - ret = rtc_irq_register(trig_info->rtc, 
&trig_info->task); - if (ret) - goto error_close_rtc; - trig->dev.groups = iio_trig_prtc_attr_groups; - ret = iio_trigger_register(trig); - if (ret) - goto error_unregister_rtc_irq; - } - return 0; -error_unregister_rtc_irq: - rtc_irq_unregister(trig_info->rtc, &trig_info->task); -error_close_rtc: - rtc_class_close(trig_info->rtc); -error_free_trig_info: - kfree(trig_info); -error_put_trigger_and_remove_from_list: - list_del(&trig->alloc_list); - iio_trigger_put(trig); -error_free_completed_registrations: - list_for_each_entry_safe(trig, - trig2, - &iio_prtc_trigger_list, - alloc_list) { - trig_info = iio_trigger_get_drvdata(trig); - rtc_irq_unregister(trig_info->rtc, &trig_info->task); - rtc_class_close(trig_info->rtc); - kfree(trig_info); - iio_trigger_unregister(trig); - } - return ret; -} - -static int iio_trig_periodic_rtc_remove(struct platform_device *dev) -{ - struct iio_trigger *trig, *trig2; - struct iio_prtc_trigger_info *trig_info; - - mutex_lock(&iio_prtc_trigger_list_lock); - list_for_each_entry_safe(trig, - trig2, - &iio_prtc_trigger_list, - alloc_list) { - trig_info = iio_trigger_get_drvdata(trig); - rtc_irq_unregister(trig_info->rtc, &trig_info->task); - rtc_class_close(trig_info->rtc); - kfree(trig_info); - iio_trigger_unregister(trig); - } - mutex_unlock(&iio_prtc_trigger_list_lock); - return 0; -} - -static struct platform_driver iio_trig_periodic_rtc_driver = { - .probe = iio_trig_periodic_rtc_probe, - .remove = iio_trig_periodic_rtc_remove, - .driver = { - .name = "iio_prtc_trigger", - }, -}; - -module_platform_driver(iio_trig_periodic_rtc_driver); - -MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>"); -MODULE_DESCRIPTION("Periodic realtime clock trigger for the iio subsystem"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/lustre/Kconfig b/drivers/staging/lustre/Kconfig index a224d88bf43d..b7d81096eee9 100644 --- a/drivers/staging/lustre/Kconfig +++ b/drivers/staging/lustre/Kconfig @@ -1,3 +1,3 @@ -source "drivers/staging/lustre/lustre/Kconfig" - source "drivers/staging/lustre/lnet/Kconfig" + +source "drivers/staging/lustre/lustre/Kconfig" diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h index 0d8a91ee5ffc..40af75c4201a 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h @@ -42,6 +42,8 @@ #include "curproc.h" +#define LIBCFS_VERSION "0.7.0" + #define LOWEST_BIT_SET(x) ((x) & ~((x) - 1)) /* @@ -51,8 +53,6 @@ #define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \ ((hexnum) >> 8 & 0xf)) -#define LUSTRE_SRV_LNET_PID LUSTRE_LNET_PID - #include <linux/list.h> /* need both kernel and user-land acceptor */ @@ -77,7 +77,7 @@ struct cfs_psdev_ops { int (*p_close)(unsigned long, void *); int (*p_read)(struct cfs_psdev_file *, char *, unsigned long); int (*p_write)(struct cfs_psdev_file *, char *, unsigned long); - int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void *); + int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void __user *); }; /* @@ -90,7 +90,6 @@ void cfs_enter_debugger(void); * Defined by platform */ int unshare_fs_struct(void); -sigset_t cfs_get_blocked_sigs(void); sigset_t cfs_block_allsigs(void); sigset_t cfs_block_sigs(unsigned long sigs); sigset_t cfs_block_sigsinv(unsigned long sigs); @@ -115,7 +114,6 @@ void cfs_get_random_bytes(void *buf, int size); #include "libcfs_prim.h" #include "libcfs_time.h" #include "libcfs_string.h" -#include "libcfs_kernelcomm.h" #include 
"libcfs_workitem.h" #include "libcfs_hash.h" #include "libcfs_fail.h" @@ -156,5 +154,9 @@ struct lnet_debugfs_symlink_def { void lustre_insert_debugfs(struct ctl_table *table, const struct lnet_debugfs_symlink_def *symlinks); +int lprocfs_call_handler(void *data, int write, loff_t *ppos, + void __user *buffer, size_t *lenp, + int (*handler)(void *data, int write, + loff_t pos, void __user *buffer, int len)); #endif /* _LIBCFS_H */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h index 1530b0458a61..9e62c59714b7 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h index a1787bb43483..98430e7108c1 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h @@ -106,7 +106,7 @@ struct ptldebug_header { #define S_LOV 0x00020000 #define S_LQUOTA 0x00040000 #define S_OSD 0x00080000 -/* unused */ +#define S_LFSCK 0x00100000 /* unused */ /* unused */ #define S_LMV 0x00800000 /* b_new_cmd */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h index 485ab2670918..5ca99bd6f4e9 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h @@ -41,11 +41,16 @@ #ifndef __LIBCFS_IOCTL_H__ #define __LIBCFS_IOCTL_H__ -#define LIBCFS_IOCTL_VERSION 0x0001000a +#define LIBCFS_IOCTL_VERSION 0x0001000a +#define LIBCFS_IOCTL_VERSION2 0x0001000b -struct libcfs_ioctl_data { +struct libcfs_ioctl_hdr { __u32 ioc_len; __u32 ioc_version; +}; + +struct libcfs_ioctl_data { + struct libcfs_ioctl_hdr ioc_hdr; __u64 ioc_nid; __u64 ioc_u64[1]; @@ -61,20 +66,15 @@ struct libcfs_ioctl_data { char *ioc_inlbuf2; __u32 ioc_plen1; /* buffers in userspace */ - char *ioc_pbuf1; + void __user *ioc_pbuf1; __u32 ioc_plen2; /* buffers in userspace */ - char *ioc_pbuf2; + void __user *ioc_pbuf2; char ioc_bulk[0]; }; #define ioc_priority ioc_u32[0] -struct libcfs_ioctl_hdr { - __u32 ioc_len; - __u32 ioc_version; -}; - struct libcfs_debug_ioctl_data { struct libcfs_ioctl_hdr hdr; unsigned int subs; @@ -90,7 +90,7 @@ do { \ struct libcfs_ioctl_handler { struct list_head item; - int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data); + int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr); }; #define DECLARE_IOCTL_HANDLER(ident, func) \ @@ -102,7 +102,6 @@ struct libcfs_ioctl_handler { /* FIXME check conflict with lustre_lib.h */ #define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long) -/* ioctls for manipulating snapshots 30- */ #define IOC_LIBCFS_TYPE 'e' #define IOC_LIBCFS_MIN_NR 30 /* libcfs ioctls */ @@ -113,18 +112,16 @@ struct libcfs_ioctl_handler { /* lnet ioctls */ #define IOC_LIBCFS_GET_NI _IOWR('e', 50, long) #define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long) -#define IOC_LIBCFS_ADD_ROUTE _IOWR('e', 52, 
long) -#define IOC_LIBCFS_DEL_ROUTE _IOWR('e', 53, long) -#define IOC_LIBCFS_GET_ROUTE _IOWR('e', 54, long) #define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long) #define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long) -#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long) +/* #define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long) */ #define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long) #define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long) #define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long) #define IOC_LIBCFS_PING _IOWR('e', 61, long) -#define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long) +/* #define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long) */ #define IOC_LIBCFS_LNETST _IOWR('e', 63, long) +#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, long) /* lnd ioctls */ #define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long) #define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long) @@ -138,7 +135,25 @@ struct libcfs_ioctl_handler { #define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long) #define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long) -#define IOC_LIBCFS_MAX_NR 80 +/* + * DLC Specific IOCTL numbers. + * In order to maintain backward compatibility with any possible external + * tools which might be accessing the IOCTL numbers, a new group of IOCTL + * number have been allocated. + */ +#define IOCTL_CONFIG_SIZE struct lnet_ioctl_config_data +#define IOC_LIBCFS_ADD_ROUTE _IOWR(IOC_LIBCFS_TYPE, 81, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_DEL_ROUTE _IOWR(IOC_LIBCFS_TYPE, 82, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_GET_ROUTE _IOWR(IOC_LIBCFS_TYPE, 83, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_ADD_NET _IOWR(IOC_LIBCFS_TYPE, 84, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_DEL_NET _IOWR(IOC_LIBCFS_TYPE, 85, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_GET_NET _IOWR(IOC_LIBCFS_TYPE, 86, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_CONFIG_RTR _IOWR(IOC_LIBCFS_TYPE, 87, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_ADD_BUF _IOWR(IOC_LIBCFS_TYPE, 88, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_GET_BUF _IOWR(IOC_LIBCFS_TYPE, 89, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_GET_PEER_INFO _IOWR(IOC_LIBCFS_TYPE, 90, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE) +#define IOC_LIBCFS_MAX_NR 91 static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data) { @@ -149,9 +164,9 @@ static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data) return len; } -static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data) +static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data) { - if (data->ioc_len > (1<<30)) { + if (data->ioc_hdr.ioc_len > (1 << 30)) { CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n"); return 1; } @@ -187,7 +202,7 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data) CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n"); return 1; } - if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_len) { + if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) { CERROR("LIBCFS ioctl: packlen != ioc_len\n"); return 1; } @@ -207,7 +222,9 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data) int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand); int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand); -int libcfs_ioctl_getdata(char *buf, char *end, void *arg); -int libcfs_ioctl_popdata(void *arg, void *buf, int size); +int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg, + __u32 *buf_len); +int libcfs_ioctl_popdata(void __user *arg, void *buf, int size); +int 
libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data); #endif /* __LIBCFS_IOCTL_H__ */ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h index a80d993b882e..dab486261154 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h @@ -387,11 +387,6 @@ int cfs_percpt_atomic_summary(atomic_t **refs); * Support for temporary event tracing with minimal Heisenberg effect. * -------------------------------------------------------------------- */ -struct libcfs_device_userstate { - int ldu_memhog_pages; - struct page *ldu_memhog_root_page; -}; - #define MKSTR(ptr) ((ptr)) ? (ptr) : "" static inline int cfs_size_round4(int val) diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h index d8d2e7dc212e..e02cde5aeca1 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h @@ -44,8 +44,6 @@ #define __LIBCFS_STRING_H__ /* libcfs_string.c */ -/* string comparison ignoring case */ -int cfs_strncasecmp(const char *s1, const char *s2, size_t n); /* Convert a text string to a bitmask */ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), int *oldmask, int minmask, int allmask); diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h index aac59008ad1a..d94b2661658a 100644 --- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h +++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h @@ -118,9 +118,6 @@ do { \ #define CDEBUG_STACK() (0L) #endif /* __x86_64__ */ -/* initial pid */ -#define LUSTRE_LNET_PID 12345 - #define __current_nesting_level() (0) /** diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h index 520209f17173..c04979ae0a38 100644 --- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h +++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h index 75285fde15e8..cb0d6b481455 100644 --- a/drivers/staging/lustre/include/linux/lnet/api.h +++ b/drivers/staging/lustre/include/linux/lnet/api.h @@ -48,7 +48,8 @@ /** \defgroup lnet_init_fini Initialization and cleanup * The LNet must be properly initialized before any LNet calls can be made. - * @{ */ + * @{ + */ int LNetNIInit(lnet_pid_t requested_pid); int LNetNIFini(void); /** @} lnet_init_fini */ @@ -71,7 +72,8 @@ int LNetNIFini(void); * it's an entry in the portals table of a process. 
* * \see LNetMEAttach - * @{ */ + * @{ + */ int LNetGetId(unsigned int index, lnet_process_id_t *id); int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order); void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle); @@ -89,7 +91,8 @@ void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle); * incoming requests based on process ID or the match bits provided in the * request. MEs can be dynamically inserted into a match list by LNetMEAttach() * and LNetMEInsert(), and removed from its list by LNetMEUnlink(). - * @{ */ + * @{ + */ int LNetMEAttach(unsigned int portal, lnet_process_id_t match_id_in, __u64 match_bits_in, @@ -120,7 +123,8 @@ int LNetMEUnlink(lnet_handle_me_t current_in); * The LNet API provides two operations to create MDs: LNetMDAttach() * and LNetMDBind(); one operation to unlink and release the resources * associated with a MD: LNetMDUnlink(). - * @{ */ + * @{ + */ int LNetMDAttach(lnet_handle_me_t current_in, lnet_md_t md_in, lnet_unlink_t unlink_in, @@ -154,7 +158,8 @@ int LNetMDUnlink(lnet_handle_md_t md_in); * event from an EQ, and LNetEQWait() can be used to block a process until * an EQ has at least one event. LNetEQPoll() can be used to test or wait * on multiple EQs. - * @{ */ + * @{ + */ int LNetEQAlloc(unsigned int count_in, lnet_eq_handler_t handler, lnet_handle_eq_t *handle_out); @@ -172,7 +177,8 @@ int LNetEQPoll(lnet_handle_eq_t *eventqs_in, * * The LNet API provides two data movement operations: LNetPut() * and LNetGet(). - * @{ */ + * @{ + */ int LNetPut(lnet_nid_t self, lnet_handle_md_t md_in, lnet_ack_req_t ack_req_in, @@ -192,11 +198,12 @@ int LNetGet(lnet_nid_t self, /** \defgroup lnet_misc Miscellaneous operations. * Miscellaneous operations. - * @{ */ - + * @{ + */ int LNetSetLazyPortal(int portal); int LNetClearLazyPortal(int portal); int LNetCtl(unsigned int cmd, void *arg); +void LNetDebugPeer(lnet_process_id_t id); /** @} lnet_misc */ diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h new file mode 100644 index 000000000000..84a19e96ea04 --- /dev/null +++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h @@ -0,0 +1,122 @@ +/* + * LGPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library. + * + * LGPL HEADER END + * + */ +/* + * Copyright (c) 2014, Intel Corporation. 
+ */ +/* + * Author: Amir Shehata <amir.shehata@intel.com> + */ + +#ifndef LNET_DLC_H +#define LNET_DLC_H + +#include "../libcfs/libcfs_ioctl.h" +#include "types.h" + +#define MAX_NUM_SHOW_ENTRIES 32 +#define LNET_MAX_STR_LEN 128 +#define LNET_MAX_SHOW_NUM_CPT 128 +#define LNET_UNDEFINED_HOPS ((__u32) -1) + +struct lnet_ioctl_net_config { + char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN]; + __u32 ni_status; + __u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT]; +}; + +#define LNET_TINY_BUF_IDX 0 +#define LNET_SMALL_BUF_IDX 1 +#define LNET_LARGE_BUF_IDX 2 + +/* # different router buffer pools */ +#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1) + +struct lnet_ioctl_pool_cfg { + struct { + __u32 pl_npages; + __u32 pl_nbuffers; + __u32 pl_credits; + __u32 pl_mincredits; + } pl_pools[LNET_NRBPOOLS]; + __u32 pl_routing; +}; + +struct lnet_ioctl_config_data { + struct libcfs_ioctl_hdr cfg_hdr; + + __u32 cfg_net; + __u32 cfg_count; + __u64 cfg_nid; + __u32 cfg_ncpts; + + union { + struct { + __u32 rtr_hop; + __u32 rtr_priority; + __u32 rtr_flags; + } cfg_route; + struct { + char net_intf[LNET_MAX_STR_LEN]; + __s32 net_peer_timeout; + __s32 net_peer_tx_credits; + __s32 net_peer_rtr_credits; + __s32 net_max_tx_credits; + __u32 net_cksum_algo; + __u32 net_pad; + } cfg_net; + struct { + __u32 buf_enable; + __s32 buf_tiny; + __s32 buf_small; + __s32 buf_large; + } cfg_buffers; + } cfg_config_u; + + char cfg_bulk[0]; +}; + +struct lnet_ioctl_peer { + struct libcfs_ioctl_hdr pr_hdr; + __u32 pr_count; + __u32 pr_pad; + __u64 pr_nid; + + union { + struct { + char cr_aliveness[LNET_MAX_STR_LEN]; + __u32 cr_refcount; + __u32 cr_ni_peer_tx_credits; + __u32 cr_peer_tx_credits; + __u32 cr_peer_rtr_credits; + __u32 cr_peer_min_rtr_credits; + __u32 cr_peer_tx_qnob; + __u32 cr_ncpt; + } pr_peer_credits; + } pr_lnd_u; +}; + +struct lnet_ioctl_lnet_stats { + struct libcfs_ioctl_hdr st_hdr; + struct lnet_counters st_cntrs; +}; + +#endif /* LNET_DLC_H */ diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h index b67a6607bb3b..dfc0208dc3a7 100644 --- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h +++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h @@ -39,6 +39,7 @@ #include "api.h" #include "lnet.h" #include "lib-types.h" +#include "lib-dlc.h" extern lnet_t the_lnet; /* THE network */ @@ -64,6 +65,19 @@ extern lnet_t the_lnet; /* THE network */ /** exclusive lock */ #define LNET_LOCK_EX CFS_PERCPT_LOCK_EX +static inline int lnet_is_route_alive(lnet_route_t *route) +{ + /* gateway is down */ + if (!route->lr_gateway->lp_alive) + return 0; + /* no NI status, assume it's alive */ + if ((route->lr_gateway->lp_ping_feats & + LNET_PING_FEAT_NI_STATUS) == 0) + return 1; + /* has NI status, check # down NIs */ + return route->lr_downis == 0; +} + static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh) { return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE && @@ -72,25 +86,26 @@ static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh) static inline int lnet_md_exhausted(lnet_libmd_t *md) { - return (md->md_threshold == 0 || - ((md->md_options & LNET_MD_MAX_SIZE) != 0 && + return (!md->md_threshold || + ((md->md_options & LNET_MD_MAX_SIZE) && md->md_offset + md->md_max_size > md->md_length)); } static inline int lnet_md_unlinkable(lnet_libmd_t *md) { - /* Should unlink md when its refcount is 0 and either: + /* + * Should unlink md when its refcount is 0 and either: * - md has been flagged for deletion (by auto unlink or 
* LNetM[DE]Unlink, in the latter case md may not be exhausted). * - auto unlink is on and md is exhausted. */ - if (md->md_refcount != 0) + if (md->md_refcount) return 0; - if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0) + if (md->md_flags & LNET_MD_FLAG_ZOMBIE) return 1; - return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 && + return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) && lnet_md_exhausted(md)); } @@ -102,8 +117,10 @@ lnet_cpt_of_cookie(__u64 cookie) { unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK; - /* LNET_CPT_NUMBER doesn't have to be power2, which means we can - * get illegal cpt from it's invalid cookie */ + /* + * LNET_CPT_NUMBER doesn't have to be power2, which means we can + * get illegal cpt from it's invalid cookie + */ return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER; } @@ -183,18 +200,17 @@ lnet_md_alloc(lnet_md_t *umd) unsigned int size; unsigned int niov; - if ((umd->options & LNET_MD_KIOV) != 0) { + if (umd->options & LNET_MD_KIOV) { niov = umd->length; size = offsetof(lnet_libmd_t, md_iov.kiov[niov]); } else { - niov = ((umd->options & LNET_MD_IOVEC) != 0) ? - umd->length : 1; + niov = umd->options & LNET_MD_IOVEC ? umd->length : 1; size = offsetof(lnet_libmd_t, md_iov.iov[niov]); } LIBCFS_ALLOC(md, size); - if (md != NULL) { + if (md) { /* Set here in case of early free */ md->md_options = umd->options; md->md_niov = niov; @@ -209,7 +225,7 @@ lnet_md_free(lnet_libmd_t *md) { unsigned int size; - if ((md->md_options & LNET_MD_KIOV) != 0) + if (md->md_options & LNET_MD_KIOV) size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]); else size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]); @@ -264,7 +280,7 @@ lnet_res_lh_invalidate(lnet_libhandle_t *lh) static inline void lnet_eq2handle(lnet_handle_eq_t *handle, lnet_eq_t *eq) { - if (eq == NULL) { + if (!eq) { LNetInvalidateHandle(handle); return; } @@ -278,7 +294,7 @@ lnet_handle2eq(lnet_handle_eq_t *handle) lnet_libhandle_t *lh; lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie); - if (lh == NULL) + if (!lh) return NULL; return lh_entry(lh, lnet_eq_t, eq_lh); @@ -300,7 +316,7 @@ lnet_handle2md(lnet_handle_md_t *handle) cpt = lnet_cpt_of_cookie(handle->cookie); lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt], handle->cookie); - if (lh == NULL) + if (!lh) return NULL; return lh_entry(lh, lnet_libmd_t, md_lh); @@ -319,7 +335,7 @@ lnet_wire_handle2md(lnet_handle_wire_t *wh) cpt = lnet_cpt_of_cookie(wh->wh_object_cookie); lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt], wh->wh_object_cookie); - if (lh == NULL) + if (!lh) return NULL; return lh_entry(lh, lnet_libmd_t, md_lh); @@ -341,7 +357,7 @@ lnet_handle2me(lnet_handle_me_t *handle) cpt = lnet_cpt_of_cookie(handle->cookie); lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt], handle->cookie); - if (lh == NULL) + if (!lh) return NULL; return lh_entry(lh, lnet_me_t, me_lh); @@ -361,14 +377,14 @@ lnet_peer_decref_locked(lnet_peer_t *lp) { LASSERT(lp->lp_refcount > 0); lp->lp_refcount--; - if (lp->lp_refcount == 0) + if (!lp->lp_refcount) lnet_destroy_peer_locked(lp); } static inline int lnet_isrouter(lnet_peer_t *lp) { - return lp->lp_rtr_refcount != 0; + return lp->lp_rtr_refcount ? 
1 : 0; } static inline void @@ -406,6 +422,8 @@ lnet_ni_decref(lnet_ni_t *ni) } void lnet_ni_free(lnet_ni_t *ni); +lnet_ni_t * +lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist); static inline int lnet_nid2peerhash(lnet_nid_t nid) @@ -430,24 +448,41 @@ lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt); lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt); lnet_ni_t *lnet_net2ni(__u32 net); -int lnet_init(void); -void lnet_fini(void); +extern int portal_rotor; + +int lnet_lib_init(void); +void lnet_lib_exit(void); int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, unsigned long when); void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, unsigned long when); -int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid, +int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid, unsigned int priority); int lnet_check_routes(void); int lnet_del_route(__u32 net, lnet_nid_t gw_nid); void lnet_destroy_routes(void); int lnet_get_route(int idx, __u32 *net, __u32 *hops, lnet_nid_t *gateway, __u32 *alive, __u32 *priority); +int lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, + int *peer_timeout, int *peer_tx_credits, + int *peer_rtr_cr, int *max_tx_credits, + struct lnet_ioctl_net_config *net_config); +int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg); + void lnet_router_debugfs_init(void); void lnet_router_debugfs_fini(void); int lnet_rtrpools_alloc(int im_a_router); -void lnet_rtrpools_free(void); +void lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages); +int lnet_rtrpools_adjust(int tiny, int small, int large); +int lnet_rtrpools_enable(void); +void lnet_rtrpools_disable(void); +void lnet_rtrpools_free(int keep_pools); lnet_remotenet_t *lnet_find_net_locked(__u32 net); +int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets, + __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr, + __s32 credits); +int lnet_dyn_del_ni(__u32 net); +int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason); int lnet_islocalnid(lnet_nid_t nid); int lnet_islocalnet(__u32 net); @@ -466,6 +501,8 @@ void lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target, int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid); void lnet_return_tx_credits_locked(lnet_msg_t *msg); void lnet_return_rx_credits_locked(lnet_msg_t *msg); +void lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp); +void lnet_drop_routed_msgs_locked(struct list_head *list, int cpt); /* portals functions */ /* portals attributes */ @@ -522,13 +559,22 @@ void lnet_portals_destroy(void); /* message functions */ int lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t fromnid, void *private, int rdma_req); +int lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg); +int lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg); + void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int offset, unsigned int mlen, unsigned int rlen); +void lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, + int delayed, unsigned int offset, + unsigned int mlen, unsigned int rlen); + lnet_msg_t *lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *get_msg); void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len); void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc); +void lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, + unsigned int nob); void lnet_drop_delayed_msg_list(struct list_head *head, char *reason); void lnet_recv_delayed_msg_list(struct list_head *head); @@ 
-541,6 +587,24 @@ char *lnet_msgtyp2str(int type); void lnet_print_hdr(lnet_hdr_t *hdr); int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold); +/** \addtogroup lnet_fault_simulation @{ */ + +int lnet_fault_ctl(int cmd, struct libcfs_ioctl_data *data); +int lnet_fault_init(void); +void lnet_fault_fini(void); + +bool lnet_drop_rule_match(lnet_hdr_t *hdr); + +int lnet_delay_rule_add(struct lnet_fault_attr *attr); +int lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown); +int lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr, + struct lnet_fault_stat *stat); +void lnet_delay_rule_reset(void); +void lnet_delay_rule_check(void); +bool lnet_delay_rule_match_locked(lnet_hdr_t *hdr, struct lnet_msg *msg); + +/** @} lnet_fault_simulation */ + void lnet_counters_get(lnet_counters_t *counters); void lnet_counters_reset(void); @@ -660,27 +724,30 @@ void lnet_router_checker_stop(void); void lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net); void lnet_swap_pinginfo(lnet_ping_info_t *info); -int lnet_ping_target_init(void); -void lnet_ping_target_fini(void); -int lnet_ping(lnet_process_id_t id, int timeout_ms, - lnet_process_id_t *ids, int n_ids); - int lnet_parse_ip2nets(char **networksp, char *ip2nets); int lnet_parse_routes(char *route_str, int *im_a_router); int lnet_parse_networks(struct list_head *nilist, char *networks); +int lnet_net_unique(__u32 net, struct list_head *nilist); int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt); lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid); -void lnet_peer_tables_cleanup(void); +void lnet_peer_tables_cleanup(lnet_ni_t *ni); void lnet_peer_tables_destroy(void); int lnet_peer_tables_create(void); void lnet_debug_peer(lnet_nid_t nid); +int lnet_get_peer_info(__u32 peer_index, __u64 *nid, + char alivness[LNET_MAX_STR_LEN], + __u32 *cpt_iter, __u32 *refcount, + __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits, + __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credtis, + __u32 *peer_tx_qnob); static inline void lnet_peer_set_alive(lnet_peer_t *lp) { - lp->lp_last_alive = lp->lp_last_query = jiffies; + lp->lp_last_query = jiffies; + lp->lp_last_alive = jiffies; if (!lp->lp_alive) lnet_notify_locked(lp, 0, 1, lp->lp_last_alive); } diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h index 3bb9468e0b9d..29c72f8c2f99 100644 --- a/drivers/staging/lustre/include/linux/lnet/lib-types.h +++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h @@ -38,9 +38,9 @@ #include <linux/kthread.h> #include <linux/uio.h> #include <linux/types.h> -#include <net/sock.h> #include "types.h" +#include "lnetctl.h" /* Max payload size */ #define LNET_MAX_PAYLOAD CONFIG_LNET_MAX_PAYLOAD @@ -85,10 +85,10 @@ typedef struct lnet_msg { unsigned int msg_receiving:1; /* being received */ unsigned int msg_txcredit:1; /* taken an NI send credit */ unsigned int msg_peertxcredit:1; /* taken a peer send credit */ - unsigned int msg_rtrcredit:1; /* taken a global - router credit */ + unsigned int msg_rtrcredit:1; /* taken a global router credit */ unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */ unsigned int msg_onactivelist:1; /* on the activelist */ + unsigned int msg_rdma_get:1; struct lnet_peer *msg_txpeer; /* peer I'm sending to */ struct lnet_peer *msg_rxpeer; /* peer I received from */ @@ -113,7 +113,7 @@ typedef struct lnet_libhandle { } lnet_libhandle_t; #define lh_entry(ptr, type, member) \ - ((type *)((char *)(ptr)-(char 
*)(&((type *)0)->member))) + ((type *)((char *)(ptr) - (char *)(&((type *)0)->member))) typedef struct lnet_eq { struct list_head eq_list; @@ -190,7 +190,8 @@ typedef struct lnet_lnd { void (*lnd_shutdown)(struct lnet_ni *ni); int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg); - /* In data movement APIs below, payload buffers are described as a set + /* + * In data movement APIs below, payload buffers are described as a set * of 'niov' fragments which are... * EITHER * in virtual memory (struct iovec *iov != NULL) @@ -201,30 +202,36 @@ typedef struct lnet_lnd { * fragments to start from */ - /* Start sending a preformatted message. 'private' is NULL for PUT and + /* + * Start sending a preformatted message. 'private' is NULL for PUT and * GET messages; otherwise this is a response to an incoming message * and 'private' is the 'private' passed to lnet_parse(). Return * non-zero for immediate failure, otherwise complete later with - * lnet_finalize() */ + * lnet_finalize() + */ int (*lnd_send)(struct lnet_ni *ni, void *private, lnet_msg_t *msg); - /* Start receiving 'mlen' bytes of payload data, skipping the following + /* + * Start receiving 'mlen' bytes of payload data, skipping the following * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to * lnet_parse(). Return non-zero for immediate failure, otherwise * complete later with lnet_finalize(). This also gives back a receive - * credit if the LND does flow control. */ + * credit if the LND does flow control. + */ int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen); - /* lnet_parse() has had to delay processing of this message + /* + * lnet_parse() has had to delay processing of this message * (e.g. waiting for a forwarding buffer or send credits). Give the * LND a chance to free urgently needed resources. If called, return 0 * for success and do NOT give back a receive credit; that has to wait * until lnd_recv() gets called. On failure return < 0 and - * release resources; lnd_recv() will not be called. */ + * release resources; lnd_recv() will not be called. 
+ */ int (*lnd_eager_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg, void **new_privatep); @@ -272,11 +279,14 @@ typedef struct lnet_ni { #define LNET_PROTO_PING_MATCHBITS 0x8000000000000000LL -/* NB: value of these features equal to LNET_PROTO_PING_VERSION_x - * of old LNet, so there shouldn't be any compatibility issue */ +/* + * NB: value of these features equal to LNET_PROTO_PING_VERSION_x + * of old LNet, so there shouldn't be any compatibility issue + */ #define LNET_PING_FEAT_INVAL (0) /* no feature */ #define LNET_PING_FEAT_BASE (1 << 0) /* just a ping */ #define LNET_PING_FEAT_NI_STATUS (1 << 1) /* return NI status */ +#define LNET_PING_FEAT_RTE_DISABLED (1 << 2) /* Routing enabled */ #define LNET_PING_FEAT_MASK (LNET_PING_FEAT_BASE | \ LNET_PING_FEAT_NI_STATUS) @@ -343,13 +353,17 @@ typedef struct lnet_peer { struct lnet_peer_table { int pt_version; /* /proc validity stamp */ int pt_number; /* # peers extant */ + /* # zombies to go to deathrow (and not there yet) */ + int pt_zombies; struct list_head pt_deathrow; /* zombie peers */ struct list_head *pt_hash; /* NID->peer hash */ }; -/* peer aliveness is enabled only on routers for peers in a network where the - * lnet_ni_t::ni_peertimeout has been set to a positive value */ -#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \ +/* + * peer aliveness is enabled only on routers for peers in a network where the + * lnet_ni_t::ni_peertimeout has been set to a positive value + */ +#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \ (lp)->lp_ni->ni_peertimeout > 0) typedef struct { @@ -359,7 +373,7 @@ typedef struct { __u32 lr_net; /* remote network number */ int lr_seq; /* sequence for round-robin */ unsigned int lr_downis; /* number of down NIs */ - unsigned int lr_hops; /* how far I am */ + __u32 lr_hops; /* how far I am */ unsigned int lr_priority; /* route priority */ } lnet_route_t; @@ -384,7 +398,10 @@ typedef struct { struct list_head rbp_msgs; /* messages blocking for a buffer */ int rbp_npages; /* # pages in each buffer */ - int rbp_nbuffers; /* # buffers */ + /* requested number of buffers */ + int rbp_req_nbuffers; + /* # buffers actually allocated */ + int rbp_nbuffers; int rbp_credits; /* # free buffers / blocked messages */ int rbp_mincredits; /* low water mark */ @@ -398,7 +415,12 @@ typedef struct { #define LNET_PEER_HASHSIZE 503 /* prime! 
*/ -#define LNET_NRBPOOLS 3 /* # different router buffer pools */ +#define LNET_TINY_BUF_IDX 0 +#define LNET_SMALL_BUF_IDX 1 +#define LNET_LARGE_BUF_IDX 2 + +/* # different router buffer pools */ +#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1) enum { /* Didn't match anything */ @@ -433,12 +455,16 @@ struct lnet_match_info { #define LNET_MT_HASH_BITS 8 #define LNET_MT_HASH_SIZE (1 << LNET_MT_HASH_BITS) #define LNET_MT_HASH_MASK (LNET_MT_HASH_SIZE - 1) -/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash, - * the last entry is reserved for MEs with ignore-bits */ +/* + * we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash, + * the last entry is reserved for MEs with ignore-bits + */ #define LNET_MT_HASH_IGNORE LNET_MT_HASH_SIZE -/* __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which +/* + * __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the - * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] */ + * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] + */ #define LNET_MT_BITS_U64 6 /* 2^6 bits */ #define LNET_MT_EXHAUSTED_BITS (LNET_MT_HASH_BITS - LNET_MT_BITS_U64) #define LNET_MT_EXHAUSTED_BMAP ((1 << LNET_MT_EXHAUSTED_BITS) + 1) @@ -448,8 +474,10 @@ struct lnet_match_table { /* reserved for upcoming patches, CPU partition ID */ unsigned int mt_cpt; unsigned int mt_portal; /* portal index */ - /* match table is set as "enabled" if there's non-exhausted MD - * attached on mt_mhash, it's only valid for wildcard portal */ + /* + * match table is set as "enabled" if there's non-exhausted MD + * attached on mt_mhash, it's only valid for wildcard portal + */ unsigned int mt_enabled; /* bitmap to flag whether MEs on mt_hash are exhausted or not */ __u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP]; @@ -546,6 +574,8 @@ typedef struct { struct lnet_peer_table **ln_peer_tables; /* failure simulation */ struct list_head ln_test_peers; + struct list_head ln_drop_rules; + struct list_head ln_delay_rules; struct list_head ln_nis; /* LND instances */ /* NIs bond on specific CPT(s) */ @@ -553,8 +583,6 @@ typedef struct { /* dying LND instances */ struct list_head ln_nis_zombie; lnet_ni_t *ln_loni; /* the loopback NI */ - /* NI to wait for events in */ - lnet_ni_t *ln_eq_waitni; /* remote networks with routes to them */ struct list_head *ln_remote_nets_hash; @@ -584,8 +612,7 @@ typedef struct { struct mutex ln_api_mutex; struct mutex ln_lnd_mutex; - int ln_init; /* lnet_init() - called? */ + struct mutex ln_delay_mutex; /* Have I called LNetNIInit myself? */ int ln_niinit_self; /* LNetNIInit/LNetNIFini counter */ @@ -600,12 +627,24 @@ typedef struct { /* registered LNDs */ struct list_head ln_lnds; - /* space for network names */ - char *ln_network_tokens; - int ln_network_tokens_nob; /* test protocol compatibility flags */ int ln_testprotocompat; + /* + * 0 - load the NIs from the mod params + * 1 - do not load the NIs from the mod params + * Reverse logic to ensure that other calls to LNetNIInit + * need no change + */ + bool ln_nis_from_mod_params; + + /* + * waitq for router checker. As long as there are no routes in + * the list, the router checker will sleep on this queue. 
when + * routes are added the thread will wake up + */ + wait_queue_head_t ln_rc_waitq; + } lnet_t; #endif diff --git a/drivers/staging/lustre/include/linux/lnet/lnetctl.h b/drivers/staging/lustre/include/linux/lnet/lnetctl.h index bdd69b2af909..39575073b00b 100644 --- a/drivers/staging/lustre/include/linux/lnet/lnetctl.h +++ b/drivers/staging/lustre/include/linux/lnet/lnetctl.h @@ -10,10 +10,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * * header for lnet ioctl */ #ifndef _LNETCTL_H_ @@ -21,6 +17,106 @@ #include "types.h" +/** \addtogroup lnet_fault_simulation + * @{ + */ + +enum { + LNET_CTL_DROP_ADD, + LNET_CTL_DROP_DEL, + LNET_CTL_DROP_RESET, + LNET_CTL_DROP_LIST, + LNET_CTL_DELAY_ADD, + LNET_CTL_DELAY_DEL, + LNET_CTL_DELAY_RESET, + LNET_CTL_DELAY_LIST, +}; + +#define LNET_ACK_BIT BIT(0) +#define LNET_PUT_BIT BIT(1) +#define LNET_GET_BIT BIT(2) +#define LNET_REPLY_BIT BIT(3) + +/** ioctl parameter for LNet fault simulation */ +struct lnet_fault_attr { + /** + * source NID of drop rule + * LNET_NID_ANY is wildcard for all sources + * 255.255.255.255@net is wildcard for all addresses from @net + */ + lnet_nid_t fa_src; + /** destination NID of drop rule, see \a dr_src for details */ + lnet_nid_t fa_dst; + /** + * Portal mask to drop, -1 means all portals, for example: + * fa_ptl_mask = (1 << _LDLM_CB_REQUEST_PORTAL ) | + * (1 << LDLM_CANCEL_REQUEST_PORTAL) + * + * If it is non-zero then only PUT and GET will be filtered, otherwise + * there is no portal filter, all matched messages will be checked. + */ + __u64 fa_ptl_mask; + /** + * message types to drop, for example: + * dra_type = LNET_DROP_ACK_BIT | LNET_DROP_PUT_BIT + * + * If it is non-zero then only specified message types are filtered, + * otherwise all message types will be checked. + */ + __u32 fa_msg_mask; + union { + /** message drop simulation */ + struct { + /** drop rate of this rule */ + __u32 da_rate; + /** + * time interval of message drop, it is exclusive + * with da_rate + */ + __u32 da_interval; + } drop; + /** message latency simulation */ + struct { + __u32 la_rate; + /** + * time interval of message delay, it is exclusive + * with la_rate + */ + __u32 la_interval; + /** latency to delay */ + __u32 la_latency; + } delay; + __u64 space[8]; + } u; +}; + +/** fault simluation stats */ +struct lnet_fault_stat { + /** total # matched messages */ + __u64 fs_count; + /** # dropped LNET_MSG_PUT by this rule */ + __u64 fs_put; + /** # dropped LNET_MSG_ACK by this rule */ + __u64 fs_ack; + /** # dropped LNET_MSG_GET by this rule */ + __u64 fs_get; + /** # dropped LNET_MSG_REPLY by this rule */ + __u64 fs_reply; + union { + struct { + /** total # dropped messages */ + __u64 ds_dropped; + } drop; + struct { + /** total # delayed messages */ + __u64 ls_delayed; + } delay; + __u64 space[8]; + } u; +}; + +/** @} lnet_fault_simulation */ + #define LNET_DEV_ID 0 #define LNET_DEV_PATH "/dev/lnet" #define LNET_DEV_MAJOR 10 diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h index fd1e0fd3696f..417044552d3f 100644 --- a/drivers/staging/lustre/include/linux/lnet/lnetst.h +++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h @@ -245,20 +245,20 @@ typedef struct { int lstio_ses_force; /* IN: force create ? 
*/ /** IN: session features */ unsigned lstio_ses_feats; - lst_sid_t *lstio_ses_idp; /* OUT: session id */ + lst_sid_t __user *lstio_ses_idp; /* OUT: session id */ int lstio_ses_nmlen; /* IN: name length */ - char *lstio_ses_namep; /* IN: session name */ + char __user *lstio_ses_namep; /* IN: session name */ } lstio_session_new_args_t; /* query current session */ typedef struct { - lst_sid_t *lstio_ses_idp; /* OUT: session id */ - int *lstio_ses_keyp; /* OUT: local key */ + lst_sid_t __user *lstio_ses_idp; /* OUT: session id */ + int __user *lstio_ses_keyp; /* OUT: local key */ /** OUT: session features */ - unsigned *lstio_ses_featp; - lstcon_ndlist_ent_t *lstio_ses_ndinfo; /* OUT: */ + unsigned __user *lstio_ses_featp; + lstcon_ndlist_ent_t __user *lstio_ses_ndinfo; /* OUT: */ int lstio_ses_nmlen; /* IN: name length */ - char *lstio_ses_namep; /* OUT: session name */ + char __user *lstio_ses_namep; /* OUT: session name */ } lstio_session_info_args_t; /* delete a session */ @@ -283,26 +283,26 @@ typedef struct { int lstio_dbg_timeout; /* IN: timeout of debug */ int lstio_dbg_nmlen; /* IN: len of name */ - char *lstio_dbg_namep; /* IN: name of + char __user *lstio_dbg_namep; /* IN: name of group|batch */ int lstio_dbg_count; /* IN: # of test nodes to debug */ - lnet_process_id_t *lstio_dbg_idsp; /* IN: id of test + lnet_process_id_t __user *lstio_dbg_idsp; /* IN: id of test nodes */ - struct list_head *lstio_dbg_resultp; /* OUT: list head of + struct list_head __user *lstio_dbg_resultp; /* OUT: list head of result buffer */ } lstio_debug_args_t; typedef struct { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_nmlen; /* IN: name length */ - char *lstio_grp_namep; /* IN: group name */ + int lstio_grp_key; /* IN: session key */ + int lstio_grp_nmlen; /* IN: name length */ + char __user *lstio_grp_namep; /* IN: group name */ } lstio_group_add_args_t; typedef struct { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_nmlen; /* IN: name length */ - char *lstio_grp_namep; /* IN: group name */ + int lstio_grp_key; /* IN: session key */ + int lstio_grp_nmlen; /* IN: name length */ + char __user *lstio_grp_namep; /* IN: group name */ } lstio_group_del_args_t; #define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */ @@ -315,22 +315,22 @@ typedef struct { int lstio_grp_opc; /* IN: OPC */ int lstio_grp_args; /* IN: arguments */ int lstio_grp_nmlen; /* IN: name length */ - char *lstio_grp_namep; /* IN: group name */ + char __user *lstio_grp_namep; /* IN: group name */ int lstio_grp_count; /* IN: # of nodes id */ - lnet_process_id_t *lstio_grp_idsp; /* IN: array of nodes */ - struct list_head *lstio_grp_resultp; /* OUT: list head of + lnet_process_id_t __user *lstio_grp_idsp; /* IN: array of nodes */ + struct list_head __user *lstio_grp_resultp; /* OUT: list head of result buffer */ } lstio_group_update_args_t; typedef struct { int lstio_grp_key; /* IN: session key */ int lstio_grp_nmlen; /* IN: name length */ - char *lstio_grp_namep; /* IN: group name */ + char __user *lstio_grp_namep; /* IN: group name */ int lstio_grp_count; /* IN: # of nodes */ /** OUT: session features */ - unsigned *lstio_grp_featp; - lnet_process_id_t *lstio_grp_idsp; /* IN: nodes */ - struct list_head *lstio_grp_resultp; /* OUT: list head of + unsigned __user *lstio_grp_featp; + lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */ + struct list_head __user *lstio_grp_resultp; /* OUT: list head of result buffer */ } lstio_group_nodes_args_t; @@ -338,18 +338,18 @@ typedef struct { int 
lstio_grp_key; /* IN: session key */ int lstio_grp_idx; /* IN: group idx */ int lstio_grp_nmlen; /* IN: name len */ - char *lstio_grp_namep; /* OUT: name */ + char __user *lstio_grp_namep; /* OUT: name */ } lstio_group_list_args_t; typedef struct { int lstio_grp_key; /* IN: session key */ int lstio_grp_nmlen; /* IN: name len */ - char *lstio_grp_namep; /* IN: name */ - lstcon_ndlist_ent_t *lstio_grp_entp; /* OUT: description of + char __user *lstio_grp_namep; /* IN: name */ + lstcon_ndlist_ent_t __user *lstio_grp_entp; /* OUT: description of group */ - int *lstio_grp_idxp; /* IN/OUT: node index */ - int *lstio_grp_ndentp; /* IN/OUT: # of nodent */ - lstcon_node_ent_t *lstio_grp_dentsp; /* OUT: nodent array */ + int __user *lstio_grp_idxp; /* IN/OUT: node index */ + int __user *lstio_grp_ndentp; /* IN/OUT: # of nodent */ + lstcon_node_ent_t __user *lstio_grp_dentsp; /* OUT: nodent array */ } lstio_group_info_args_t; #define LST_DEFAULT_BATCH "batch" /* default batch name */ @@ -357,13 +357,13 @@ typedef struct { typedef struct { int lstio_bat_key; /* IN: session key */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ + char __user *lstio_bat_namep; /* IN: batch name */ } lstio_batch_add_args_t; typedef struct { int lstio_bat_key; /* IN: session key */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ + char __user *lstio_bat_namep; /* IN: batch name */ } lstio_batch_del_args_t; typedef struct { @@ -371,8 +371,8 @@ typedef struct { int lstio_bat_timeout; /* IN: timeout for the batch */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ - struct list_head *lstio_bat_resultp; /* OUT: list head of + char __user *lstio_bat_namep; /* IN: batch name */ + struct list_head __user *lstio_bat_resultp; /* OUT: list head of result buffer */ } lstio_batch_run_args_t; @@ -381,8 +381,8 @@ typedef struct { int lstio_bat_force; /* IN: abort unfinished test RPC */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ - struct list_head *lstio_bat_resultp; /* OUT: list head of + char __user *lstio_bat_namep; /* IN: batch name */ + struct list_head __user *lstio_bat_resultp; /* OUT: list head of result buffer */ } lstio_batch_stop_args_t; @@ -394,8 +394,8 @@ typedef struct { int lstio_bat_timeout; /* IN: timeout for waiting */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ - struct list_head *lstio_bat_resultp; /* OUT: list head of + char __user *lstio_bat_namep; /* IN: batch name */ + struct list_head __user *lstio_bat_resultp; /* OUT: list head of result buffer */ } lstio_batch_query_args_t; @@ -403,21 +403,21 @@ typedef struct { int lstio_bat_key; /* IN: session key */ int lstio_bat_idx; /* IN: index */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ + char __user *lstio_bat_namep; /* IN: batch name */ } lstio_batch_list_args_t; typedef struct { int lstio_bat_key; /* IN: session key */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: name */ + char __user *lstio_bat_namep; /* IN: name */ int lstio_bat_server; /* IN: query server or not */ int lstio_bat_testidx; /* IN: test index */ - lstcon_test_batch_ent_t *lstio_bat_entp; /* OUT: batch ent */ + lstcon_test_batch_ent_t __user *lstio_bat_entp; /* OUT: batch ent */ - int *lstio_bat_idxp; /* IN/OUT: index of node */ - int *lstio_bat_ndentp; /* IN/OUT: # of nodent */ - lstcon_node_ent_t *lstio_bat_dentsp; 
/* array of nodent */ + int __user *lstio_bat_idxp; /* IN/OUT: index of node */ + int __user *lstio_bat_ndentp; /* IN/OUT: # of nodent */ + lstcon_node_ent_t __user *lstio_bat_dentsp; /* array of nodent */ } lstio_batch_info_args_t; /* add stat in session */ @@ -427,10 +427,10 @@ typedef struct { stat request */ int lstio_sta_nmlen; /* IN: group name length */ - char *lstio_sta_namep; /* IN: group name */ + char __user *lstio_sta_namep; /* IN: group name */ int lstio_sta_count; /* IN: # of pid */ - lnet_process_id_t *lstio_sta_idsp; /* IN: pid */ - struct list_head *lstio_sta_resultp; /* OUT: list head of + lnet_process_id_t __user *lstio_sta_idsp; /* IN: pid */ + struct list_head __user *lstio_sta_resultp; /* OUT: list head of result buffer */ } lstio_stat_args_t; @@ -445,7 +445,7 @@ typedef enum { typedef struct { int lstio_tes_key; /* IN: session key */ int lstio_tes_bat_nmlen; /* IN: batch name len */ - char *lstio_tes_bat_name; /* IN: batch name */ + char __user *lstio_tes_bat_name; /* IN: batch name */ int lstio_tes_type; /* IN: test type */ int lstio_tes_oneside; /* IN: one sided test */ int lstio_tes_loop; /* IN: loop count */ @@ -457,20 +457,20 @@ typedef struct { destination groups */ int lstio_tes_sgrp_nmlen; /* IN: source group name length */ - char *lstio_tes_sgrp_name; /* IN: group name */ + char __user *lstio_tes_sgrp_name; /* IN: group name */ int lstio_tes_dgrp_nmlen; /* IN: destination group name length */ - char *lstio_tes_dgrp_name; /* IN: group name */ + char __user *lstio_tes_dgrp_name; /* IN: group name */ int lstio_tes_param_len; /* IN: param buffer len */ - void *lstio_tes_param; /* IN: parameter for specified + void __user *lstio_tes_param; /* IN: parameter for specified test: lstio_bulk_param_t, lstio_ping_param_t, ... more */ - int *lstio_tes_retp; /* OUT: private returned + int __user *lstio_tes_retp; /* OUT: private returned value */ - struct list_head *lstio_tes_resultp; /* OUT: list head of + struct list_head __user *lstio_tes_resultp;/* OUT: list head of result buffer */ } lstio_test_args_t; diff --git a/drivers/staging/lustre/include/linux/lnet/nidstr.h b/drivers/staging/lustre/include/linux/lnet/nidstr.h index 4fc9ddce829d..937fcc9e4a30 100644 --- a/drivers/staging/lustre/include/linux/lnet/nidstr.h +++ b/drivers/staging/lustre/include/linux/lnet/nidstr.h @@ -34,8 +34,10 @@ * Lustre Network Driver types. */ enum { - /* Only add to these values (i.e. don't ever change or redefine them): - * network addresses depend on them... */ + /* + * Only add to these values (i.e. don't ever change or redefine them): + * network addresses depend on them... 
+ */ QSWLND = 1, SOCKLND = 2, GMLND = 3, @@ -67,6 +69,7 @@ static inline char *libcfs_lnd2str(__u32 lnd) return libcfs_lnd2str_r(lnd, libcfs_next_nidstring(), LNET_NIDSTR_SIZE); } + int libcfs_str2lnd(const char *str); char *libcfs_net2str_r(__u32 net, char *buf, size_t buf_size); static inline char *libcfs_net2str(__u32 net) @@ -74,12 +77,14 @@ static inline char *libcfs_net2str(__u32 net) return libcfs_net2str_r(net, libcfs_next_nidstring(), LNET_NIDSTR_SIZE); } + char *libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size); static inline char *libcfs_nid2str(lnet_nid_t nid) { return libcfs_nid2str_r(nid, libcfs_next_nidstring(), LNET_NIDSTR_SIZE); } + __u32 libcfs_str2net(const char *str); lnet_nid_t libcfs_str2nid(const char *str); int libcfs_str2anynid(lnet_nid_t *nid, const char *str); diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h index 599c9f6628fb..bc32403f4a08 100644 --- a/drivers/staging/lustre/include/linux/lnet/socklnd.h +++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h @@ -85,14 +85,17 @@ socklnd_init_msg(ksock_msg_t *msg, int type) { msg->ksm_csum = 0; msg->ksm_type = type; - msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; + msg->ksm_zc_cookies[0] = 0; + msg->ksm_zc_cookies[1] = 0; } #define KSOCK_MSG_NOOP 0xC0 /* ksm_u empty */ #define KSOCK_MSG_LNET 0xC1 /* lnet msg */ -/* We need to know this number to parse hello msg from ksocklnd in - * other LND (usocklnd, for example) */ +/* + * We need to know this number to parse hello msg from ksocklnd in + * other LND (usocklnd, for example) + */ #define KSOCK_PROTO_V2 2 #define KSOCK_PROTO_V3 3 diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h index 11630180c5e7..08f193c341c5 100644 --- a/drivers/staging/lustre/include/linux/lnet/types.h +++ b/drivers/staging/lustre/include/linux/lnet/types.h @@ -36,10 +36,14 @@ #include <linux/types.h> /** \addtogroup lnet - * @{ */ + * @{ + */ + +#define LNET_VERSION "0.6.0" /** \addtogroup lnet_addr - * @{ */ + * @{ + */ /** Portal reserved for LNet's own use. * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments. @@ -116,10 +120,12 @@ typedef struct { lnet_pid_t pid; } WIRE_ATTR lnet_process_id_packed_t; -/* The wire handle's interface cookie only matches one network interface in +/* + * The wire handle's interface cookie only matches one network interface in * one epoch (i.e. new cookie when the interface restarts or the node * reboots). The object cookie only matches one object on that interface - * during that object's lifetime (i.e. no cookie re-use). */ + * during that object's lifetime (i.e. no cookie re-use). + */ typedef struct { __u64 wh_interface_cookie; __u64 wh_object_cookie; @@ -133,10 +139,12 @@ typedef enum { LNET_MSG_HELLO, } lnet_msg_type_t; -/* The variant fields of the portals message header are aligned on an 8 +/* + * The variant fields of the portals message header are aligned on an 8 * byte boundary in the message header. Note that all types used in these * wire structs MUST be fixed size and the smaller types are placed at the - * end. */ + * end. 
+ */ typedef struct lnet_ack { lnet_handle_wire_t dst_wmd; __u64 match_bits; @@ -185,7 +193,8 @@ typedef struct { } msg; } WIRE_ATTR lnet_hdr_t; -/* A HELLO message contains a magic number and protocol version +/* + * A HELLO message contains a magic number and protocol version * code in the header's dest_nid, the peer's NID in the src_nid, and * LNET_MSG_HELLO in the type field. All other common fields are zero * (including payload_size; i.e. no payload). @@ -208,8 +217,10 @@ typedef struct { #define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */ /* Placeholder for a future "unified" protocol across all LNDs */ -/* Current LNDs that receive a request with this magic will respond with a - * "stub" reply using their current protocol */ +/* + * Current LNDs that receive a request with this magic will respond with a + * "stub" reply using their current protocol + */ #define LNET_PROTO_MAGIC 0x45726963 /* ! */ #define LNET_PROTO_TCP_VERSION_MAJOR 1 @@ -258,7 +269,7 @@ typedef struct lnet_counters { #define LNET_MAX_INTERFACES 16 -/* +/** * Objects maintained by the LNet are accessed through handles. Handle types * have names of the form lnet_handle_xx_t, where xx is one of the two letter * object type codes ('eq' for event queue, 'md' for memory descriptor, and @@ -318,7 +329,8 @@ typedef struct { /** @} lnet_addr */ /** \addtogroup lnet_me - * @{ */ + * @{ + */ /** * Specifies whether the match entry or memory descriptor should be unlinked @@ -348,7 +360,8 @@ typedef enum { /** @} lnet_me */ /** \addtogroup lnet_md - * @{ */ + * @{ + */ /** * Defines the visible parts of a memory descriptor. Values of this type @@ -450,9 +463,11 @@ typedef struct { lnet_handle_eq_t eq_handle; } lnet_md_t; -/* Max Transfer Unit (minimum supported everywhere). +/* + * Max Transfer Unit (minimum supported everywhere). * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks) - * these limits are system wide and not interface-local. */ + * these limits are system wide and not interface-local. + */ #define LNET_MTU_BITS 20 #define LNET_MTU (1 << LNET_MTU_BITS) @@ -506,7 +521,8 @@ typedef struct { /** @} lnet_md */ /** \addtogroup lnet_eq - * @{ */ + * @{ + */ /** * Six types of events can be logged in an event queue. @@ -640,7 +656,8 @@ typedef void (*lnet_eq_handler_t)(lnet_event_t *event); /** @} lnet_eq */ /** \addtogroup lnet_data - * @{ */ + * @{ + */ /** * Specify whether an acknowledgment should be sent by target when the PUT diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig index 00850eeb6a8c..2b5930150cda 100644 --- a/drivers/staging/lustre/lnet/Kconfig +++ b/drivers/staging/lustre/lnet/Kconfig @@ -1,10 +1,16 @@ config LNET - tristate "Lustre networking subsystem" - depends on LUSTRE_FS + tristate "Lustre networking subsystem (LNet)" + depends on INET && m + help + The Lustre network layer, also known as LNet, is a networking abstaction + level API that was initially created to allow Lustre Filesystem to utilize + very different networks like tcp and ib verbs in a uniform way. In the + case of Lustre routers only the LNet layer is required. Lately other + projects are also looking into using LNet as their networking API as well. 
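As a point of reference for the Kconfig help text above, a minimal consumer of the LNet API declared in api.h would look roughly like the sketch below. This is an illustrative out-of-tree module, not part of this patch: the include paths, the literal requested PID, and the module boilerplate are assumptions; only LNetNIInit(), LNetGetId(), LNetNIFini() and libcfs_nid2str() are taken from the headers touched in this series.

/*
 * Hypothetical sketch: bring LNet up, print every local NID, shut down.
 * Include paths and the requested PID value are illustrative only.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include "linux/lnet/api.h"	/* LNetNIInit(), LNetGetId(), LNetNIFini() */
#include "linux/lnet/nidstr.h"	/* libcfs_nid2str() */

static int __init lnet_probe_init(void)
{
	lnet_process_id_t id;
	int rc;
	int i;

	rc = LNetNIInit(12345);		/* illustrative requested PID */
	if (rc < 0)
		return rc;

	/* enumerate the local network interfaces until LNetGetId() runs out */
	for (i = 0; !LNetGetId(i, &id); i++)
		pr_info("lnet_probe: NI[%d] = %s, pid %u\n",
			i, libcfs_nid2str(id.nid), id.pid);

	LNetNIFini();
	return 0;
}

static void __exit lnet_probe_exit(void)
{
}

module_init(lnet_probe_init);
module_exit(lnet_probe_exit);
MODULE_LICENSE("GPL");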
config LNET_MAX_PAYLOAD - int "Lustre lnet max transfer payload (default 2MB)" - depends on LUSTRE_FS + int "Lustre lnet max transfer payload (default 1MB)" + depends on LNET default "1048576" help This option defines the maximum size of payload in bytes that lnet diff --git a/drivers/staging/lustre/lnet/Makefile b/drivers/staging/lustre/lnet/Makefile index f6f03e304d81..0a380fe88ce8 100644 --- a/drivers/staging/lustre/lnet/Makefile +++ b/drivers/staging/lustre/lnet/Makefile @@ -1 +1 @@ -obj-$(CONFIG_LNET) += lnet/ klnds/ selftest/ +obj-$(CONFIG_LNET) += libcfs/ lnet/ klnds/ selftest/ diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index cb74ae731b95..0d32e6541a3f 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -42,15 +42,7 @@ #include <asm/page.h> #include "o2iblnd.h" -static lnd_t the_o2iblnd = { - .lnd_type = O2IBLND, - .lnd_startup = kiblnd_startup, - .lnd_shutdown = kiblnd_shutdown, - .lnd_ctl = kiblnd_ctl, - .lnd_query = kiblnd_query, - .lnd_send = kiblnd_send, - .lnd_recv = kiblnd_recv, -}; +static lnd_t the_o2iblnd; kib_data_t kiblnd_data; @@ -63,7 +55,7 @@ static __u32 kiblnd_cksum(void *ptr, int nob) sum = ((sum << 1) | (sum >> 31)) + *c++; /* ensure I don't return 0 (== no checksum) */ - return (sum == 0) ? 1 : sum; + return !sum ? 1 : sum; } static char *kiblnd_msgtype2str(int type) @@ -145,7 +137,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) int i; LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || - msg->ibm_type == IBLND_MSG_PUT_ACK); + msg->ibm_type == IBLND_MSG_PUT_ACK); rd = msg->ibm_type == IBLND_MSG_GET_REQ ? &msg->ibm_u.get.ibgm_rd : @@ -189,8 +181,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, { kib_net_t *net = ni->ni_data; - /* CAVEAT EMPTOR! all message fields not set here should have been - * initialised previously. */ + /* + * CAVEAT EMPTOR! all message fields not set here should have been + * initialised previously. + */ msg->ibm_magic = IBLND_MSG_MAGIC; msg->ibm_version = version; /* ibm_type */ @@ -249,11 +243,13 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob) return -EPROTO; } - /* checksum must be computed with ibm_cksum zero and BEFORE anything - * gets flipped */ + /* + * checksum must be computed with ibm_cksum zero and BEFORE anything + * gets flipped + */ msg_cksum = flip ? 
__swab32(msg->ibm_cksum) : msg->ibm_cksum; msg->ibm_cksum = 0; - if (msg_cksum != 0 && + if (msg_cksum && msg_cksum != kiblnd_cksum(msg, msg_nob)) { CERROR("Bad checksum\n"); return -EPROTO; @@ -326,21 +322,21 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) int cpt = lnet_cpt_of_nid(nid); unsigned long flags; - LASSERT(net != NULL); + LASSERT(net); LASSERT(nid != LNET_NID_ANY); LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); - if (peer == NULL) { + if (!peer) { CERROR("Cannot allocate peer\n"); return -ENOMEM; } - memset(peer, 0, sizeof(*peer)); /* zero flags etc */ - peer->ibp_ni = ni; peer->ibp_nid = nid; peer->ibp_error = 0; peer->ibp_last_alive = 0; + peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS; + peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits; atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ @@ -350,7 +346,7 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT(net->ibn_shutdown == 0); + LASSERT(!net->ibn_shutdown); /* npeers only grows with the global lock held */ atomic_inc(&net->ibn_npeers); @@ -365,38 +361,36 @@ void kiblnd_destroy_peer(kib_peer_t *peer) { kib_net_t *net = peer->ibp_ni->ni_data; - LASSERT(net != NULL); - LASSERT(atomic_read(&peer->ibp_refcount) == 0); + LASSERT(net); + LASSERT(!atomic_read(&peer->ibp_refcount)); LASSERT(!kiblnd_peer_active(peer)); - LASSERT(peer->ibp_connecting == 0); - LASSERT(peer->ibp_accepting == 0); - LASSERT(list_empty(&peer->ibp_conns)); + LASSERT(kiblnd_peer_idle(peer)); LASSERT(list_empty(&peer->ibp_tx_queue)); LIBCFS_FREE(peer, sizeof(*peer)); - /* NB a peer's connections keep a reference on their peer until + /* + * NB a peer's connections keep a reference on their peer until * they are destroyed, so we can be assured that _all_ state to do * with this peer has been cleaned up when its refcount drops to - * zero. */ + * zero. + */ atomic_dec(&net->ibn_npeers); } kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) { - /* the caller is responsible for accounting the additional reference - * that this creates */ + /* + * the caller is responsible for accounting the additional reference + * that this creates + */ struct list_head *peer_list = kiblnd_nid2peerlist(nid); struct list_head *tmp; kib_peer_t *peer; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, kib_peer_t, ibp_list); - - LASSERT(peer->ibp_connecting > 0 || /* creating conns */ - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); /* active conn */ + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_nid != nid) continue; @@ -431,13 +425,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -474,8 +464,10 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer) } /* NB closing peer's last conn unlinked it. */ } - /* NB peer now unlinked; might even be freed if the peer table had the - * last ref on it. 
*/ + /* + * NB peer now unlinked; might even be freed if the peer table had the + * last ref on it. + */ } static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) @@ -493,7 +485,8 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (nid != LNET_NID_ANY) { - lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; } else { lo = 0; hi = kiblnd_data.kib_peer_hash_size - 1; @@ -502,9 +495,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -516,7 +507,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) LASSERT(list_empty(&peer->ibp_conns)); list_splice_init(&peer->ibp_tx_queue, - &zombies); + &zombies); } kiblnd_del_peer_locked(peer); @@ -544,11 +535,8 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -558,7 +546,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) continue; conn = list_entry(ctmp, kib_conn_t, - ibc_list); + ibc_list); kiblnd_conn_addref(conn); read_unlock_irqrestore( &kiblnd_data.kib_global_lock, @@ -597,12 +585,12 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) int mtu; /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ - if (cmid->route.path_rec == NULL) + if (!cmid->route.path_rec) return; mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); LASSERT(mtu >= 0); - if (mtu != 0) + if (mtu) cmid->route.path_rec->mtu = mtu; } @@ -619,13 +607,13 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) return 0; mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt); - if (mask == NULL) + if (!mask) return 0; /* hash NID to CPU id in this partition... */ off = do_div(nid, cpumask_weight(mask)); for_each_cpu(i, mask) { - if (off-- == 0) + if (!off--) return i % vectors; } @@ -634,15 +622,17 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) } kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, - int state, int version) + int state, int version) { - /* CAVEAT EMPTOR: + /* + * CAVEAT EMPTOR: * If the new conn is created successfully it takes over the caller's * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself * is destroyed. On failure, the caller's ref on 'peer' remains and * she must dispose of 'cmid'. (Actually I'd block forever if I tried * to destroy 'cmid' here since I'm called from the CM which still has - * its ref on 'cmid'). */ + * its ref on 'cmid'). 
+ */ rwlock_t *glock = &kiblnd_data.kib_global_lock; kib_net_t *net = peer->ibp_ni->ni_data; kib_dev_t *dev; @@ -656,7 +646,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, int rc; int i; - LASSERT(net != NULL); + LASSERT(net); LASSERT(!in_interrupt()); dev = net->ibn_dev; @@ -668,14 +658,14 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt, sizeof(*init_qp_attr)); - if (init_qp_attr == NULL) { + if (!init_qp_attr) { CERROR("Can't allocate qp_attr for %s\n", libcfs_nid2str(peer->ibp_nid)); goto failed_0; } LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn)); - if (conn == NULL) { + if (!conn) { CERROR("Can't allocate connection for %s\n", libcfs_nid2str(peer->ibp_nid)); goto failed_1; @@ -686,6 +676,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, conn->ibc_peer = peer; /* I take the caller's ref */ cmid->context = conn; /* for future CM callbacks */ conn->ibc_cmid = cmid; + conn->ibc_max_frags = peer->ibp_max_frags; + conn->ibc_queue_depth = peer->ibp_queue_depth; INIT_LIST_HEAD(&conn->ibc_early_rxs); INIT_LIST_HEAD(&conn->ibc_tx_noops); @@ -697,7 +689,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt, sizeof(*conn->ibc_connvars)); - if (conn->ibc_connvars == NULL) { + if (!conn->ibc_connvars) { CERROR("Can't allocate in-progress connection state\n"); goto failed_2; } @@ -731,42 +723,42 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, write_unlock_irqrestore(glock, flags); LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, - IBLND_RX_MSGS(version) * sizeof(kib_rx_t)); - if (conn->ibc_rxs == NULL) { + IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); + if (!conn->ibc_rxs) { CERROR("Cannot allocate RX buffers\n"); goto failed_2; } rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt, - IBLND_RX_MSG_PAGES(version)); - if (rc != 0) + IBLND_RX_MSG_PAGES(conn)); + if (rc) goto failed_2; kiblnd_map_rx_descs(conn); - cq_attr.cqe = IBLND_CQ_ENTRIES(version); + cq_attr.cqe = IBLND_CQ_ENTRIES(conn); cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); cq = ib_create_cq(cmid->device, kiblnd_cq_completion, kiblnd_cq_event, conn, &cq_attr); if (IS_ERR(cq)) { - CERROR("Can't create CQ: %ld, cqe: %d\n", - PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); + CERROR("Failed to create CQ with %d CQEs: %ld\n", + IBLND_CQ_ENTRIES(conn), PTR_ERR(cq)); goto failed_2; } conn->ibc_cq = cq; rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); - if (rc != 0) { - CERROR("Can't request completion notificiation: %d\n", rc); + if (rc) { + CERROR("Can't request completion notification: %d\n", rc); goto failed_2; } init_qp_attr->event_handler = kiblnd_qp_event; init_qp_attr->qp_context = conn; - init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version); - init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version); + init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn); + init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn); init_qp_attr->cap.max_send_sge = 1; init_qp_attr->cap.max_recv_sge = 1; init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR; @@ -777,7 +769,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, conn->ibc_sched = sched; rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr); - if (rc != 0) { + if (rc) { CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n", rc, init_qp_attr->cap.max_send_wr, init_qp_attr->cap.max_recv_wr); @@ -787,33 
+779,37 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); /* 1 ref for caller and each rxmsg */ - atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version)); - conn->ibc_nrx = IBLND_RX_MSGS(version); + atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn)); + conn->ibc_nrx = IBLND_RX_MSGS(conn); /* post receives */ - for (i = 0; i < IBLND_RX_MSGS(version); i++) { + for (i = 0; i < IBLND_RX_MSGS(conn); i++) { rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT); - if (rc != 0) { + if (rc) { CERROR("Can't post rxmsg: %d\n", rc); /* Make posted receives complete */ kiblnd_abort_receives(conn); - /* correct # of posted buffers - * NB locking needed now I'm racing with completion */ + /* + * correct # of posted buffers + * NB locking needed now I'm racing with completion + */ spin_lock_irqsave(&sched->ibs_lock, flags); - conn->ibc_nrx -= IBLND_RX_MSGS(version) - i; + conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i; spin_unlock_irqrestore(&sched->ibs_lock, flags); - /* cmid will be destroyed by CM(ofed) after cm_callback + /* + * cmid will be destroyed by CM(ofed) after cm_callback * returned, so we can't refer it anymore - * (by kiblnd_connd()->kiblnd_destroy_conn) */ + * (by kiblnd_connd()->kiblnd_destroy_conn) + */ rdma_destroy_qp(conn->ibc_cmid); conn->ibc_cmid = NULL; /* Drop my own and unused rxbuffer refcounts */ - while (i++ <= IBLND_RX_MSGS(version)) + while (i++ <= IBLND_RX_MSGS(conn)) kiblnd_conn_decref(conn); return NULL; @@ -822,7 +818,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, /* Init successful! */ LASSERT(state == IBLND_CONN_ACTIVE_CONNECT || - state == IBLND_CONN_PASSIVE_WAIT); + state == IBLND_CONN_PASSIVE_WAIT); conn->ibc_state = state; /* 1 more conn */ @@ -830,29 +826,29 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, return conn; failed_2: - kiblnd_destroy_conn(conn); + kiblnd_destroy_conn(conn, true); failed_1: LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); failed_0: return NULL; } -void kiblnd_destroy_conn(kib_conn_t *conn) +void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; kib_peer_t *peer = conn->ibc_peer; int rc; LASSERT(!in_interrupt()); - LASSERT(atomic_read(&conn->ibc_refcount) == 0); + LASSERT(!atomic_read(&conn->ibc_refcount)); LASSERT(list_empty(&conn->ibc_early_rxs)); LASSERT(list_empty(&conn->ibc_tx_noops)); LASSERT(list_empty(&conn->ibc_tx_queue)); LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd)); LASSERT(list_empty(&conn->ibc_tx_queue_nocred)); LASSERT(list_empty(&conn->ibc_active_txs)); - LASSERT(conn->ibc_noops_posted == 0); - LASSERT(conn->ibc_nsends_posted == 0); + LASSERT(!conn->ibc_noops_posted); + LASSERT(!conn->ibc_nsends_posted); switch (conn->ibc_state) { default: @@ -861,7 +857,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn) case IBLND_CONN_DISCONNECTED: /* connvars should have been freed already */ - LASSERT(conn->ibc_connvars == NULL); + LASSERT(!conn->ibc_connvars); break; case IBLND_CONN_INIT: @@ -869,28 +865,27 @@ void kiblnd_destroy_conn(kib_conn_t *conn) } /* conn->ibc_cmid might be destroyed by CM already */ - if (cmid != NULL && cmid->qp != NULL) + if (cmid && cmid->qp) rdma_destroy_qp(cmid); - if (conn->ibc_cq != NULL) { + if (conn->ibc_cq) { rc = ib_destroy_cq(conn->ibc_cq); - if (rc != 0) + if (rc) CWARN("Error destroying CQ: %d\n", rc); } - if (conn->ibc_rx_pages != NULL) + if (conn->ibc_rx_pages) kiblnd_unmap_rx_descs(conn); - if 
(conn->ibc_rxs != NULL) { + if (conn->ibc_rxs) { LIBCFS_FREE(conn->ibc_rxs, - IBLND_RX_MSGS(conn->ibc_version) - * sizeof(kib_rx_t)); + IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); } - if (conn->ibc_connvars != NULL) + if (conn->ibc_connvars) LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); - if (conn->ibc_hdev != NULL) + if (conn->ibc_hdev) kiblnd_hdev_decref(conn->ibc_hdev); /* See CAVEAT EMPTOR above in kiblnd_create_conn */ @@ -927,7 +922,7 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) } int kiblnd_close_stale_conns_locked(kib_peer_t *peer, - int version, __u64 incarnation) + int version, __u64 incarnation) { kib_conn_t *conn; struct list_head *ctmp; @@ -967,20 +962,18 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (nid != LNET_NID_ANY) - lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - else { + if (nid != LNET_NID_ANY) { + lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + } else { lo = 0; hi = kiblnd_data.kib_peer_hash_size - 1; } for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -998,10 +991,10 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) if (nid == LNET_NID_ANY) return 0; - return (count == 0) ? -ENOENT : 0; + return !count ? -ENOENT : 0; } -int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) +static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; int rc = -EINVAL; @@ -1027,14 +1020,14 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) rc = 0; conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); - if (conn == NULL) { + if (!conn) { rc = -ENOENT; break; } - LASSERT(conn->ibc_cmid != NULL); + LASSERT(conn->ibc_cmid); data->ioc_nid = conn->ibc_peer->ibp_nid; - if (conn->ibc_cmid->route.path_rec == NULL) + if (!conn->ibc_cmid->route.path_rec) data->ioc_u32[0] = 0; /* iWarp has no path MTU */ else data->ioc_u32[0] = @@ -1054,7 +1047,7 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) return rc; } -void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) +static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { unsigned long last_alive = 0; unsigned long now = cfs_time_current(); @@ -1065,21 +1058,19 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) read_lock_irqsave(glock, flags); peer = kiblnd_find_peer_locked(nid); - if (peer != NULL) { - LASSERT(peer->ibp_connecting > 0 || /* creating conns */ - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); /* active conn */ + if (peer) last_alive = peer->ibp_last_alive; - } read_unlock_irqrestore(glock, flags); - if (last_alive != 0) + if (last_alive) *when = last_alive; - /* peer is not persistent in hash, trigger peer creation - * and connection establishment with a NULL tx */ - if (peer == NULL) + /* + * peer is not persistent in hash, trigger peer creation + * and connection establishment with a NULL tx + */ + if (!peer) kiblnd_launch_tx(ni, NULL, nid); CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n", @@ -1087,13 +1078,13 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) last_alive ? 
cfs_duration_sec(now - last_alive) : -1); } -void kiblnd_free_pages(kib_pages_t *p) +static void kiblnd_free_pages(kib_pages_t *p) { int npages = p->ibp_npages; int i; for (i = 0; i < npages; i++) { - if (p->ibp_pages[i] != NULL) + if (p->ibp_pages[i]) __free_page(p->ibp_pages[i]); } @@ -1107,7 +1098,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, offsetof(kib_pages_t, ibp_pages[npages])); - if (p == NULL) { + if (!p) { CERROR("Can't allocate descriptor for %d pages\n", npages); return -ENOMEM; } @@ -1119,7 +1110,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) p->ibp_pages[i] = alloc_pages_node( cfs_cpt_spread_node(lnet_cpt_table(), cpt), GFP_NOFS, 0); - if (p->ibp_pages[i] == NULL) { + if (!p->ibp_pages[i]) { CERROR("Can't allocate page %d of %d\n", i, npages); kiblnd_free_pages(p); return -ENOMEM; @@ -1135,10 +1126,10 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn) kib_rx_t *rx; int i; - LASSERT(conn->ibc_rxs != NULL); - LASSERT(conn->ibc_hdev != NULL); + LASSERT(conn->ibc_rxs); + LASSERT(conn->ibc_hdev); - for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { + for (i = 0; i < IBLND_RX_MSGS(conn); i++) { rx = &conn->ibc_rxs[i]; LASSERT(rx->rx_nob >= 0); /* not posted */ @@ -1162,7 +1153,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) int ipg; int i; - for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { + for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) { pg = conn->ibc_rx_pages->ibp_pages[ipg]; rx = &conn->ibc_rxs[i]; @@ -1174,7 +1165,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) IBLND_MSG_SIZE, DMA_FROM_DEVICE); LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev, - rx->rx_msgaddr)); + rx->rx_msgaddr)); KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr); CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n", @@ -1187,7 +1178,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) if (pg_off == PAGE_SIZE) { pg_off = 0; ipg++; - LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version)); + LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn)); } } } @@ -1198,9 +1189,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) kib_tx_t *tx; int i; - LASSERT(tpo->tpo_pool.po_allocated == 0); + LASSERT(!tpo->tpo_pool.po_allocated); - if (hdev == NULL) + if (!hdev) return; for (i = 0; i < tpo->tpo_pool.po_size; i++) { @@ -1224,9 +1215,10 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); while (dev->ibd_failover) { read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (i++ % 50 == 0) + if (!(i++ % 50)) CDEBUG(D_NET, "%s: Wait for failover\n", dev->ibd_ifname); + set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(cfs_time_seconds(1) / 100); read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -1252,7 +1244,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) int ipage; int i; - LASSERT(net != NULL); + LASSERT(net); dev = net->ibn_dev; @@ -1260,7 +1252,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE); /* No fancy arithmetic when we do the buffer calculations */ - CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0); + CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE)); tpo->tpo_hdev = kiblnd_current_hdev(dev); @@ -1275,7 +1267,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE); LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev, - tx->tx_msgaddr)); + tx->tx_msgaddr)); KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, 
tx->tx_msgaddr); list_add(&tx->tx_list, &pool->po_free_list); @@ -1291,68 +1283,32 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) } } -struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) +struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd, + int negotiated_nfrags) { - __u64 index; - - LASSERT(hdev->ibh_mrs[0] != NULL); - - if (hdev->ibh_nmrs == 1) - return hdev->ibh_mrs[0]; - - index = addr >> hdev->ibh_mr_shift; + __u16 nfrags = (negotiated_nfrags != -1) ? + negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand; - if (index < hdev->ibh_nmrs && - index == ((addr + size - 1) >> hdev->ibh_mr_shift)) - return hdev->ibh_mrs[index]; - - return NULL; -} - -struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) -{ - struct ib_mr *prev_mr; - struct ib_mr *mr; - int i; - - LASSERT(hdev->ibh_mrs[0] != NULL); + LASSERT(hdev->ibh_mrs); if (*kiblnd_tunables.kib_map_on_demand > 0 && - *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags) + nfrags <= rd->rd_nfrags) return NULL; - if (hdev->ibh_nmrs == 1) - return hdev->ibh_mrs[0]; - - for (i = 0, mr = prev_mr = NULL; - i < rd->rd_nfrags; i++) { - mr = kiblnd_find_dma_mr(hdev, - rd->rd_frags[i].rf_addr, - rd->rd_frags[i].rf_nob); - if (prev_mr == NULL) - prev_mr = mr; - - if (mr == NULL || prev_mr != mr) { - /* Can't covered by one single MR */ - mr = NULL; - break; - } - } - - return mr; + return hdev->ibh_mrs; } static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) { - LASSERT(pool->fpo_map_count == 0); + LASSERT(!pool->fpo_map_count); - if (pool->fpo_fmr_pool != NULL) + if (pool->fpo_fmr_pool) ib_destroy_fmr_pool(pool->fpo_fmr_pool); - if (pool->fpo_hdev != NULL) + if (pool->fpo_hdev) kiblnd_hdev_decref(pool->fpo_hdev); - LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t)); + LIBCFS_FREE(pool, sizeof(*pool)); } static void kiblnd_destroy_fmr_pool_list(struct list_head *head) @@ -1387,7 +1343,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_dev_t *dev = fps->fps_net->ibn_dev; kib_fmr_pool_t *fpo; struct ib_fmr_pool_param param = { - .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, + .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE, .page_shift = PAGE_SHIFT, .access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE), @@ -1399,7 +1355,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, int rc; LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); - if (fpo == NULL) + if (!fpo) return -ENOMEM; fpo->fpo_hdev = kiblnd_current_hdev(dev); @@ -1410,7 +1366,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, CERROR("Failed to create FMR pool: %d\n", rc); kiblnd_hdev_decref(fpo->fpo_hdev); - LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t)); + LIBCFS_FREE(fpo, sizeof(*fpo)); return rc; } @@ -1424,7 +1380,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies) { - if (fps->fps_net == NULL) /* intialized? */ + if (!fps->fps_net) /* intialized? 
*/ return; spin_lock(&fps->fps_lock); @@ -1434,7 +1390,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, kib_fmr_pool_t, fpo_list); fpo->fpo_failed = 1; list_del(&fpo->fpo_list); - if (fpo->fpo_map_count == 0) + if (!fpo->fpo_map_count) list_add(&fpo->fpo_list, zombies); else list_add(&fpo->fpo_list, &fps->fps_failed_pool_list); @@ -1445,7 +1401,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) { - if (fps->fps_net != NULL) { /* initialized? */ + if (fps->fps_net) { /* initialized? */ kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list); kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list); } @@ -1458,7 +1414,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_fmr_pool_t *fpo; int rc; - memset(fps, 0, sizeof(kib_fmr_poolset_t)); + memset(fps, 0, sizeof(*fps)); fps->fps_net = net; fps->fps_cpt = cpt; @@ -1469,7 +1425,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, INIT_LIST_HEAD(&fps->fps_failed_pool_list); rc = kiblnd_create_fmr_pool(fps, &fpo); - if (rc == 0) + if (!rc) list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); return rc; @@ -1477,7 +1433,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) { - if (fpo->fpo_map_count != 0) /* still in use */ + if (fpo->fpo_map_count) /* still in use */ return 0; if (fpo->fpo_failed) return 1; @@ -1494,11 +1450,11 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) int rc; rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); - LASSERT(rc == 0); + LASSERT(!rc); - if (status != 0) { + if (status) { rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool); - LASSERT(rc == 0); + LASSERT(!rc); } fmr->fmr_pool = NULL; @@ -1563,11 +1519,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, if (fps->fps_increasing) { spin_unlock(&fps->fps_lock); - CDEBUG(D_NET, - "Another thread is allocating new FMR pool, waiting for her to complete\n"); + CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n"); schedule(); goto again; - } if (time_before(cfs_time_current(), fps->fps_next_retry)) { @@ -1583,7 +1537,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, rc = kiblnd_create_fmr_pool(fps, &fpo); spin_lock(&fps->fps_lock); fps->fps_increasing = 0; - if (rc == 0) { + if (!rc) { fps->fps_version++; list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); } else { @@ -1597,7 +1551,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, static void kiblnd_fini_pool(kib_pool_t *pool) { LASSERT(list_empty(&pool->po_free_list)); - LASSERT(pool->po_allocated == 0); + LASSERT(!pool->po_allocated); CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name); } @@ -1606,7 +1560,7 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) { CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name); - memset(pool, 0, sizeof(kib_pool_t)); + memset(pool, 0, sizeof(*pool)); INIT_LIST_HEAD(&pool->po_free_list); pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); pool->po_owner = ps; @@ -1621,14 +1575,14 @@ static void kiblnd_destroy_pool_list(struct list_head *head) pool = list_entry(head->next, kib_pool_t, po_list); list_del(&pool->po_list); - LASSERT(pool->po_owner != NULL); + LASSERT(pool->po_owner); pool->po_owner->ps_pool_destroy(pool); } } static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) { - if (ps->ps_net == 
NULL) /* intialized? */ + if (!ps->ps_net) /* intialized? */ return; spin_lock(&ps->ps_lock); @@ -1637,7 +1591,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) kib_pool_t, po_list); po->po_failed = 1; list_del(&po->po_list); - if (po->po_allocated == 0) + if (!po->po_allocated) list_add(&po->po_list, zombies); else list_add(&po->po_list, &ps->ps_failed_pool_list); @@ -1647,7 +1601,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) static void kiblnd_fini_poolset(kib_poolset_t *ps) { - if (ps->ps_net != NULL) { /* initialized? */ + if (ps->ps_net) { /* initialized? */ kiblnd_destroy_pool_list(&ps->ps_failed_pool_list); kiblnd_destroy_pool_list(&ps->ps_pool_list); } @@ -1663,7 +1617,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, kib_pool_t *pool; int rc; - memset(ps, 0, sizeof(kib_poolset_t)); + memset(ps, 0, sizeof(*ps)); ps->ps_cpt = cpt; ps->ps_net = net; @@ -1680,7 +1634,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, INIT_LIST_HEAD(&ps->ps_failed_pool_list); rc = ps->ps_pool_create(ps, size, &pool); - if (rc == 0) + if (!rc) list_add(&pool->po_list, &ps->ps_pool_list); else CERROR("Failed to create the first pool for %s\n", ps->ps_name); @@ -1690,7 +1644,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) { - if (pool->po_allocated != 0) /* still in use */ + if (pool->po_allocated) /* still in use */ return 0; if (pool->po_failed) return 1; @@ -1706,7 +1660,7 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) spin_lock(&ps->ps_lock); - if (ps->ps_node_fini != NULL) + if (ps->ps_node_fini) ps->ps_node_fini(pool, node); LASSERT(pool->po_allocated > 0); @@ -1731,6 +1685,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) { struct list_head *node; kib_pool_t *pool; + unsigned int interval = 1; + unsigned long time_before; + unsigned int trips = 0; int rc; again: @@ -1744,7 +1701,7 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) node = pool->po_free_list.next; list_del(node); - if (ps->ps_node_init != NULL) { + if (ps->ps_node_init) { /* still hold the lock */ ps->ps_node_init(pool, node); } @@ -1756,9 +1713,15 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) if (ps->ps_increasing) { /* another thread is allocating a new pool */ spin_unlock(&ps->ps_lock); - CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n", - ps->ps_name); - schedule(); + trips++; + CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting %d HZs for her to complete. 
trips = %d\n", + ps->ps_name, interval, trips); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(interval); + if (interval < cfs_time_seconds(1)) + interval *= 2; + goto again; } @@ -1772,12 +1735,14 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) spin_unlock(&ps->ps_lock); CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); - + time_before = cfs_time_current(); rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); + CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete", + cfs_time_current() - time_before); spin_lock(&ps->ps_lock); ps->ps_increasing = 0; - if (rc == 0) { + if (!rc) { list_add_tail(&pool->po_list, &ps->ps_pool_list); } else { ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY); @@ -1794,37 +1759,37 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); int i; - LASSERT(pool->po_allocated == 0); + LASSERT(!pool->po_allocated); - if (tpo->tpo_tx_pages != NULL) { + if (tpo->tpo_tx_pages) { kiblnd_unmap_tx_pool(tpo); kiblnd_free_pages(tpo->tpo_tx_pages); } - if (tpo->tpo_tx_descs == NULL) + if (!tpo->tpo_tx_descs) goto out; for (i = 0; i < pool->po_size; i++) { kib_tx_t *tx = &tpo->tpo_tx_descs[i]; list_del(&tx->tx_list); - if (tx->tx_pages != NULL) + if (tx->tx_pages) LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV * sizeof(*tx->tx_pages)); - if (tx->tx_frags != NULL) + if (tx->tx_frags) LIBCFS_FREE(tx->tx_frags, IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags)); - if (tx->tx_wrq != NULL) + if (tx->tx_wrq) LIBCFS_FREE(tx->tx_wrq, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_wrq)); - if (tx->tx_sge != NULL) + if (tx->tx_sge) LIBCFS_FREE(tx->tx_sge, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_sge)); - if (tx->tx_rd != NULL) + if (tx->tx_rd) LIBCFS_FREE(tx->tx_rd, offsetof(kib_rdma_desc_t, rd_frags[IBLND_MAX_RDMA_FRAGS])); @@ -1834,7 +1799,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) pool->po_size * sizeof(kib_tx_t)); out: kiblnd_fini_pool(pool); - LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + LIBCFS_FREE(tpo, sizeof(*tpo)); } static int kiblnd_tx_pool_size(int ncpts) @@ -1853,7 +1818,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_tx_pool_t *tpo; LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo)); - if (tpo == NULL) { + if (!tpo) { CERROR("Failed to allocate TX pool\n"); return -ENOMEM; } @@ -1864,15 +1829,15 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, tpo->tpo_tx_pages = NULL; npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; - if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) { + if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) { CERROR("Can't allocate tx pages: %d\n", npg); - LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + LIBCFS_FREE(tpo, sizeof(*tpo)); return -ENOMEM; } LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt, size * sizeof(kib_tx_t)); - if (tpo->tpo_tx_descs == NULL) { + if (!tpo->tpo_tx_descs) { CERROR("Can't allocate %d tx descriptors\n", size); ps->ps_pool_destroy(pool); return -ENOMEM; @@ -1884,17 +1849,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_tx_t *tx = &tpo->tpo_tx_descs[i]; tx->tx_pool = tpo; - if (ps->ps_net->ibn_fmr_ps != NULL) { + if (ps->ps_net->ibn_fmr_ps) { LIBCFS_CPT_ALLOC(tx->tx_pages, lnet_cpt_table(), ps->ps_cpt, LNET_MAX_IOV * sizeof(*tx->tx_pages)); - if (tx->tx_pages == NULL) + if (!tx->tx_pages) break; } LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt, IBLND_MAX_RDMA_FRAGS * 
sizeof(*tx->tx_frags)); - if (tx->tx_frags == NULL) + if (!tx->tx_frags) break; sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS); @@ -1902,19 +1867,19 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_wrq)); - if (tx->tx_wrq == NULL) + if (!tx->tx_wrq) break; LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_sge)); - if (tx->tx_sge == NULL) + if (!tx->tx_sge) break; LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt, offsetof(kib_rdma_desc_t, rd_frags[IBLND_MAX_RDMA_FRAGS])); - if (tx->tx_rd == NULL) + if (!tx->tx_rd) break; } @@ -1945,23 +1910,23 @@ static void kiblnd_net_fini_pools(kib_net_t *net) kib_tx_poolset_t *tps; kib_fmr_poolset_t *fps; - if (net->ibn_tx_ps != NULL) { + if (net->ibn_tx_ps) { tps = net->ibn_tx_ps[i]; kiblnd_fini_poolset(&tps->tps_poolset); } - if (net->ibn_fmr_ps != NULL) { + if (net->ibn_fmr_ps) { fps = net->ibn_fmr_ps[i]; kiblnd_fini_fmr_poolset(fps); } } - if (net->ibn_tx_ps != NULL) { + if (net->ibn_tx_ps) { cfs_percpt_free(net->ibn_tx_ps); net->ibn_tx_ps = NULL; } - if (net->ibn_fmr_ps != NULL) { + if (net->ibn_fmr_ps) { cfs_percpt_free(net->ibn_fmr_ps); net->ibn_fmr_ps = NULL; } @@ -1975,8 +1940,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) int i; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (*kiblnd_tunables.kib_map_on_demand == 0 && - net->ibn_dev->ibd_hdev->ibh_nmrs == 1) { + if (!*kiblnd_tunables.kib_map_on_demand) { read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); goto create_tx_pool; } @@ -1996,7 +1960,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) * TX pool must be created later than FMR, see LU-2268 * for details */ - LASSERT(net->ibn_tx_ps == NULL); + LASSERT(!net->ibn_tx_ps); /* * premapping can fail if ibd_nmr > 1, so we always create @@ -2005,56 +1969,45 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(), sizeof(kib_fmr_poolset_t)); - if (net->ibn_fmr_ps == NULL) { + if (!net->ibn_fmr_ps) { CERROR("Failed to allocate FMR pool array\n"); rc = -ENOMEM; goto failed; } for (i = 0; i < ncpts; i++) { - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? i : cpts[i]; rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net, kiblnd_fmr_pool_size(ncpts), kiblnd_fmr_flush_trigger(ncpts)); - if (rc == -ENOSYS && i == 0) /* no FMR */ - break; - - if (rc != 0) { /* a real error */ + if (rc) { CERROR("Can't initialize FMR pool for CPT %d: %d\n", cpt, rc); goto failed; } } - if (i > 0) { + if (i > 0) LASSERT(i == ncpts); - goto create_tx_pool; - } - - cfs_percpt_free(net->ibn_fmr_ps); - net->ibn_fmr_ps = NULL; - - CWARN("Device does not support FMR\n"); - goto failed; create_tx_pool: net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), sizeof(kib_tx_poolset_t)); - if (net->ibn_tx_ps == NULL) { + if (!net->ibn_tx_ps) { CERROR("Failed to allocate tx pool array\n"); rc = -ENOMEM; goto failed; } for (i = 0; i < ncpts; i++) { - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? 
i : cpts[i]; rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset, cpt, net, "TX", kiblnd_tx_pool_size(ncpts), kiblnd_create_tx_pool, kiblnd_destroy_tx_pool, kiblnd_tx_init, NULL); - if (rc != 0) { + if (rc) { CERROR("Can't initialize TX pool for CPT %d: %d\n", cpt, rc); goto failed; @@ -2064,14 +2017,16 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) return 0; failed: kiblnd_net_fini_pools(net); - LASSERT(rc != 0); + LASSERT(rc); return rc; } static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) { - /* It's safe to assume a HCA can handle a page size - * matching that of the native system */ + /* + * It's safe to assume a HCA can handle a page size + * matching that of the native system + */ hdev->ibh_page_shift = PAGE_SHIFT; hdev->ibh_page_size = 1 << PAGE_SHIFT; hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1); @@ -2082,44 +2037,28 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) return 0; } - for (hdev->ibh_mr_shift = 0; - hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) { - if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) || - hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1) - return 0; - } - CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size); return -EINVAL; } static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) { - int i; - - if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL) + if (!hdev->ibh_mrs) return; - for (i = 0; i < hdev->ibh_nmrs; i++) { - if (hdev->ibh_mrs[i] == NULL) - break; + ib_dereg_mr(hdev->ibh_mrs); - ib_dereg_mr(hdev->ibh_mrs[i]); - } - - LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs); - hdev->ibh_mrs = NULL; - hdev->ibh_nmrs = 0; + hdev->ibh_mrs = NULL; } void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) { kiblnd_hdev_cleanup_mrs(hdev); - if (hdev->ibh_pd != NULL) + if (hdev->ibh_pd) ib_dealloc_pd(hdev->ibh_pd); - if (hdev->ibh_cmid != NULL) + if (hdev->ibh_cmid) rdma_destroy_id(hdev->ibh_cmid); LIBCFS_FREE(hdev, sizeof(*hdev)); @@ -2132,18 +2071,9 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; rc = kiblnd_hdev_get_attr(hdev); - if (rc != 0) + if (rc) return rc; - LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs)); - if (hdev->ibh_mrs == NULL) { - CERROR("Failed to allocate MRs table\n"); - return -ENOMEM; - } - - hdev->ibh_mrs[0] = NULL; - hdev->ibh_nmrs = 1; - mr = ib_get_dma_mr(hdev->ibh_pd, acflags); if (IS_ERR(mr)) { CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr)); @@ -2151,7 +2081,7 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) return PTR_ERR(mr); } - hdev->ibh_mrs[0] = mr; + hdev->ibh_mrs = mr; return 0; } @@ -2170,12 +2100,13 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) struct sockaddr_in dstaddr; int rc; - if (dev->ibd_hdev == NULL || /* initializing */ - dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */ + if (!dev->ibd_hdev || /* initializing */ + !dev->ibd_hdev->ibh_cmid || /* listener is dead */ *kiblnd_tunables.kib_dev_failover > 1) /* debugging */ return 1; - /* XXX: it's UGLY, but I don't have better way to find + /* + * XXX: it's UGLY, but I don't have better way to find * ib-bonding HCA failover because: * * a. no reliable CM event for HCA failover... @@ -2184,7 +2115,8 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) * We have only two choices at this point: * * a. rdma_bind_addr(), it will conflict with listener cmid - * b. rdma_resolve_addr() to zero addr */ + * b. 
rdma_resolve_addr() to zero addr + */ cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cmid)) { @@ -2201,7 +2133,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) dstaddr.sin_family = AF_INET; rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr, (struct sockaddr *)&dstaddr, 1); - if (rc != 0 || cmid->device == NULL) { + if (rc || !cmid->device) { CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", dev->ibd_ifname, &dev->ibd_ifip, cmid->device, rc); @@ -2230,24 +2162,27 @@ int kiblnd_dev_failover(kib_dev_t *dev) int i; LASSERT(*kiblnd_tunables.kib_dev_failover > 1 || - dev->ibd_can_failover || - dev->ibd_hdev == NULL); + dev->ibd_can_failover || !dev->ibd_hdev); rc = kiblnd_dev_need_failover(dev); if (rc <= 0) goto out; - if (dev->ibd_hdev != NULL && - dev->ibd_hdev->ibh_cmid != NULL) { - /* XXX it's not good to close old listener at here, + if (dev->ibd_hdev && + dev->ibd_hdev->ibh_cmid) { + /* + * XXX it's not good to close old listener at here, * because we can fail to create new listener. * But we have to close it now, otherwise rdma_bind_addr - * will return EADDRINUSE... How crap! */ + * will return EADDRINUSE... How crap! + */ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); cmid = dev->ibd_hdev->ibh_cmid; - /* make next schedule of kiblnd_dev_need_failover() - * return 1 for me */ + /* + * make next schedule of kiblnd_dev_need_failover() + * return 1 for me + */ dev->ibd_hdev->ibh_cmid = NULL; write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -2269,7 +2204,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) /* Bind to failover device or port */ rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr); - if (rc != 0 || cmid->device == NULL) { + if (rc || !cmid->device) { CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", dev->ibd_ifname, &dev->ibd_ifip, cmid->device, rc); @@ -2278,7 +2213,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) } LIBCFS_ALLOC(hdev, sizeof(*hdev)); - if (hdev == NULL) { + if (!hdev) { CERROR("Failed to allocate kib_hca_dev\n"); rdma_destroy_id(cmid); rc = -ENOMEM; @@ -2300,13 +2235,13 @@ int kiblnd_dev_failover(kib_dev_t *dev) hdev->ibh_pd = pd; rc = rdma_listen(cmid, 0); - if (rc != 0) { + if (rc) { CERROR("Can't start new listener: %d\n", rc); goto out; } rc = kiblnd_hdev_setup_mrs(hdev); - if (rc != 0) { + if (rc) { CERROR("Can't setup device: %d\n", rc); goto out; } @@ -2334,10 +2269,10 @@ int kiblnd_dev_failover(kib_dev_t *dev) kiblnd_destroy_pool_list(&zombie_ppo); if (!list_empty(&zombie_fpo)) kiblnd_destroy_fmr_pool_list(&zombie_fpo); - if (hdev != NULL) + if (hdev) kiblnd_hdev_decref(hdev); - if (rc != 0) + if (rc) dev->ibd_failed_failover++; else dev->ibd_failed_failover = 0; @@ -2347,13 +2282,13 @@ int kiblnd_dev_failover(kib_dev_t *dev) void kiblnd_destroy_dev(kib_dev_t *dev) { - LASSERT(dev->ibd_nnets == 0); + LASSERT(!dev->ibd_nnets); LASSERT(list_empty(&dev->ibd_nets)); list_del(&dev->ibd_fail_list); list_del(&dev->ibd_list); - if (dev->ibd_hdev != NULL) + if (dev->ibd_hdev) kiblnd_hdev_decref(dev->ibd_hdev); LIBCFS_FREE(dev, sizeof(*dev)); @@ -2369,7 +2304,7 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) int rc; rc = lnet_ipif_query(ifname, &up, &ip, &netmask); - if (rc != 0) { + if (rc) { CERROR("Can't query IPoIB interface %s: %d\n", ifname, rc); return NULL; @@ -2381,11 +2316,11 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) } LIBCFS_ALLOC(dev, sizeof(*dev)); - if (dev == NULL) + if (!dev) return NULL; netdev = dev_get_by_name(&init_net, ifname); - if (netdev == 
NULL) { + if (!netdev) { dev->ibd_can_failover = 0; } else { dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER); @@ -2400,14 +2335,13 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) /* initialize the device */ rc = kiblnd_dev_failover(dev); - if (rc != 0) { + if (rc) { CERROR("Can't initialize device: %d\n", rc); LIBCFS_FREE(dev, sizeof(*dev)); return NULL; } - list_add_tail(&dev->ibd_list, - &kiblnd_data.kib_devs); + list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs); return dev; } @@ -2424,18 +2358,22 @@ static void kiblnd_base_shutdown(void) case IBLND_INIT_ALL: case IBLND_INIT_DATA: - LASSERT(kiblnd_data.kib_peers != NULL); + LASSERT(kiblnd_data.kib_peers); for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) LASSERT(list_empty(&kiblnd_data.kib_peers[i])); LASSERT(list_empty(&kiblnd_data.kib_connd_zombies)); LASSERT(list_empty(&kiblnd_data.kib_connd_conns)); + LASSERT(list_empty(&kiblnd_data.kib_reconn_list)); + LASSERT(list_empty(&kiblnd_data.kib_reconn_wait)); /* flag threads to terminate; wake and wait for them to die */ kiblnd_data.kib_shutdown = 1; - /* NB: we really want to stop scheduler threads net by net + /* + * NB: we really want to stop scheduler threads net by net * instead of the whole module, this should be improved - * with dynamic configuration LNet */ + * with dynamic configuration LNet + */ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) wake_up_all(&sched->ibs_waitq); @@ -2443,7 +2381,7 @@ static void kiblnd_base_shutdown(void) wake_up_all(&kiblnd_data.kib_failover_waitq); i = 2; - while (atomic_read(&kiblnd_data.kib_nthreads) != 0) { + while (atomic_read(&kiblnd_data.kib_nthreads)) { i++; /* power of 2 ? */ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, @@ -2459,20 +2397,20 @@ static void kiblnd_base_shutdown(void) break; } - if (kiblnd_data.kib_peers != NULL) { + if (kiblnd_data.kib_peers) { LIBCFS_FREE(kiblnd_data.kib_peers, sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); } - if (kiblnd_data.kib_scheds != NULL) + if (kiblnd_data.kib_scheds) cfs_percpt_free(kiblnd_data.kib_scheds); kiblnd_data.kib_init = IBLND_INIT_NOTHING; module_put(THIS_MODULE); } -void kiblnd_shutdown(lnet_ni_t *ni) +static void kiblnd_shutdown(lnet_ni_t *ni) { kib_net_t *net = ni->ni_data; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; @@ -2481,7 +2419,7 @@ void kiblnd_shutdown(lnet_ni_t *ni) LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); - if (net == NULL) + if (!net) goto out; write_lock_irqsave(g_lock, flags); @@ -2498,7 +2436,7 @@ void kiblnd_shutdown(lnet_ni_t *ni) /* Wait for all peer state to clean up */ i = 2; - while (atomic_read(&net->ibn_npeers) != 0) { + while (atomic_read(&net->ibn_npeers)) { i++; CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? 
*/ "%s: waiting for %d peers to disconnect\n", @@ -2519,10 +2457,9 @@ void kiblnd_shutdown(lnet_ni_t *ni) /* fall through */ case IBLND_INIT_NOTHING: - LASSERT(atomic_read(&net->ibn_nconns) == 0); + LASSERT(!atomic_read(&net->ibn_nconns)); - if (net->ibn_dev != NULL && - net->ibn_dev->ibd_nnets == 0) + if (net->ibn_dev && !net->ibn_dev->ibd_nnets) kiblnd_destroy_dev(net->ibn_dev); break; @@ -2558,7 +2495,7 @@ static int kiblnd_base_startup(void) kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; LIBCFS_ALLOC(kiblnd_data.kib_peers, sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); - if (kiblnd_data.kib_peers == NULL) + if (!kiblnd_data.kib_peers) goto failed; for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]); @@ -2566,12 +2503,15 @@ static int kiblnd_base_startup(void) spin_lock_init(&kiblnd_data.kib_connd_lock); INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns); INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies); + INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list); + INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait); + init_waitqueue_head(&kiblnd_data.kib_connd_waitq); init_waitqueue_head(&kiblnd_data.kib_failover_waitq); kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*sched)); - if (kiblnd_data.kib_scheds == NULL) + if (!kiblnd_data.kib_scheds) goto failed; cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) { @@ -2585,8 +2525,10 @@ static int kiblnd_base_startup(void) if (*kiblnd_tunables.kib_nscheds > 0) { nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds); } else { - /* max to half of CPUs, another half is reserved for - * upper layer modules */ + /* + * max to half of CPUs, another half is reserved for + * upper layer modules + */ nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs); } @@ -2601,16 +2543,16 @@ static int kiblnd_base_startup(void) /*****************************************************/ rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd"); - if (rc != 0) { + if (rc) { CERROR("Can't spawn o2iblnd connd: %d\n", rc); goto failed; } - if (*kiblnd_tunables.kib_dev_failover != 0) + if (*kiblnd_tunables.kib_dev_failover) rc = kiblnd_thread_start(kiblnd_failover_thread, NULL, "kiblnd_failover"); - if (rc != 0) { + if (rc) { CERROR("Can't spawn o2iblnd failover thread: %d\n", rc); goto failed; } @@ -2632,7 +2574,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) int nthrs; int i; - if (sched->ibs_nthreads == 0) { + if (!sched->ibs_nthreads) { if (*kiblnd_tunables.kib_nscheds > 0) { nthrs = sched->ibs_nthreads_max; } else { @@ -2655,7 +2597,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", KIB_THREAD_CPT(id), KIB_THREAD_TID(id)); rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name); - if (rc == 0) + if (!rc) continue; CERROR("Can't spawn thread %d for scheduler[%d]: %d\n", @@ -2677,14 +2619,14 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, for (i = 0; i < ncpts; i++) { struct kib_sched_info *sched; - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? 
i : cpts[i]; sched = kiblnd_data.kib_scheds[cpt]; if (!newdev && sched->ibs_nthreads > 0) continue; rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]); - if (rc != 0) { + if (rc) { CERROR("Failed to start scheduler threads for %s\n", dev->ibd_ifname); return rc; @@ -2702,30 +2644,30 @@ static kib_dev_t *kiblnd_dev_search(char *ifname) colon = strchr(ifname, ':'); list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { - if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + if (!strcmp(&dev->ibd_ifname[0], ifname)) return dev; - if (alias != NULL) + if (alias) continue; colon2 = strchr(dev->ibd_ifname, ':'); - if (colon != NULL) + if (colon) *colon = 0; - if (colon2 != NULL) + if (colon2) *colon2 = 0; - if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + if (!strcmp(&dev->ibd_ifname[0], ifname)) alias = dev; - if (colon != NULL) + if (colon) *colon = ':'; - if (colon2 != NULL) + if (colon2) *colon2 = ':'; } return alias; } -int kiblnd_startup(lnet_ni_t *ni) +static int kiblnd_startup(lnet_ni_t *ni) { char *ifname; kib_dev_t *ibdev = NULL; @@ -2739,13 +2681,13 @@ int kiblnd_startup(lnet_ni_t *ni) if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { rc = kiblnd_base_startup(); - if (rc != 0) + if (rc) return rc; } LIBCFS_ALLOC(net, sizeof(*net)); ni->ni_data = net; - if (net == NULL) + if (!net) goto net_failed; ktime_get_real_ts64(&tv); @@ -2757,11 +2699,11 @@ int kiblnd_startup(lnet_ni_t *ni) ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits; ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits; - if (ni->ni_interfaces[0] != NULL) { + if (ni->ni_interfaces[0]) { /* Use the IPoIB interface specified in 'networks=' */ CLASSERT(LNET_MAX_INTERFACES > 1); - if (ni->ni_interfaces[1] != NULL) { + if (ni->ni_interfaces[1]) { CERROR("Multiple interfaces not supported\n"); goto failed; } @@ -2778,12 +2720,12 @@ int kiblnd_startup(lnet_ni_t *ni) ibdev = kiblnd_dev_search(ifname); - newdev = ibdev == NULL; + newdev = !ibdev; /* hmm...create kib_dev even for alias */ - if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0) + if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname)) ibdev = kiblnd_create_dev(ifname); - if (ibdev == NULL) + if (!ibdev) goto failed; net->ibn_dev = ibdev; @@ -2791,11 +2733,11 @@ int kiblnd_startup(lnet_ni_t *ni) rc = kiblnd_dev_start_threads(ibdev, newdev, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) + if (rc) goto failed; rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) { + if (rc) { CERROR("Failed to initialize NI pools: %d\n", rc); goto failed; } @@ -2810,7 +2752,7 @@ int kiblnd_startup(lnet_ni_t *ni) return 0; failed: - if (net->ibn_dev == NULL && ibdev != NULL) + if (!net->ibn_dev && ibdev) kiblnd_destroy_dev(ibdev); net_failed: @@ -2820,25 +2762,35 @@ net_failed: return -ENETDOWN; } -static void __exit kiblnd_module_fini(void) +static lnd_t the_o2iblnd = { + .lnd_type = O2IBLND, + .lnd_startup = kiblnd_startup, + .lnd_shutdown = kiblnd_shutdown, + .lnd_ctl = kiblnd_ctl, + .lnd_query = kiblnd_query, + .lnd_send = kiblnd_send, + .lnd_recv = kiblnd_recv, +}; + +static void __exit ko2iblnd_exit(void) { lnet_unregister_lnd(&the_o2iblnd); } -static int __init kiblnd_module_init(void) +static int __init ko2iblnd_init(void) { int rc; CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); CLASSERT(offsetof(kib_msg_t, - ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); CLASSERT(offsetof(kib_msg_t, - 
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); rc = kiblnd_tunables_init(); - if (rc != 0) + if (rc) return rc; lnet_register_lnd(&the_o2iblnd); @@ -2847,8 +2799,9 @@ static int __init kiblnd_module_init(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00"); +MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver"); +MODULE_VERSION("2.7.0"); MODULE_LICENSE("GPL"); -module_init(kiblnd_module_init); -module_exit(kiblnd_module_fini); +module_init(ko2iblnd_init); +module_exit(ko2iblnd_exit); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index 025faa9f86b3..bfcbdd167da7 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -60,17 +60,17 @@ #include <net/sock.h> #include <linux/in.h> +#include <rdma/rdma_cm.h> +#include <rdma/ib_cm.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_fmr_pool.h> + #define DEBUG_SUBSYSTEM S_LND #include "../../../include/linux/libcfs/libcfs.h" #include "../../../include/linux/lnet/lnet.h" #include "../../../include/linux/lnet/lib-lnet.h" -#include <rdma/rdma_cm.h> -#include <rdma/ib_cm.h> -#include <rdma/ib_verbs.h> -#include <rdma/ib_fmr_pool.h> - #define IBLND_PEER_HASH_SIZE 101 /* # peer lists */ /* # scheduler loops before reschedule */ #define IBLND_RESCHED 100 @@ -146,9 +146,9 @@ kiblnd_concurrent_sends_v1(void) #define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1) #define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0) -#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */ +#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */ #define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */ -#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \ +#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \ *kiblnd_tunables.kib_map_on_demand : \ IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */ #define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? 
\ @@ -162,18 +162,17 @@ kiblnd_concurrent_sends_v1(void) #define IBLND_FMR_POOL 256 #define IBLND_FMR_POOL_FLUSH 192 -/* TX messages (shared by all connections) */ -#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx) - -/* RX messages (per connection) */ -#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v)) -#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE) -#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE) +#define IBLND_RX_MSGS(c) \ + ((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version)) +#define IBLND_RX_MSG_BYTES(c) (IBLND_RX_MSGS(c) * IBLND_MSG_SIZE) +#define IBLND_RX_MSG_PAGES(c) \ + ((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE) /* WRs and CQEs (per connection) */ -#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v) -#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v)) -#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v)) +#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c) +#define IBLND_SEND_WRS(c) \ + ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version)) +#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c)) struct kib_hca_dev; @@ -209,8 +208,7 @@ typedef struct kib_hca_dev { __u64 ibh_page_mask; /* page mask of current HCA */ int ibh_mr_shift; /* bits shift of max MR size */ __u64 ibh_mr_size; /* size of MR */ - int ibh_nmrs; /* # of global MRs */ - struct ib_mr **ibh_mrs; /* global MR */ + struct ib_mr *ibh_mrs; /* global MR */ struct ib_pd *ibh_pd; /* PD */ kib_dev_t *ibh_dev; /* owner */ atomic_t ibh_ref; /* refcount */ @@ -350,6 +348,16 @@ typedef struct { void *kib_connd; /* the connd task (serialisation assertions) */ struct list_head kib_connd_conns; /* connections to setup/teardown */ struct list_head kib_connd_zombies; /* connections with zero refcount */ + /* connections to reconnect */ + struct list_head kib_reconn_list; + /* peers wait for reconnection */ + struct list_head kib_reconn_wait; + /** + * The second that peers are pulled out from \a kib_reconn_wait + * for reconnection. 
+ */ + time64_t kib_reconn_sec; + wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */ spinlock_t kib_connd_lock; /* serialise */ struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ @@ -465,10 +473,10 @@ typedef struct { #define IBLND_REJECT_FATAL 3 /* Anything else */ #define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ #define IBLND_REJECT_CONN_STALE 5 /* stale peer */ -#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match */ - /* mine */ -#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't */ - /* match mine */ +/* peer's rdma frags doesn't match mine */ +#define IBLND_REJECT_RDMA_FRAGS 6 +/* peer's msg queue size doesn't match mine */ +#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /***********************************************************************/ @@ -527,6 +535,8 @@ typedef struct kib_conn { struct list_head ibc_list; /* stash on peer's conn list */ struct list_head ibc_sched_list; /* schedule for attention */ __u16 ibc_version; /* version of connection */ + /* reconnect later */ + __u16 ibc_reconnect:1; __u64 ibc_incarnation; /* which instance of the peer */ atomic_t ibc_refcount; /* # users */ int ibc_state; /* what's happening */ @@ -536,6 +546,10 @@ typedef struct kib_conn { int ibc_outstanding_credits; /* # credits to return */ int ibc_reserved_credits; /* # ACK/DONE msg credits */ int ibc_comms_error; /* set on comms error */ + /* connections queue depth */ + __u16 ibc_queue_depth; + /* connections max frags */ + __u16 ibc_max_frags; unsigned int ibc_nrx:16; /* receive buffers owned */ unsigned int ibc_scheduled:1; /* scheduled for attention */ unsigned int ibc_ready:1; /* CQ callback fired */ @@ -572,18 +586,29 @@ typedef struct kib_peer { struct list_head ibp_list; /* stash on global peer list */ lnet_nid_t ibp_nid; /* who's on the other end(s) */ lnet_ni_t *ibp_ni; /* LNet interface */ - atomic_t ibp_refcount; /* # users */ struct list_head ibp_conns; /* all active connections */ struct list_head ibp_tx_queue; /* msgs waiting for a conn */ - __u16 ibp_version; /* version of peer */ __u64 ibp_incarnation; /* incarnation of peer */ - int ibp_connecting; /* current active connection attempts - */ - int ibp_accepting; /* current passive connection attempts - */ - int ibp_error; /* errno on closing this peer */ - unsigned long ibp_last_alive; /* when (in jiffies) I was last alive - */ + /* when (in jiffies) I was last alive */ + unsigned long ibp_last_alive; + /* # users */ + atomic_t ibp_refcount; + /* version of peer */ + __u16 ibp_version; + /* current passive connection attempts */ + unsigned short ibp_accepting; + /* current active connection attempts */ + unsigned short ibp_connecting; + /* reconnect this peer later */ + unsigned short ibp_reconnecting:1; + /* # consecutive reconnection attempts to this peer */ + unsigned int ibp_reconnected; + /* errno on closing this peer */ + int ibp_error; + /* max map_on_demand */ + __u16 ibp_max_frags; + /* max_peer_credits */ + __u16 ibp_queue_depth; } kib_peer_t; extern kib_data_t kiblnd_data; @@ -611,7 +636,7 @@ kiblnd_dev_can_failover(kib_dev_t *dev) if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */ return 0; - if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */ + if (!*kiblnd_tunables.kib_dev_failover) /* disabled */ return 0; if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */ @@ -661,6 +686,20 @@ do { \ kiblnd_destroy_peer(peer); \ } while (0) +static inline bool +kiblnd_peer_connecting(kib_peer_t *peer) +{ + return 
peer->ibp_connecting || + peer->ibp_reconnecting || + peer->ibp_accepting; +} + +static inline bool +kiblnd_peer_idle(kib_peer_t *peer) +{ + return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns); +} + static inline struct list_head * kiblnd_nid2peerlist(lnet_nid_t nid) { @@ -691,7 +730,8 @@ kiblnd_send_keepalive(kib_conn_t *conn) { return (*kiblnd_tunables.kib_keepalive > 0) && cfs_time_after(jiffies, conn->ibc_last_send + - *kiblnd_tunables.kib_keepalive*HZ); + msecs_to_jiffies(*kiblnd_tunables.kib_keepalive * + MSEC_PER_SEC)); } static inline int @@ -710,16 +750,16 @@ kiblnd_need_noop(kib_conn_t *conn) /* No tx to piggyback NOOP onto or no credit to send a tx */ return (list_empty(&conn->ibc_tx_queue) || - conn->ibc_credits == 0); + !conn->ibc_credits); } if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */ !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */ - conn->ibc_credits == 0) /* no credit */ + !conn->ibc_credits) /* no credit */ return 0; if (conn->ibc_credits == 1 && /* last credit reserved for */ - conn->ibc_outstanding_credits == 0) /* giving back credits */ + !conn->ibc_outstanding_credits) /* giving back credits */ return 0; /* No tx to piggyback NOOP onto or no credit to send a tx */ @@ -755,18 +795,19 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q) /* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */ /* lowest bits of the work request id to stash the work item type. */ -#define IBLND_WID_TX 0 -#define IBLND_WID_RDMA 1 -#define IBLND_WID_RX 2 -#define IBLND_WID_MASK 3UL +#define IBLND_WID_INVAL 0 +#define IBLND_WID_TX 1 +#define IBLND_WID_RX 2 +#define IBLND_WID_RDMA 3 +#define IBLND_WID_MASK 3UL static inline __u64 kiblnd_ptr2wreqid(void *ptr, int type) { unsigned long lptr = (unsigned long)ptr; - LASSERT((lptr & IBLND_WID_MASK) == 0); - LASSERT((type & ~IBLND_WID_MASK) == 0); + LASSERT(!(lptr & IBLND_WID_MASK)); + LASSERT(!(type & ~IBLND_WID_MASK)); return (__u64)(lptr | type); } @@ -907,9 +948,8 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev, #define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len) struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, - kib_rdma_desc_t *rd); -struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, - __u64 addr, __u64 size); + kib_rdma_desc_t *rd, + int negotiated_nfrags); void kiblnd_map_rx_descs(kib_conn_t *conn); void kiblnd_unmap_rx_descs(kib_conn_t *conn); void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node); @@ -919,11 +959,6 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, __u64 iov, kib_fmr_t *fmr); void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status); -int kiblnd_startup(lnet_ni_t *ni); -void kiblnd_shutdown(lnet_ni_t *ni); -int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg); -void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when); - int kiblnd_tunables_init(void); void kiblnd_tunables_fini(void); @@ -933,7 +968,6 @@ int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name); int kiblnd_failover_thread(void *arg); int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages); -void kiblnd_free_pages(kib_pages_t *p); int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event); @@ -942,39 +976,30 @@ int kiblnd_translate_mtu(int value); int kiblnd_dev_failover(kib_dev_t *dev); int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid); void kiblnd_destroy_peer(kib_peer_t *peer); +bool 
kiblnd_reconnect_peer(kib_peer_t *peer); void kiblnd_destroy_dev(kib_dev_t *dev); void kiblnd_unlink_peer_locked(kib_peer_t *peer); -void kiblnd_peer_alive(kib_peer_t *peer); kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid); -void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error); int kiblnd_close_stale_conns_locked(kib_peer_t *peer, - int version, __u64 incarnation); + int version, __u64 incarnation); int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why); -void kiblnd_connreq_done(kib_conn_t *conn, int status); kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, - int state, int version); -void kiblnd_destroy_conn(kib_conn_t *conn); + int state, int version); +void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn); void kiblnd_close_conn(kib_conn_t *conn, int error); void kiblnd_close_conn_locked(kib_conn_t *conn, int error); -int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, - int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie); - void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid); -void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn); -void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn); -void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob); void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, - int status); -void kiblnd_check_sends (kib_conn_t *conn); + int status); void kiblnd_qp_event(struct ib_event *event, void *arg); void kiblnd_cq_event(struct ib_event *event, void *arg); void kiblnd_cq_completion(struct ib_cq *cq, void *arg); void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, - int credits, lnet_nid_t dstnid, __u64 dststamp); + int credits, lnet_nid_t dstnid, __u64 dststamp); int kiblnd_unpack_msg(kib_msg_t *msg, int nob); int kiblnd_post_rx(kib_rx_t *rx, int credit); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index c7b9ccb13f1c..2323e8d3a318 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -40,6 +40,15 @@ #include "o2iblnd.h" +static void kiblnd_peer_alive(kib_peer_t *peer); +static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error); +static void kiblnd_check_sends(kib_conn_t *conn); +static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, + int type, int body_nob); +static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie); +static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn); +static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn); static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx); static void @@ -50,12 +59,12 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) int rc; int i; - LASSERT(net != NULL); + LASSERT(net); LASSERT(!in_interrupt()); LASSERT(!tx->tx_queued); /* mustn't be queued for sending */ - LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */ + LASSERT(!tx->tx_sending); /* mustn't be awaiting sent callback */ LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ - LASSERT(tx->tx_pool != NULL); + LASSERT(tx->tx_pool); kiblnd_unmap_tx(ni, tx); @@ -64,7 +73,7 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL; rc = tx->tx_status; - if (tx->tx_conn != NULL) { + if (tx->tx_conn) { LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni); kiblnd_conn_decref(tx->tx_conn); @@ -78,7 +87,7 @@ 
kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) /* delay finalize until my descs have been freed */ for (i = 0; i < 2; i++) { - if (lntmsg[i] == NULL) + if (!lntmsg[i]) continue; lnet_finalize(ni, lntmsg[i], rc); @@ -111,19 +120,19 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)]; node = kiblnd_pool_alloc_node(&tps->tps_poolset); - if (node == NULL) + if (!node) return NULL; - tx = container_of(node, kib_tx_t, tx_list); + tx = list_entry(node, kib_tx_t, tx_list); - LASSERT(tx->tx_nwrq == 0); + LASSERT(!tx->tx_nwrq); LASSERT(!tx->tx_queued); - LASSERT(tx->tx_sending == 0); + LASSERT(!tx->tx_sending); LASSERT(!tx->tx_waiting); - LASSERT(tx->tx_status == 0); - LASSERT(tx->tx_conn == NULL); - LASSERT(tx->tx_lntmsg[0] == NULL); - LASSERT(tx->tx_lntmsg[1] == NULL); - LASSERT(tx->tx_nfrags == 0); + LASSERT(!tx->tx_status); + LASSERT(!tx->tx_conn); + LASSERT(!tx->tx_lntmsg[0]); + LASSERT(!tx->tx_lntmsg[1]); + LASSERT(!tx->tx_nfrags); return tx; } @@ -149,17 +158,15 @@ kiblnd_post_rx(kib_rx_t *rx, int credit) kib_conn_t *conn = rx->rx_conn; kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; struct ib_recv_wr *bad_wrq = NULL; - struct ib_mr *mr; + struct ib_mr *mr = conn->ibc_hdev->ibh_mrs; int rc; - LASSERT(net != NULL); + LASSERT(net); LASSERT(!in_interrupt()); LASSERT(credit == IBLND_POSTRX_NO_CREDIT || credit == IBLND_POSTRX_PEER_CREDIT || credit == IBLND_POSTRX_RSRVD_CREDIT); - - mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE); - LASSERT(mr != NULL); + LASSERT(mr); rx->rx_sge.lkey = mr->lkey; rx->rx_sge.addr = rx->rx_msgaddr; @@ -185,7 +192,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit) */ kiblnd_conn_addref(conn); rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); - if (unlikely(rc != 0)) { + if (unlikely(rc)) { CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); rx->rx_nob = 0; @@ -194,7 +201,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit) if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ goto out; - if (unlikely(rc != 0)) { + if (unlikely(rc)) { kiblnd_close_conn(conn, rc); kiblnd_drop_rx(rx); /* No more posts for this rx */ goto out; @@ -225,7 +232,7 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); LASSERT(!tx->tx_queued); - LASSERT(tx->tx_sending != 0 || tx->tx_waiting); + LASSERT(tx->tx_sending || tx->tx_waiting); if (tx->tx_cookie != cookie) continue; @@ -251,7 +258,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) spin_lock(&conn->ibc_lock); tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); - if (tx == NULL) { + if (!tx) { spin_unlock(&conn->ibc_lock); CWARN("Unmatched completion type %x cookie %#llx from %s\n", @@ -260,7 +267,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) return; } - if (tx->tx_status == 0) { /* success so far */ + if (!tx->tx_status) { /* success so far */ if (status < 0) /* failed? 
*/ tx->tx_status = status; else if (txtype == IBLND_MSG_GET_REQ) @@ -269,7 +276,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) tx->tx_waiting = 0; - idle = !tx->tx_queued && (tx->tx_sending == 0); + idle = !tx->tx_queued && !tx->tx_sending; if (idle) list_del(&tx->tx_list); @@ -285,7 +292,7 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) lnet_ni_t *ni = conn->ibc_peer->ibp_ni; kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't get tx for completion %x for %s\n", type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); return; @@ -316,19 +323,18 @@ kiblnd_handle_rx(kib_rx_t *rx) msg->ibm_type, credits, libcfs_nid2str(conn->ibc_peer->ibp_nid)); - if (credits != 0) { + if (credits) { /* Have I received credits that will let me send? */ spin_lock(&conn->ibc_lock); if (conn->ibc_credits + credits > - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) { + conn->ibc_queue_depth) { rc2 = conn->ibc_credits; spin_unlock(&conn->ibc_lock); CERROR("Bad credits from %s: %d + %d > %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc2, credits, - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); + rc2, credits, conn->ibc_queue_depth); kiblnd_close_conn(conn, -EPROTO); kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT); @@ -360,7 +366,7 @@ kiblnd_handle_rx(kib_rx_t *rx) break; } - if (credits != 0) /* credit already posted */ + if (credits) /* credit already posted */ post_credit = IBLND_POSTRX_NO_CREDIT; else /* a keepalive NOOP */ post_credit = IBLND_POSTRX_PEER_CREDIT; @@ -396,12 +402,12 @@ kiblnd_handle_rx(kib_rx_t *rx) spin_lock(&conn->ibc_lock); tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, - msg->ibm_u.putack.ibpam_src_cookie); - if (tx != NULL) + msg->ibm_u.putack.ibpam_src_cookie); + if (tx) list_del(&tx->tx_list); spin_unlock(&conn->ibc_lock); - if (tx == NULL) { + if (!tx) { CERROR("Unmatched PUT_ACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); rc = -EPROTO; @@ -409,10 +415,11 @@ kiblnd_handle_rx(kib_rx_t *rx) } LASSERT(tx->tx_waiting); - /* CAVEAT EMPTOR: I could be racing with tx_complete, but... + /* + * CAVEAT EMPTOR: I could be racing with tx_complete, but... * (a) I can overwrite tx_msg since my peer has received it! - * (b) tx_waiting set tells tx_complete() it's not done. */ - + * (b) tx_waiting set tells tx_complete() it's not done. 
+ */ tx->tx_nwrq = 0; /* overwrite PUT_REQ */ rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE, @@ -469,7 +476,7 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) int rc; int err = -EIO; - LASSERT(net != NULL); + LASSERT(net); LASSERT(rx->rx_nob < 0); /* was posted */ rx->rx_nob = 0; /* isn't now */ @@ -486,9 +493,9 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) rx->rx_nob = nob; rc = kiblnd_unpack_msg(msg, rx->rx_nob); - if (rc != 0) { + if (rc) { CERROR("Error %d unpacking rx from %s\n", - rc, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + rc, libcfs_nid2str(conn->ibc_peer->ibp_nid)); goto failed; } @@ -497,7 +504,7 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) msg->ibm_srcstamp != conn->ibc_incarnation || msg->ibm_dststamp != net->ibn_incarnation) { CERROR("Stale rx from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + libcfs_nid2str(conn->ibc_peer->ibp_nid)); err = -ESTALE; goto failed; } @@ -537,7 +544,7 @@ kiblnd_kvaddr_to_page(unsigned long vaddr) if (is_vmalloc_addr((void *)vaddr)) { page = vmalloc_to_page((void *)vaddr); - LASSERT(page != NULL); + LASSERT(page); return page; } #ifdef CONFIG_HIGHMEM @@ -549,7 +556,7 @@ kiblnd_kvaddr_to_page(unsigned long vaddr) } #endif page = virt_to_page(vaddr); - LASSERT(page != NULL); + LASSERT(page); return page; } @@ -565,8 +572,8 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) int rc; int i; - LASSERT(tx->tx_pool != NULL); - LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); + LASSERT(tx->tx_pool); + LASSERT(tx->tx_pool->tpo_pool.po_owner); hdev = tx->tx_pool->tpo_hdev; @@ -582,13 +589,15 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) fps = net->ibn_fmr_ps[cpt]; rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr); - if (rc != 0) { + if (rc) { CERROR("Can't map %d pages: %d\n", npages, rc); return rc; } - /* If rd is not tx_rd, it's going to get sent to a peer, who will need - * the rkey */ + /* + * If rd is not tx_rd, it's going to get sent to a peer, who will need + * the rkey + */ rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey : tx->fmr.fmr_pfmr->fmr->lkey; rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; @@ -602,14 +611,14 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) { kib_net_t *net = ni->ni_data; - LASSERT(net != NULL); + LASSERT(net); if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) { kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); tx->fmr.fmr_pfmr = NULL; } - if (tx->tx_nfrags != 0) { + if (tx->tx_nfrags) { kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); tx->tx_nfrags = 0; @@ -625,8 +634,10 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob; int i; - /* If rd is not tx_rd, it's going to get sent to a peer and I'm the - * RDMA sink */ + /* + * If rd is not tx_rd, it's going to get sent to a peer and I'm the + * RDMA sink + */ tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; tx->tx_nfrags = nfrags; @@ -641,15 +652,15 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, nob += rd->rd_frags[i].rf_nob; } - /* looking for pre-mapping MR */ - mr = kiblnd_find_rd_dma_mr(hdev, rd); - if (mr != NULL) { + mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ? + tx->tx_conn->ibc_max_frags : -1); + if (mr) { /* found pre-mapping MR */ rd->rd_key = (rd != tx->tx_rd) ? 
mr->rkey : mr->lkey; return 0; } - if (net->ibn_fmr_ps != NULL) + if (net->ibn_fmr_ps) return kiblnd_fmr_map_tx(net, tx, rd, nob); return -EINVAL; @@ -668,7 +679,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, LASSERT(nob > 0); LASSERT(niov > 0); - LASSERT(net != NULL); + LASSERT(net); while (offset >= iov->iov_len) { offset -= iov->iov_len; @@ -684,7 +695,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, vaddr = ((unsigned long)iov->iov_base) + offset; page_offset = vaddr & (PAGE_SIZE - 1); page = kiblnd_kvaddr_to_page(vaddr); - if (page == NULL) { + if (!page) { CERROR("Can't find page\n"); return -EFAULT; } @@ -710,7 +721,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, static int kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, - int nkiov, lnet_kiov_t *kiov, int offset, int nob) + int nkiov, lnet_kiov_t *kiov, int offset, int nob) { kib_net_t *net = ni->ni_data; struct scatterlist *sg; @@ -720,7 +731,7 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, LASSERT(nob > 0); LASSERT(nkiov > 0); - LASSERT(net != NULL); + LASSERT(net); while (offset >= kiov->kiov_len) { offset -= kiov->kiov_len; @@ -750,26 +761,24 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, static int kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) - __releases(conn->ibc_lock) - __acquires(conn->ibc_lock) + __must_hold(&conn->ibc_lock) { kib_msg_t *msg = tx->tx_msg; kib_peer_t *peer = conn->ibc_peer; int ver = conn->ibc_version; int rc; int done; - struct ib_send_wr *bad_wrq; LASSERT(tx->tx_queued); /* We rely on this for QP sizing */ LASSERT(tx->tx_nwrq > 0); - LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver)); + LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags); - LASSERT(credit == 0 || credit == 1); + LASSERT(!credit || credit == 1); LASSERT(conn->ibc_outstanding_credits >= 0); - LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth); LASSERT(conn->ibc_credits >= 0); - LASSERT(conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + LASSERT(conn->ibc_credits <= conn->ibc_queue_depth); if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) { /* tx completions outstanding... 
*/ @@ -778,13 +787,13 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) return -EAGAIN; } - if (credit != 0 && conn->ibc_credits == 0) { /* no credits */ + if (credit && !conn->ibc_credits) { /* no credits */ CDEBUG(D_NET, "%s: no credits\n", libcfs_nid2str(peer->ibp_nid)); return -EAGAIN; } - if (credit != 0 && !IBLND_OOB_CAPABLE(ver) && + if (credit && !IBLND_OOB_CAPABLE(ver) && conn->ibc_credits == 1 && /* last credit reserved */ msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */ CDEBUG(D_NET, "%s: not using last credit\n", @@ -800,9 +809,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) (!kiblnd_need_noop(conn) || /* redundant NOOP */ (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */ conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) { - /* OK to drop when posted enough NOOPs, since + /* + * OK to drop when posted enough NOOPs, since * kiblnd_check_sends will queue NOOP again when - * posted NOOPs complete */ + * posted NOOPs complete + */ spin_unlock(&conn->ibc_lock); kiblnd_tx_done(peer->ibp_ni, tx); spin_lock(&conn->ibc_lock); @@ -821,12 +832,14 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) if (msg->ibm_type == IBLND_MSG_NOOP) conn->ibc_noops_posted++; - /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA + /* + * CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA * PUT. If so, it was first queued here as a PUT_REQ, sent and * stashed on ibc_active_txs, matched by an incoming PUT_ACK, * and then re-queued here. It's (just) possible that * tx_sending is non-zero if we've not done the tx_complete() - * from the first send; hence the ++ rather than = below. */ + * from the first send; hence the ++ rather than = below. + */ tx->tx_sending++; list_add(&tx->tx_list, &conn->ibc_active_txs); @@ -838,16 +851,25 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) /* close_conn will launch failover */ rc = -ENETDOWN; } else { - rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq); + struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr; + + LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), + "bad wr_id %llx, opc %d, flags %d, peer: %s\n", + wrq->wr_id, wrq->opcode, wrq->send_flags, + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + wrq = NULL; + rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq); } conn->ibc_last_send = jiffies; - if (rc == 0) + if (!rc) return 0; - /* NB credits are transferred in the actual - * message, which can only be the last work item */ + /* + * NB credits are transferred in the actual + * message, which can only be the last work item + */ conn->ibc_credits += credit; conn->ibc_outstanding_credits += msg->ibm_credits; conn->ibc_nsends_posted--; @@ -858,7 +880,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) tx->tx_waiting = 0; tx->tx_sending--; - done = (tx->tx_sending == 0); + done = !tx->tx_sending; if (done) list_del(&tx->tx_list); @@ -881,7 +903,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) return -EIO; } -void +static void kiblnd_check_sends(kib_conn_t *conn) { int ver = conn->ibc_version; @@ -899,13 +921,13 @@ kiblnd_check_sends(kib_conn_t *conn) LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); LASSERT(!IBLND_OOB_CAPABLE(ver) || - conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); + conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); LASSERT(conn->ibc_reserved_credits >= 0); while (conn->ibc_reserved_credits > 0 && !list_empty(&conn->ibc_tx_queue_rsrvd)) { tx = 
list_entry(conn->ibc_tx_queue_rsrvd.next, - kib_tx_t, tx_list); + kib_tx_t, tx_list); list_del(&tx->tx_list); list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); conn->ibc_reserved_credits--; @@ -915,23 +937,21 @@ kiblnd_check_sends(kib_conn_t *conn) spin_unlock(&conn->ibc_lock); tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (tx != NULL) + if (tx) kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); spin_lock(&conn->ibc_lock); - if (tx != NULL) + if (tx) kiblnd_queue_tx_locked(tx, conn); } - kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */ - for (;;) { int credit; if (!list_empty(&conn->ibc_tx_queue_nocred)) { credit = 0; tx = list_entry(conn->ibc_tx_queue_nocred.next, - kib_tx_t, tx_list); + kib_tx_t, tx_list); } else if (!list_empty(&conn->ibc_tx_noops)) { LASSERT(!IBLND_OOB_CAPABLE(ver)); credit = 1; @@ -940,17 +960,16 @@ kiblnd_check_sends(kib_conn_t *conn) } else if (!list_empty(&conn->ibc_tx_queue)) { credit = 1; tx = list_entry(conn->ibc_tx_queue.next, - kib_tx_t, tx_list); - } else + kib_tx_t, tx_list); + } else { break; + } - if (kiblnd_post_tx_locked(conn, tx, credit) != 0) + if (kiblnd_post_tx_locked(conn, tx, credit)) break; } spin_unlock(&conn->ibc_lock); - - kiblnd_conn_decref(conn); /* ...until here */ } static void @@ -976,9 +995,10 @@ kiblnd_tx_complete(kib_tx_t *tx, int status) spin_lock(&conn->ibc_lock); - /* I could be racing with rdma completion. Whoever makes 'tx' idle - * gets to free it, which also drops its ref on 'conn'. */ - + /* + * I could be racing with rdma completion. Whoever makes 'tx' idle + * gets to free it, which also drops its ref on 'conn'. + */ tx->tx_sending--; conn->ibc_nsends_posted--; if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP) @@ -989,7 +1009,7 @@ kiblnd_tx_complete(kib_tx_t *tx, int status) tx->tx_status = -EIO; } - idle = (tx->tx_sending == 0) && /* This is the final callback */ + idle = !tx->tx_sending && /* This is the final callback */ !tx->tx_waiting && /* Not waiting for peer */ !tx->tx_queued; /* Not re-queued (PUT_DONE) */ if (idle) @@ -1007,24 +1027,22 @@ kiblnd_tx_complete(kib_tx_t *tx, int status) kiblnd_conn_decref(conn); /* ...until here */ } -void +static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) { kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; int nob = offsetof(kib_msg_t, ibm_u) + body_nob; - struct ib_mr *mr; + struct ib_mr *mr = hdev->ibh_mrs; LASSERT(tx->tx_nwrq >= 0); LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); LASSERT(nob <= IBLND_MSG_SIZE); + LASSERT(mr); kiblnd_init_msg(tx->tx_msg, type, body_nob); - mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob); - LASSERT(mr != NULL); - sge->lkey = mr->lkey; sge->addr = tx->tx_msgaddr; sge->length = nob; @@ -1041,25 +1059,23 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) tx->tx_nwrq++; } -int +static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, - int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) { kib_msg_t *ibmsg = tx->tx_msg; kib_rdma_desc_t *srcrd = tx->tx_rd; struct ib_sge *sge = &tx->tx_sge[0]; struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next; int rc = resid; - int srcidx; - int dstidx; + int srcidx = 0; + int dstidx = 0; int wrknob; LASSERT(!in_interrupt()); - LASSERT(tx->tx_nwrq == 0); + LASSERT(!tx->tx_nwrq); LASSERT(type == IBLND_MSG_GET_DONE || - type == IBLND_MSG_PUT_DONE); - - srcidx = dstidx = 0; + type == 
IBLND_MSG_PUT_DONE); while (resid > 0) { if (srcidx >= srcrd->rd_nfrags) { @@ -1074,10 +1090,10 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, break; } - if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) { - CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n", + if (tx->tx_nwrq >= conn->ibc_max_frags) { + CERROR("RDMA has too many fragments for peer %s (%d), src idx/frags: %d/%d dst idx/frags: %d/%d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), - IBLND_RDMA_FRAGS(conn->ibc_version), + conn->ibc_max_frags, srcidx, srcrd->rd_nfrags, dstidx, dstrd->rd_nfrags); rc = -EMSGSIZE; @@ -1127,7 +1143,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, return rc; } -void +static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) { struct list_head *q; @@ -1137,9 +1153,11 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); tx->tx_queued = 1; - tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ); + tx->tx_deadline = jiffies + + msecs_to_jiffies(*kiblnd_tunables.kib_timeout * + MSEC_PER_SEC); - if (tx->tx_conn == NULL) { + if (!tx->tx_conn) { kiblnd_conn_addref(conn); tx->tx_conn = conn; LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE); @@ -1180,7 +1198,7 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) list_add_tail(&tx->tx_list, q); } -void +static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn) { spin_lock(&conn->ibc_lock); @@ -1200,19 +1218,19 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, /* allow the port to be reused */ rc = rdma_set_reuseaddr(cmid, 1); - if (rc != 0) { + if (rc) { CERROR("Unable to set reuse on cmid: %d\n", rc); return rc; } /* look for a free privileged port */ - for (port = PROT_SOCK-1; port > 0; port--) { + for (port = PROT_SOCK - 1; port > 0; port--) { srcaddr->sin_port = htons(port); rc = rdma_resolve_addr(cmid, (struct sockaddr *)srcaddr, (struct sockaddr *)dstaddr, timeout_ms); - if (rc == 0) { + if (!rc) { CDEBUG(D_NET, "bound to port %hu\n", port); return 0; } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) { @@ -1237,8 +1255,9 @@ kiblnd_connect_peer(kib_peer_t *peer) struct sockaddr_in dstaddr; int rc; - LASSERT(net != NULL); + LASSERT(net); LASSERT(peer->ibp_connecting > 0); + LASSERT(!peer->ibp_reconnecting); cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP, IB_QPT_RC); @@ -1271,14 +1290,14 @@ kiblnd_connect_peer(kib_peer_t *peer) (struct sockaddr *)&dstaddr, *kiblnd_tunables.kib_timeout * 1000); } - if (rc != 0) { + if (rc) { /* Can't initiate address resolution: */ CERROR("Can't resolve addr for %s: %d\n", libcfs_nid2str(peer->ibp_nid), rc); goto failed2; } - LASSERT(cmid->device != NULL); + LASSERT(cmid->device); CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, &dev->ibd_ifip, cmid->device->name); @@ -1286,12 +1305,64 @@ kiblnd_connect_peer(kib_peer_t *peer) return; failed2: + kiblnd_peer_connect_failed(peer, 1, rc); kiblnd_peer_decref(peer); /* cmid's ref */ rdma_destroy_id(cmid); + return; failed: kiblnd_peer_connect_failed(peer, 1, rc); } +bool +kiblnd_reconnect_peer(kib_peer_t *peer) +{ + rwlock_t *glock = &kiblnd_data.kib_global_lock; + char *reason = NULL; + struct list_head txs; + unsigned long flags; + + INIT_LIST_HEAD(&txs); + + write_lock_irqsave(glock, flags); + if (!peer->ibp_reconnecting) { + if (peer->ibp_accepting) + reason = "accepting"; + else if (peer->ibp_connecting) + reason = "connecting"; + else if 
(!list_empty(&peer->ibp_conns)) + reason = "connected"; + else /* connected then closed */ + reason = "closed"; + + goto no_reconnect; + } + + LASSERT(!peer->ibp_accepting && !peer->ibp_connecting && + list_empty(&peer->ibp_conns)); + peer->ibp_reconnecting = 0; + + if (!kiblnd_peer_active(peer)) { + list_splice_init(&peer->ibp_tx_queue, &txs); + reason = "unlinked"; + goto no_reconnect; + } + + peer->ibp_connecting++; + peer->ibp_reconnected++; + write_unlock_irqrestore(glock, flags); + + kiblnd_connect_peer(peer); + return true; + +no_reconnect: + write_unlock_irqrestore(glock, flags); + + CWARN("Abort reconnection of %s: %s\n", + libcfs_nid2str(peer->ibp_nid), reason); + kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED); + return false; +} + void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) { @@ -1302,25 +1373,28 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) unsigned long flags; int rc; - /* If I get here, I've committed to send, so I complete the tx with - * failure on any problems */ - - LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ - LASSERT(tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ + /* + * If I get here, I've committed to send, so I complete the tx with + * failure on any problems + */ + LASSERT(!tx || !tx->tx_conn); /* only set when assigned a conn */ + LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */ - /* First time, just use a read lock since I expect to find my peer - * connected */ + /* + * First time, just use a read lock since I expect to find my peer + * connected + */ read_lock_irqsave(g_lock, flags); peer = kiblnd_find_peer_locked(nid); - if (peer != NULL && !list_empty(&peer->ibp_conns)) { + if (peer && !list_empty(&peer->ibp_conns)) { /* Found a peer with an established connection */ conn = kiblnd_get_conn_locked(peer); kiblnd_conn_addref(conn); /* 1 ref for me... */ read_unlock_irqrestore(g_lock, flags); - if (tx != NULL) + if (tx) kiblnd_queue_tx(tx, conn); kiblnd_conn_decref(conn); /* ...to here */ return; @@ -1331,14 +1405,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_lock(g_lock); peer = kiblnd_find_peer_locked(nid); - if (peer != NULL) { + if (peer) { if (list_empty(&peer->ibp_conns)) { /* found a peer, but it's still connecting... */ - LASSERT(peer->ibp_connecting != 0 || - peer->ibp_accepting != 0); - if (tx != NULL) + LASSERT(kiblnd_peer_connecting(peer)); + if (tx) list_add_tail(&tx->tx_list, - &peer->ibp_tx_queue); + &peer->ibp_tx_queue); write_unlock_irqrestore(g_lock, flags); } else { conn = kiblnd_get_conn_locked(peer); @@ -1346,7 +1419,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_unlock_irqrestore(g_lock, flags); - if (tx != NULL) + if (tx) kiblnd_queue_tx(tx, conn); kiblnd_conn_decref(conn); /* ...to here */ } @@ -1357,9 +1430,9 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) /* Allocate a peer ready to add to the peer table and retry */ rc = kiblnd_create_peer(ni, &peer, nid); - if (rc != 0) { + if (rc) { CERROR("Can't create peer %s\n", libcfs_nid2str(nid)); - if (tx != NULL) { + if (tx) { tx->tx_status = -EHOSTUNREACH; tx->tx_waiting = 0; kiblnd_tx_done(ni, tx); @@ -1370,14 +1443,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_lock_irqsave(g_lock, flags); peer2 = kiblnd_find_peer_locked(nid); - if (peer2 != NULL) { + if (peer2) { if (list_empty(&peer2->ibp_conns)) { /* found a peer, but it's still connecting... 
*/ - LASSERT(peer2->ibp_connecting != 0 || - peer2->ibp_accepting != 0); - if (tx != NULL) + LASSERT(kiblnd_peer_connecting(peer2)); + if (tx) list_add_tail(&tx->tx_list, - &peer2->ibp_tx_queue); + &peer2->ibp_tx_queue); write_unlock_irqrestore(g_lock, flags); } else { conn = kiblnd_get_conn_locked(peer2); @@ -1385,7 +1457,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_unlock_irqrestore(g_lock, flags); - if (tx != NULL) + if (tx) kiblnd_queue_tx(tx, conn); kiblnd_conn_decref(conn); /* ...to here */ } @@ -1395,13 +1467,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) } /* Brand new peer */ - LASSERT(peer->ibp_connecting == 0); + LASSERT(!peer->ibp_connecting); peer->ibp_connecting = 1; /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0); + LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown); - if (tx != NULL) + if (tx) list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); kiblnd_peer_addref(peer); @@ -1437,13 +1509,13 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n", payload_nob, payload_niov, libcfs_id2str(target)); - LASSERT(payload_nob == 0 || payload_niov > 0); + LASSERT(!payload_nob || payload_niov > 0); LASSERT(payload_niov <= LNET_MAX_IOV); /* Thread context */ LASSERT(!in_interrupt()); /* payload is either all vaddrs or all pages */ - LASSERT(!(payload_kiov != NULL && payload_iov != NULL)); + LASSERT(!(payload_kiov && payload_iov)); switch (type) { default: @@ -1451,7 +1523,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) return -EIO; case LNET_MSG_ACK: - LASSERT(payload_nob == 0); + LASSERT(!payload_nob); break; case LNET_MSG_GET: @@ -1464,7 +1536,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) break; /* send IMMEDIATE */ tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate txd for GET to %s\n", libcfs_nid2str(target.nid)); return -ENOMEM; @@ -1472,7 +1544,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) ibmsg = tx->tx_msg; rd = &ibmsg->ibm_u.get.ibgm_rd; - if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) + if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV)) rc = kiblnd_setup_rd_iov(ni, tx, rd, lntmsg->msg_md->md_niov, lntmsg->msg_md->md_iov.iov, @@ -1482,7 +1554,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) lntmsg->msg_md->md_niov, lntmsg->msg_md->md_iov.kiov, 0, lntmsg->msg_md->md_length); - if (rc != 0) { + if (rc) { CERROR("Can't setup GET sink for %s: %d\n", libcfs_nid2str(target.nid), rc); kiblnd_tx_done(ni, tx); @@ -1496,7 +1568,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob); tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg); - if (tx->tx_lntmsg[1] == NULL) { + if (!tx->tx_lntmsg[1]) { CERROR("Can't create reply for GET -> %s\n", libcfs_nid2str(target.nid)); kiblnd_tx_done(ni, tx); @@ -1516,14 +1588,14 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) break; /* send IMMEDIATE */ tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate %s txd for %s\n", type == LNET_MSG_PUT ? 
"PUT" : "REPLY", libcfs_nid2str(target.nid)); return -ENOMEM; } - if (payload_kiov == NULL) + if (!payload_kiov) rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, payload_niov, payload_iov, payload_offset, payload_nob); @@ -1531,7 +1603,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, payload_niov, payload_kiov, payload_offset, payload_nob); - if (rc != 0) { + if (rc) { CERROR("Can't setup PUT src for %s: %d\n", libcfs_nid2str(target.nid), rc); kiblnd_tx_done(ni, tx); @@ -1555,16 +1627,16 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) <= IBLND_MSG_SIZE); tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't send %d to %s: tx descs exhausted\n", - type, libcfs_nid2str(target.nid)); + type, libcfs_nid2str(target.nid)); return -ENOMEM; } ibmsg = tx->tx_msg; ibmsg->ibm_u.immediate.ibim_hdr = *hdr; - if (payload_kiov != NULL) + if (payload_kiov) lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg, offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), payload_niov, payload_kiov, @@ -1596,22 +1668,22 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) int rc; tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't get tx for REPLY to %s\n", libcfs_nid2str(target.nid)); goto failed_0; } - if (nob == 0) + if (!nob) rc = 0; - else if (kiov == NULL) + else if (!kiov) rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, niov, iov, offset, nob); else rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, niov, kiov, offset, nob); - if (rc != 0) { + if (rc) { CERROR("Can't setup GET src for %s: %d\n", libcfs_nid2str(target.nid), rc); goto failed_1; @@ -1627,12 +1699,11 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) goto failed_1; } - if (nob == 0) { + if (!nob) { /* No RDMA: local completion may happen now! 
*/ lnet_finalize(ni, lntmsg, 0); } else { - /* RDMA: lnet_finalize(lntmsg) when it - * completes */ + /* RDMA: lnet_finalize(lntmsg) when it completes */ tx->tx_lntmsg[0] = lntmsg; } @@ -1647,8 +1718,8 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, - unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int mlen, unsigned int rlen) + unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) { kib_rx_t *rx = private; kib_msg_t *rxmsg = rx->rx_msg; @@ -1661,7 +1732,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, LASSERT(mlen <= rlen); LASSERT(!in_interrupt()); /* Either all pages or all vaddrs */ - LASSERT(!(kiov != NULL && iov != NULL)); + LASSERT(!(kiov && iov)); switch (rxmsg->ibm_type) { default: @@ -1671,13 +1742,13 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]); if (nob > rx->rx_nob) { CERROR("Immediate message from %s too big: %d(%d)\n", - libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), - nob, rx->rx_nob); + libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), + nob, rx->rx_nob); rc = -EPROTO; break; } - if (kiov != NULL) + if (kiov) lnet_copy_flat2kiov(niov, kiov, offset, IBLND_MSG_SIZE, rxmsg, offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), @@ -1694,7 +1765,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, kib_msg_t *txmsg; kib_rdma_desc_t *rd; - if (mlen == 0) { + if (!mlen) { lnet_finalize(ni, lntmsg, 0); kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0, rxmsg->ibm_u.putreq.ibprm_cookie); @@ -1702,7 +1773,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, } tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate tx for %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); /* Not replying will break the connection */ @@ -1712,13 +1783,13 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, txmsg = tx->tx_msg; rd = &txmsg->ibm_u.putack.ibpam_rd; - if (kiov == NULL) + if (!kiov) rc = kiblnd_setup_rd_iov(ni, tx, rd, niov, iov, offset, mlen); else rc = kiblnd_setup_rd_kiov(ni, tx, rd, niov, kiov, offset, mlen); - if (rc != 0) { + if (rc) { CERROR("Can't setup PUT sink for %s: %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); kiblnd_tx_done(ni, tx); @@ -1744,7 +1815,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, } case IBLND_MSG_GET_REQ: - if (lntmsg != NULL) { + if (lntmsg) { /* Optimized GET; RDMA lntmsg's payload */ kiblnd_reply(ni, rx, lntmsg); } else { @@ -1778,7 +1849,7 @@ kiblnd_thread_fini(void) atomic_dec(&kiblnd_data.kib_nthreads); } -void +static void kiblnd_peer_alive(kib_peer_t *peer) { /* This is racy, but everyone's only writing cfs_time_current() */ @@ -1795,10 +1866,7 @@ kiblnd_peer_notify(kib_peer_t *peer) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (list_empty(&peer->ibp_conns) && - peer->ibp_accepting == 0 && - peer->ibp_connecting == 0 && - peer->ibp_error != 0) { + if (kiblnd_peer_idle(peer) && peer->ibp_error) { error = peer->ibp_error; peer->ibp_error = 0; @@ -1807,7 +1875,7 @@ kiblnd_peer_notify(kib_peer_t *peer) read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (error != 0) + if (error) lnet_notify(peer->ibp_ni, peer->ibp_nid, 0, 
last_alive); } @@ -1815,25 +1883,27 @@ kiblnd_peer_notify(kib_peer_t *peer) void kiblnd_close_conn_locked(kib_conn_t *conn, int error) { - /* This just does the immediate housekeeping. 'error' is zero for a + /* + * This just does the immediate housekeeping. 'error' is zero for a * normal shutdown which can happen only after the connection has been * established. If the connection is established, schedule the - * connection to be finished off by the connd. Otherwise the connd is + * connection to be finished off by the connd. Otherwise the connd is * already dealing with it (either to set it up or tear it down). - * Caller holds kib_global_lock exclusively in irq context */ + * Caller holds kib_global_lock exclusively in irq context + */ kib_peer_t *peer = conn->ibc_peer; kib_dev_t *dev; unsigned long flags; - LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED); + LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED); - if (error != 0 && conn->ibc_comms_error == 0) + if (error && !conn->ibc_comms_error) conn->ibc_comms_error = error; if (conn->ibc_state != IBLND_CONN_ESTABLISHED) return; /* already being handled */ - if (error == 0 && + if (!error && list_empty(&conn->ibc_tx_noops) && list_empty(&conn->ibc_tx_queue) && list_empty(&conn->ibc_tx_queue_rsrvd) && @@ -1843,12 +1913,12 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error) libcfs_nid2str(peer->ibp_nid)); } else { CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", - libcfs_nid2str(peer->ibp_nid), error, - list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", - list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", - list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", - list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", - list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); + libcfs_nid2str(peer->ibp_nid), error, + list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", + list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", + list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", + list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", + list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); } dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev; @@ -1865,7 +1935,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error) kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING); - if (error != 0 && + if (error && kiblnd_dev_can_failover(dev)) { list_add_tail(&dev->ibd_fail_list, &kiblnd_data.kib_failed_devs); @@ -1929,8 +1999,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) if (txs == &conn->ibc_active_txs) { LASSERT(!tx->tx_queued); - LASSERT(tx->tx_waiting || - tx->tx_sending != 0); + LASSERT(tx->tx_waiting || tx->tx_sending); } else { LASSERT(tx->tx_queued); } @@ -1938,7 +2007,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) tx->tx_status = -ECONNABORTED; tx->tx_waiting = 0; - if (tx->tx_sending == 0) { + if (!tx->tx_sending) { tx->tx_queued = 0; list_del(&tx->tx_list); list_add(&tx->tx_list, &zombies); @@ -1958,14 +2027,17 @@ kiblnd_finalise_conn(kib_conn_t *conn) kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED); - /* abort_receives moves QP state to IB_QPS_ERR. This is only required + /* + * abort_receives moves QP state to IB_QPS_ERR. This is only required * for connections that didn't get as far as being connected, because - * rdma_disconnect() does this for free. */ + * rdma_disconnect() does this for free. + */ kiblnd_abort_receives(conn); - /* Complete all tx descs not waiting for sends to complete. 
- * NB we should be safe from RDMA now that the QP has changed state */ - + /* + * Complete all tx descs not waiting for sends to complete. + * NB we should be safe from RDMA now that the QP has changed state + */ kiblnd_abort_txs(conn, &conn->ibc_tx_noops); kiblnd_abort_txs(conn, &conn->ibc_tx_queue); kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd); @@ -1975,13 +2047,13 @@ kiblnd_finalise_conn(kib_conn_t *conn) kiblnd_handle_early_rxs(conn); } -void +static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) { LIST_HEAD(zombies); unsigned long flags; - LASSERT(error != 0); + LASSERT(error); LASSERT(!in_interrupt()); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -1994,14 +2066,14 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) peer->ibp_accepting--; } - if (peer->ibp_connecting != 0 || - peer->ibp_accepting != 0) { + if (kiblnd_peer_connecting(peer)) { /* another connection attempt under way... */ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); + flags); return; } + peer->ibp_reconnected = 0; if (list_empty(&peer->ibp_conns)) { /* Take peer's blocked transmits to complete with error */ list_add(&zombies, &peer->ibp_tx_queue); @@ -2029,7 +2101,7 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH); } -void +static void kiblnd_connreq_done(kib_conn_t *conn, int status) { kib_peer_t *peer = conn->ibc_peer; @@ -2047,14 +2119,14 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) LASSERT(!in_interrupt()); LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT && - peer->ibp_connecting > 0) || + peer->ibp_connecting > 0) || (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT && - peer->ibp_accepting > 0)); + peer->ibp_accepting > 0)); LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); conn->ibc_connvars = NULL; - if (status != 0) { + if (status) { /* failed to establish connection */ kiblnd_peer_connect_failed(peer, active, status); kiblnd_finalise_conn(conn); @@ -2068,16 +2140,19 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED); kiblnd_peer_alive(peer); - /* Add conn to peer's list and nuke any dangling conns from a different - * peer instance... */ + /* + * Add conn to peer's list and nuke any dangling conns from a different + * peer instance... + */ kiblnd_conn_addref(conn); /* +1 ref for ibc_list */ list_add(&conn->ibc_list, &peer->ibp_conns); + peer->ibp_reconnected = 0; if (active) peer->ibp_connecting--; else peer->ibp_accepting--; - if (peer->ibp_version == 0) { + if (!peer->ibp_version) { peer->ibp_version = conn->ibc_version; peer->ibp_incarnation = conn->ibc_incarnation; } @@ -2095,7 +2170,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) list_del_init(&peer->ibp_tx_queue); if (!kiblnd_peer_active(peer) || /* peer has been deleted */ - conn->ibc_comms_error != 0) { /* error has happened already */ + conn->ibc_comms_error) { /* error has happened already */ lnet_ni_t *ni = peer->ibp_ni; /* start to shut down connection */ @@ -2107,6 +2182,16 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) return; } + /** + * refcount taken by cmid is not reliable after I released the glock + * because this connection is visible to other threads now, another + * thread can find and close this connection right after I released + * the glock, if kiblnd_cm_callback for RDMA_CM_EVENT_DISCONNECTED is + * called, it can release the connection refcount taken by cmid. 
+ * It means the connection could be destroyed before I finish my + * operations on it. + */ + kiblnd_conn_addref(conn); write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); /* Schedule blocked txs */ @@ -2122,6 +2207,8 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) /* schedule blocked rxs */ kiblnd_handle_early_rxs(conn); + + kiblnd_conn_decref(conn); } static void @@ -2131,7 +2218,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) rc = rdma_reject(cmid, rej, sizeof(*rej)); - if (rc != 0) + if (rc) CWARN("Error %d sending reject\n", rc); } @@ -2159,14 +2246,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) /* cmid inherits 'context' from the corresponding listener id */ ibdev = (kib_dev_t *)cmid->context; - LASSERT(ibdev != NULL); + LASSERT(ibdev); memset(&rej, 0, sizeof(rej)); rej.ibr_magic = IBLND_MSG_MAGIC; rej.ibr_why = IBLND_REJECT_FATAL; rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE; - peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr); + peer_addr = (struct sockaddr_in *)&cmid->route.addr.dst_addr; if (*kiblnd_tunables.kib_require_priv_port && ntohs(peer_addr->sin_port) >= PROT_SOCK) { __u32 ip = ntohl(peer_addr->sin_addr.s_addr); @@ -2181,12 +2268,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - /* Future protocol version compatibility support! If the + /* + * Future protocol version compatibility support! If the * o2iblnd-specific protocol changes, or when LNET unifies * protocols over all LNDs, the initial connection will * negotiate a protocol version. I trap this here to avoid * console errors; the reject tells the peer which protocol I - * speak. */ + * speak. + */ if (reqmsg->ibm_magic == LNET_PROTO_MAGIC || reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC)) goto failed; @@ -2200,7 +2289,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; rc = kiblnd_unpack_msg(reqmsg, priv_nob); - if (rc != 0) { + if (rc) { CERROR("Can't parse connection request: %d\n", rc); goto failed; } @@ -2208,17 +2297,17 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) nid = reqmsg->ibm_srcnid; ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); - if (ni != NULL) { + if (ni) { net = (kib_net_t *)ni->ni_data; rej.ibr_incarnation = net->ibn_incarnation; } - if (ni == NULL || /* no matching net */ + if (!ni || /* no matching net */ ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */ net->ibn_dev != ibdev) { /* wrong device */ - CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n", + CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n", libcfs_nid2str(nid), - ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid), + !ni ? 
"NA" : libcfs_nid2str(ni->ni_nid), ibdev->ibd_ifname, ibdev->ibd_nnets, &ibdev->ibd_ifip, libcfs_nid2str(reqmsg->ibm_dstnid)); @@ -2227,7 +2316,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } /* check time stamp as soon as possible */ - if (reqmsg->ibm_dststamp != 0 && + if (reqmsg->ibm_dststamp && reqmsg->ibm_dststamp != net->ibn_incarnation) { CWARN("Stale connection request\n"); rej.ibr_why = IBLND_REJECT_CONN_STALE; @@ -2243,10 +2332,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - if (reqmsg->ibm_u.connparams.ibcp_queue_depth != + if (reqmsg->ibm_u.connparams.ibcp_queue_depth > IBLND_MSG_QUEUE_SIZE(version)) { - CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n", - libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth, + CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n", + libcfs_nid2str(nid), + reqmsg->ibm_u.connparams.ibcp_queue_depth, IBLND_MSG_QUEUE_SIZE(version)); if (version == IBLND_MSG_VERSION) @@ -2255,18 +2345,28 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - if (reqmsg->ibm_u.connparams.ibcp_max_frags != + if (reqmsg->ibm_u.connparams.ibcp_max_frags > IBLND_RDMA_FRAGS(version)) { - CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n", - libcfs_nid2str(nid), version, - reqmsg->ibm_u.connparams.ibcp_max_frags, - IBLND_RDMA_FRAGS(version)); + CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n", + libcfs_nid2str(nid), version, + reqmsg->ibm_u.connparams.ibcp_max_frags, + IBLND_RDMA_FRAGS(version)); - if (version == IBLND_MSG_VERSION) + if (version >= IBLND_MSG_VERSION) rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; goto failed; + } else if (reqmsg->ibm_u.connparams.ibcp_max_frags < + IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) { + CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n", + libcfs_nid2str(nid), version, + reqmsg->ibm_u.connparams.ibcp_max_frags, + IBLND_RDMA_FRAGS(version)); + + if (version >= IBLND_MSG_VERSION) + rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; + goto failed; } if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { @@ -2279,17 +2379,21 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) /* assume 'nid' is a new peer; create */ rc = kiblnd_create_peer(ni, &peer, nid); - if (rc != 0) { + if (rc) { CERROR("Can't create peer for %s\n", libcfs_nid2str(nid)); rej.ibr_why = IBLND_REJECT_NO_RESOURCES; goto failed; } + /* We have validated the peer's parameters so use those */ + peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags; + peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth; + write_lock_irqsave(g_lock, flags); peer2 = kiblnd_find_peer_locked(nid); - if (peer2 != NULL) { - if (peer2->ibp_version == 0) { + if (peer2) { + if (!peer2->ibp_version) { peer2->ibp_version = version; peer2->ibp_incarnation = reqmsg->ibm_srcstamp; } @@ -2298,10 +2402,16 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp || peer2->ibp_version != version) { kiblnd_close_peer_conns_locked(peer2, -ESTALE); + + if (kiblnd_peer_active(peer2)) { + peer2->ibp_incarnation = reqmsg->ibm_srcstamp; + peer2->ibp_version = version; + } write_unlock_irqrestore(g_lock, flags); - CWARN("Conn stale %s [old ver: %x, new ver: %x]\n", - libcfs_nid2str(nid), peer2->ibp_version, version); 
+ CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n", + libcfs_nid2str(nid), peer2->ibp_version, version, + peer2->ibp_incarnation, reqmsg->ibm_srcstamp); kiblnd_peer_decref(peer); rej.ibr_why = IBLND_REJECT_CONN_STALE; @@ -2309,7 +2419,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } /* tie-break connection race in favour of the higher NID */ - if (peer2->ibp_connecting != 0 && + if (peer2->ibp_connecting && nid < ni->ni_nid) { write_unlock_irqrestore(g_lock, flags); @@ -2320,24 +2430,37 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } + /** + * passive connection is allowed even this peer is waiting for + * reconnection. + */ + peer2->ibp_reconnecting = 0; peer2->ibp_accepting++; kiblnd_peer_addref(peer2); + /** + * Race with kiblnd_launch_tx (active connect) to create peer + * so copy validated parameters since we now know what the + * peer's limits are + */ + peer2->ibp_max_frags = peer->ibp_max_frags; + peer2->ibp_queue_depth = peer->ibp_queue_depth; + write_unlock_irqrestore(g_lock, flags); kiblnd_peer_decref(peer); peer = peer2; } else { /* Brand new peer */ - LASSERT(peer->ibp_accepting == 0); - LASSERT(peer->ibp_version == 0 && - peer->ibp_incarnation == 0); + LASSERT(!peer->ibp_accepting); + LASSERT(!peer->ibp_version && + !peer->ibp_incarnation); peer->ibp_accepting = 1; peer->ibp_version = version; peer->ibp_incarnation = reqmsg->ibm_srcstamp; /* I have a ref on ni that prevents it being shutdown */ - LASSERT(net->ibn_shutdown == 0); + LASSERT(!net->ibn_shutdown); kiblnd_peer_addref(peer); list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); @@ -2345,31 +2468,33 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) write_unlock_irqrestore(g_lock, flags); } - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version); - if (conn == NULL) { + conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, + version); + if (!conn) { kiblnd_peer_connect_failed(peer, 0, -ENOMEM); kiblnd_peer_decref(peer); rej.ibr_why = IBLND_REJECT_NO_RESOURCES; goto failed; } - /* conn now "owns" cmid, so I return success from here on to ensure the - * CM callback doesn't destroy cmid. */ - + /* + * conn now "owns" cmid, so I return success from here on to ensure the + * CM callback doesn't destroy cmid. 
+ */ conn->ibc_incarnation = reqmsg->ibm_srcstamp; - conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version); - conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version); - LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version) - <= IBLND_RX_MSGS(version)); + conn->ibc_credits = conn->ibc_queue_depth; + conn->ibc_reserved_credits = conn->ibc_queue_depth; + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + + IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn)); ackmsg = &conn->ibc_connvars->cv_msg; memset(ackmsg, 0, sizeof(*ackmsg)); kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK, sizeof(ackmsg->ibm_u.connparams)); - ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); + ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; + ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags; ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; - ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version); kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp); @@ -2385,7 +2510,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid)); rc = rdma_accept(cmid, &cp); - if (rc != 0) { + if (rc) { CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc); rej.ibr_version = version; rej.ibr_why = IBLND_REJECT_FATAL; @@ -2399,7 +2524,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) return 0; failed: - if (ni != NULL) + if (ni) lnet_ni_decref(ni); rej.ibr_version = version; @@ -2411,45 +2536,82 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } static void -kiblnd_reconnect(kib_conn_t *conn, int version, - __u64 incarnation, int why, kib_connparams_t *cp) +kiblnd_check_reconnect(kib_conn_t *conn, int version, + __u64 incarnation, int why, kib_connparams_t *cp) { + rwlock_t *glock = &kiblnd_data.kib_global_lock; kib_peer_t *peer = conn->ibc_peer; char *reason; - int retry = 0; + int msg_size = IBLND_MSG_SIZE; + int frag_num = -1; + int queue_dep = -1; + bool reconnect; unsigned long flags; LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */ + LASSERT(!peer->ibp_reconnecting); - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + if (cp) { + msg_size = cp->ibcp_max_msg_size; + frag_num = cp->ibcp_max_frags; + queue_dep = cp->ibcp_queue_depth; + } - /* retry connection if it's still needed and no other connection + write_lock_irqsave(glock, flags); + /** + * retry connection if it's still needed and no other connection * attempts (active or passive) are in progress * NB: reconnect is still needed even when ibp_tx_queue is * empty if ibp_version != version because reconnect may be - * initiated by kiblnd_query() */ - if ((!list_empty(&peer->ibp_tx_queue) || - peer->ibp_version != version) && - peer->ibp_connecting == 1 && - peer->ibp_accepting == 0) { - retry = 1; - peer->ibp_connecting++; - - peer->ibp_version = version; - peer->ibp_incarnation = incarnation; + * initiated by kiblnd_query() + */ + reconnect = (!list_empty(&peer->ibp_tx_queue) || + peer->ibp_version != version) && + peer->ibp_connecting == 1 && + !peer->ibp_accepting; + if (!reconnect) { + reason = "no need"; + goto out; } - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - if (!retry) - return; - switch (why) { default: reason = "Unknown"; break; + case IBLND_REJECT_RDMA_FRAGS: + if (!cp) { + reason = "can't negotiate max frags"; + goto out; + } + if 
(!*kiblnd_tunables.kib_map_on_demand) { + reason = "map_on_demand must be enabled"; + goto out; + } + if (conn->ibc_max_frags <= frag_num) { + reason = "unsupported max frags"; + goto out; + } + + peer->ibp_max_frags = frag_num; + reason = "rdma fragments"; + break; + + case IBLND_REJECT_MSG_QUEUE_SIZE: + if (!cp) { + reason = "can't negotiate queue depth"; + goto out; + } + if (conn->ibc_queue_depth <= queue_dep) { + reason = "unsupported queue depth"; + goto out; + } + + peer->ibp_queue_depth = queue_dep; + reason = "queue depth"; + break; + case IBLND_REJECT_CONN_STALE: reason = "stale"; break; @@ -2463,14 +2625,24 @@ kiblnd_reconnect(kib_conn_t *conn, int version, break; } - CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n", - libcfs_nid2str(peer->ibp_nid), - reason, IBLND_MSG_VERSION, version, - cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version), - cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version), - cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE); + conn->ibc_reconnect = 1; + peer->ibp_reconnecting = 1; + peer->ibp_version = version; + if (incarnation) + peer->ibp_incarnation = incarnation; +out: + write_unlock_irqrestore(glock, flags); - kiblnd_connect_peer(peer); + CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n", + libcfs_nid2str(peer->ibp_nid), + reconnect ? "reconnect" : "don't reconnect", + reason, IBLND_MSG_VERSION, version, msg_size, + conn->ibc_queue_depth, queue_dep, + conn->ibc_max_frags, frag_num); + /** + * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer + * while destroying the zombie + */ } static void @@ -2483,8 +2655,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) switch (reason) { case IB_CM_REJ_STALE_CONN: - kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0, - IBLND_REJECT_CONN_STALE, NULL); + kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0, + IBLND_REJECT_CONN_STALE, NULL); break; case IB_CM_REJ_INVALID_SERVICE_ID: @@ -2521,9 +2693,11 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) if (priv_nob >= sizeof(kib_rej_t) && rej->ibr_version > IBLND_MSG_VERSION_1) { - /* priv_nob is always 148 in current version + /* + * priv_nob is always 148 in current version * of OFED, so we still need to check version. - * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */ + * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) + */ cp = &rej->ibr_cp; if (flip) { @@ -2564,24 +2738,11 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) case IBLND_REJECT_CONN_RACE: case IBLND_REJECT_CONN_STALE: case IBLND_REJECT_CONN_UNCOMPAT: - kiblnd_reconnect(conn, rej->ibr_version, - incarnation, rej->ibr_why, cp); - break; - case IBLND_REJECT_MSG_QUEUE_SIZE: - CERROR("%s rejected: incompatible message queue depth %d, %d\n", - libcfs_nid2str(peer->ibp_nid), - cp != NULL ? cp->ibcp_queue_depth : - IBLND_MSG_QUEUE_SIZE(rej->ibr_version), - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); - break; - case IBLND_REJECT_RDMA_FRAGS: - CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n", - libcfs_nid2str(peer->ibp_nid), - cp != NULL ? 
cp->ibcp_max_frags : - IBLND_RDMA_FRAGS(rej->ibr_version), - IBLND_RDMA_FRAGS(conn->ibc_version)); + kiblnd_check_reconnect(conn, rej->ibr_version, + incarnation, + rej->ibr_why, cp); break; case IBLND_REJECT_NO_RESOURCES: @@ -2623,9 +2784,9 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) int rc = kiblnd_unpack_msg(msg, priv_nob); unsigned long flags; - LASSERT(net != NULL); + LASSERT(net); - if (rc != 0) { + if (rc) { CERROR("Can't unpack connack from %s: %d\n", libcfs_nid2str(peer->ibp_nid), rc); goto failed; @@ -2645,22 +2806,22 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) goto failed; } - if (msg->ibm_u.connparams.ibcp_queue_depth != - IBLND_MSG_QUEUE_SIZE(ver)) { - CERROR("%s has incompatible queue depth %d(%d wanted)\n", + if (msg->ibm_u.connparams.ibcp_queue_depth > + conn->ibc_queue_depth) { + CERROR("%s has incompatible queue depth %d (<=%d wanted)\n", libcfs_nid2str(peer->ibp_nid), msg->ibm_u.connparams.ibcp_queue_depth, - IBLND_MSG_QUEUE_SIZE(ver)); + conn->ibc_queue_depth); rc = -EPROTO; goto failed; } - if (msg->ibm_u.connparams.ibcp_max_frags != - IBLND_RDMA_FRAGS(ver)) { - CERROR("%s has incompatible max_frags %d (%d wanted)\n", + if (msg->ibm_u.connparams.ibcp_max_frags > + conn->ibc_max_frags) { + CERROR("%s has incompatible max_frags %d (<=%d wanted)\n", libcfs_nid2str(peer->ibp_nid), msg->ibm_u.connparams.ibcp_max_frags, - IBLND_RDMA_FRAGS(ver)); + conn->ibc_max_frags); rc = -EPROTO; goto failed; } @@ -2682,7 +2843,7 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) rc = -ESTALE; read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (rc != 0) { + if (rc) { CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n", libcfs_nid2str(peer->ibp_nid), rc, msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags); @@ -2690,21 +2851,24 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) } conn->ibc_incarnation = msg->ibm_srcstamp; - conn->ibc_credits = - conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver); - LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver) - <= IBLND_RX_MSGS(ver)); + conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags; + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + + IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn)); kiblnd_connreq_done(conn, 0); return; failed: - /* NB My QP has already established itself, so I handle anything going + /* + * NB My QP has already established itself, so I handle anything going * wrong here by setting ibc_comms_error. * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then - * immediately tears it down. */ - - LASSERT(rc != 0); + * immediately tears it down. + */ + LASSERT(rc); conn->ibc_comms_error = rc; kiblnd_connreq_done(conn, 0); } @@ -2724,28 +2888,30 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); incarnation = peer->ibp_incarnation; - version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : - peer->ibp_version; + version = !peer->ibp_version ? 
IBLND_MSG_VERSION : + peer->ibp_version; read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version); - if (conn == NULL) { + conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, + version); + if (!conn) { kiblnd_peer_connect_failed(peer, 1, -ENOMEM); kiblnd_peer_decref(peer); /* lose cmid's ref */ return -ENOMEM; } - /* conn "owns" cmid now, so I return success from here on to ensure the + /* + * conn "owns" cmid now, so I return success from here on to ensure the * CM callback doesn't destroy cmid. conn also takes over cmid's ref - * on peer */ - + * on peer + */ msg = &conn->ibc_connvars->cv_msg; memset(msg, 0, sizeof(*msg)); kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams)); - msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); - msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version); + msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; + msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags; msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; kiblnd_pack_msg(peer->ibp_ni, msg, version, @@ -2764,7 +2930,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) LASSERT(conn->ibc_cmid == cmid); rc = rdma_connect(cmid, &cp); - if (rc != 0) { + if (rc) { CERROR("Can't connect to %s: %d\n", libcfs_nid2str(peer->ibp_nid), rc); kiblnd_connreq_done(conn, rc); @@ -2798,10 +2964,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case RDMA_CM_EVENT_ADDR_ERROR: peer = (kib_peer_t *)cmid->context; CNETERR("%s: ADDR ERROR %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer->ibp_nid), event->status); kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); kiblnd_peer_decref(peer); - return -EHOSTUNREACH; /* rc != 0 destroys cmid */ + return -EHOSTUNREACH; /* rc destroys cmid */ case RDMA_CM_EVENT_ADDR_RESOLVED: peer = (kib_peer_t *)cmid->context; @@ -2809,14 +2975,14 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) CDEBUG(D_NET, "%s Addr resolved: %d\n", libcfs_nid2str(peer->ibp_nid), event->status); - if (event->status != 0) { + if (event->status) { CNETERR("Can't resolve address for %s: %d\n", libcfs_nid2str(peer->ibp_nid), event->status); rc = event->status; } else { rc = rdma_resolve_route( cmid, *kiblnd_tunables.kib_timeout * 1000); - if (rc == 0) + if (!rc) return 0; /* Can't initiate route resolution */ CERROR("Can't resolve route for %s: %d\n", @@ -2824,7 +2990,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) } kiblnd_peer_connect_failed(peer, 1, rc); kiblnd_peer_decref(peer); - return rc; /* rc != 0 destroys cmid */ + return rc; /* rc destroys cmid */ case RDMA_CM_EVENT_ROUTE_ERROR: peer = (kib_peer_t *)cmid->context; @@ -2832,28 +2998,28 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) libcfs_nid2str(peer->ibp_nid), event->status); kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); kiblnd_peer_decref(peer); - return -EHOSTUNREACH; /* rc != 0 destroys cmid */ + return -EHOSTUNREACH; /* rc destroys cmid */ case RDMA_CM_EVENT_ROUTE_RESOLVED: peer = (kib_peer_t *)cmid->context; CDEBUG(D_NET, "%s Route resolved: %d\n", libcfs_nid2str(peer->ibp_nid), event->status); - if (event->status == 0) + if (!event->status) return kiblnd_active_connect(cmid); CNETERR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer->ibp_nid), event->status); 
kiblnd_peer_connect_failed(peer, 1, event->status); kiblnd_peer_decref(peer); - return event->status; /* rc != 0 destroys cmid */ + return event->status; /* rc destroys cmid */ case RDMA_CM_EVENT_UNREACHABLE: conn = (kib_conn_t *)cmid->context; LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); CNETERR("%s: UNREACHABLE %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); + libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); kiblnd_connreq_done(conn, -ENETDOWN); kiblnd_conn_decref(conn); return 0; @@ -2876,8 +3042,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case IBLND_CONN_PASSIVE_WAIT: CERROR("%s: REJECTED %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - event->status); + libcfs_nid2str(conn->ibc_peer->ibp_nid), + event->status); kiblnd_connreq_done(conn, -ECONNRESET); break; @@ -2933,8 +3099,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) LCONSOLE_ERROR_MSG(0x131, "Received notification of device removal\n" "Please shutdown LNET to allow this to proceed\n"); - /* Can't remove network from underneath LNET for now, so I have - * to ignore this */ + /* + * Can't remove network from underneath LNET for now, so I have + * to ignore this + */ return 0; case RDMA_CM_EVENT_ADDR_CHANGE: @@ -2956,7 +3124,7 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) LASSERT(tx->tx_queued); } else { LASSERT(!tx->tx_queued); - LASSERT(tx->tx_waiting || tx->tx_sending != 0); + LASSERT(tx->tx_waiting || tx->tx_sending); } if (cfs_time_aftereq(jiffies, tx->tx_deadline)) { @@ -2989,13 +3157,16 @@ kiblnd_check_conns(int idx) struct list_head *ptmp; kib_peer_t *peer; kib_conn_t *conn; + kib_conn_t *temp; kib_conn_t *tmp; struct list_head *ctmp; unsigned long flags; - /* NB. We expect to have a look at all the peers and not find any + /* + * NB. We expect to have a look at all the peers and not find any * RDMAs to time out, so we just use a shared lock while we - * take a look... */ + * take a look... + */ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); list_for_each(ptmp, peers) { @@ -3028,8 +3199,7 @@ kiblnd_check_conns(int idx) conn->ibc_reserved_credits); list_add(&conn->ibc_connd_list, &closes); } else { - list_add(&conn->ibc_connd_list, - &checksends); + list_add(&conn->ibc_connd_list, &checksends); } /* +ref for 'closes' or 'checksends' */ kiblnd_conn_addref(conn); @@ -3040,21 +3210,23 @@ kiblnd_check_conns(int idx) read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - /* Handle timeout by closing the whole + /* + * Handle timeout by closing the whole * connection. We can only be sure RDMA activity - * has ceased once the QP has been modified. */ + * has ceased once the QP has been modified. + */ list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) { list_del(&conn->ibc_connd_list); kiblnd_close_conn(conn, -ETIMEDOUT); kiblnd_conn_decref(conn); } - /* In case we have enough credits to return via a + /* + * In case we have enough credits to return via a * NOOP, but there were no non-blocking tx descs - * free to do it last time... */ - while (!list_empty(&checksends)) { - conn = list_entry(checksends.next, - kib_conn_t, ibc_connd_list); + * free to do it last time... 
+ */
+ list_for_each_entry_safe(conn, temp, &checksends, ibc_connd_list) {
list_del(&conn->ibc_connd_list);
kiblnd_check_sends(conn);
kiblnd_conn_decref(conn);
@@ -3074,9 +3246,21 @@ kiblnd_disconnect_conn(kib_conn_t *conn)
kiblnd_peer_notify(conn->ibc_peer);
}
+/**
+ * High-water for reconnection to the same peer, reconnection attempt should
+ * be delayed after trying more than KIB_RECONN_HIGH_RACE.
+ */
+#define KIB_RECONN_HIGH_RACE 10
+/**
+ * Allow connd to take a break and handle other things after consecutive
+ * reconnection attempts.
+ */
+#define KIB_RECONN_BREAK 100
+
int
kiblnd_connd(void *arg)
{
+ spinlock_t *lock = &kiblnd_data.kib_connd_lock;
wait_queue_t wait;
unsigned long flags;
kib_conn_t *conn;
@@ -3091,39 +3275,79 @@ kiblnd_connd(void *arg)
init_waitqueue_entry(&wait, current);
kiblnd_data.kib_connd = current;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
while (!kiblnd_data.kib_shutdown) {
+ int reconn = 0;
dropped_lock = 0;
if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ kib_peer_t *peer = NULL;
+
conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
+ kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
+ if (conn->ibc_reconnect) {
+ peer = conn->ibc_peer;
+ kiblnd_peer_addref(peer);
+ }
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
- kiblnd_destroy_conn(conn);
+ kiblnd_destroy_conn(conn, !peer);
+
+ spin_lock_irqsave(lock, flags);
+ if (!peer)
+ continue;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ conn->ibc_peer = peer;
+ if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_list);
+ else
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_wait);
}
if (!list_empty(&kiblnd_data.kib_connd_conns)) {
conn = list_entry(kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
+ kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
kiblnd_disconnect_conn(conn);
kiblnd_conn_decref(conn);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
+ }
+
+ while (reconn < KIB_RECONN_BREAK) {
+ if (kiblnd_data.kib_reconn_sec !=
+ ktime_get_real_seconds()) {
+ kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
+ list_splice_init(&kiblnd_data.kib_reconn_wait,
+ &kiblnd_data.kib_reconn_list);
+ }
+
+ if (list_empty(&kiblnd_data.kib_reconn_list))
+ break;
+
+ conn = list_entry(kiblnd_data.kib_reconn_list.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
+
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
+
+ reconn += kiblnd_reconnect_peer(conn->ibc_peer);
+ kiblnd_peer_decref(conn->ibc_peer);
+ LIBCFS_FREE(conn, sizeof(*conn));
+
+ spin_lock_irqsave(lock, flags);
}
/* careful with the jiffy wrap... */
@@ -3133,21 +3357,22 @@ kiblnd_connd(void *arg)
const int p = 1;
int chunk = kiblnd_data.kib_peer_hash_size;
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
- /* Time to check for RDMA timeouts on a few more
+ /*
+ * Time to check for RDMA timeouts on a few more
* peers: I do checks every 'p' seconds on a
* proportion of the peer table and I need to check
* every connection 'n' times within a timeout
* interval, to ensure I detect a timeout on any
* connection within (n+1)/n times the timeout
- * interval.
*/ - + * interval. + */ if (*kiblnd_tunables.kib_timeout > n * p) chunk = (chunk * n * p) / *kiblnd_tunables.kib_timeout; - if (chunk == 0) + if (!chunk) chunk = 1; for (i = 0; i < chunk; i++) { @@ -3156,8 +3381,8 @@ kiblnd_connd(void *arg) kiblnd_data.kib_peer_hash_size; } - deadline += p * HZ; - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + deadline += msecs_to_jiffies(p * MSEC_PER_SEC); + spin_lock_irqsave(lock, flags); } if (dropped_lock) @@ -3166,15 +3391,15 @@ kiblnd_connd(void *arg) /* Nothing to do for 'timeout' */ set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + spin_unlock_irqrestore(lock, flags); schedule_timeout(timeout); remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + spin_lock_irqsave(lock, flags); } - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + spin_unlock_irqrestore(lock, flags); kiblnd_thread_fini(); return 0; @@ -3206,12 +3431,14 @@ kiblnd_complete(struct ib_wc *wc) LBUG(); case IBLND_WID_RDMA: - /* We only get RDMA completion notification if it fails. All + /* + * We only get RDMA completion notification if it fails. All * subsequent work items, including the final SEND will fail * too. However we can't print out any more info about the * failing RDMA because 'tx' might be back on the idle list or * even reused already if we didn't manage to post all our work - * items */ + * items + */ CNETERR("RDMA (tx: %p) failed: %d\n", kiblnd_wreqid2ptr(wc->wr_id), wc->status); return; @@ -3230,11 +3457,13 @@ kiblnd_complete(struct ib_wc *wc) void kiblnd_cq_completion(struct ib_cq *cq, void *arg) { - /* NB I'm not allowed to schedule this conn once its refcount has + /* + * NB I'm not allowed to schedule this conn once its refcount has * reached 0. Since fundamentally I'm racing with scheduler threads * consuming my CQ I could be called after all completions have - * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0 - * and this CQ is about to be destroyed so I NOOP. */ + * occurred. But in this case, !ibc_nrx && !ibc_nsends_posted + * and this CQ is about to be destroyed so I NOOP. + */ kib_conn_t *conn = arg; struct kib_sched_info *sched = conn->ibc_sched; unsigned long flags; @@ -3288,7 +3517,7 @@ kiblnd_scheduler(void *arg) sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)]; rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt); - if (rc != 0) { + if (rc) { CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n", sched->ibs_cpt); } @@ -3308,8 +3537,8 @@ kiblnd_scheduler(void *arg) did_something = 0; if (!list_empty(&sched->ibs_conns)) { - conn = list_entry(sched->ibs_conns.next, - kib_conn_t, ibc_sched_list); + conn = list_entry(sched->ibs_conns.next, kib_conn_t, + ibc_sched_list); /* take over kib_sched_conns' ref on conn... 
*/ LASSERT(conn->ibc_scheduled); list_del(&conn->ibc_sched_list); @@ -3317,8 +3546,10 @@ kiblnd_scheduler(void *arg) spin_unlock_irqrestore(&sched->ibs_lock, flags); + wc.wr_id = IBLND_WID_INVAL; + rc = ib_poll_cq(conn->ibc_cq, 1, &wc); - if (rc == 0) { + if (!rc) { rc = ib_req_notify_cq(conn->ibc_cq, IB_CQ_NEXT_COMP); if (rc < 0) { @@ -3327,13 +3558,22 @@ kiblnd_scheduler(void *arg) kiblnd_close_conn(conn, -EIO); kiblnd_conn_decref(conn); spin_lock_irqsave(&sched->ibs_lock, - flags); + flags); continue; } rc = ib_poll_cq(conn->ibc_cq, 1, &wc); } + if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) { + LCONSOLE_ERROR("ib_poll_cq (rc: %d) returned invalid wr_id, opcode %d, status: %d, vendor_err: %d, conn: %s status: %d\nplease upgrade firmware and OFED or contact vendor.\n", + rc, wc.opcode, wc.status, + wc.vendor_err, + libcfs_nid2str(conn->ibc_peer->ibp_nid), + conn->ibc_state); + rc = -EINVAL; + } + if (rc < 0) { CWARN("%s: ib_poll_cq failed: %d, closing connection\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), @@ -3346,21 +3586,23 @@ kiblnd_scheduler(void *arg) spin_lock_irqsave(&sched->ibs_lock, flags); - if (rc != 0 || conn->ibc_ready) { - /* There may be another completion waiting; get + if (rc || conn->ibc_ready) { + /* + * There may be another completion waiting; get * another scheduler to check while I handle - * this one... */ + * this one... + */ /* +1 ref for sched_conns */ kiblnd_conn_addref(conn); list_add_tail(&conn->ibc_sched_list, - &sched->ibs_conns); + &sched->ibs_conns); if (waitqueue_active(&sched->ibs_waitq)) wake_up(&sched->ibs_waitq); } else { conn->ibc_scheduled = 0; } - if (rc != 0) { + if (rc) { spin_unlock_irqrestore(&sched->ibs_lock, flags); kiblnd_complete(&wc); @@ -3400,7 +3642,7 @@ kiblnd_failover_thread(void *arg) unsigned long flags; int rc; - LASSERT(*kiblnd_tunables.kib_dev_failover != 0); + LASSERT(*kiblnd_tunables.kib_dev_failover); cfs_block_allsigs(); @@ -3459,13 +3701,15 @@ kiblnd_failover_thread(void *arg) remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); write_lock_irqsave(glock, flags); - if (!long_sleep || rc != 0) + if (!long_sleep || rc) continue; - /* have a long sleep, routine check all active devices, + /* + * have a long sleep, routine check all active devices, * we need checking like this because if there is not active * connection on the dev and no SEND from local, we may listen - * on wrong HCA for ever while there is a bonding failover */ + * on wrong HCA for ever while there is a bonding failover + */ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { if (kiblnd_dev_can_failover(dev)) { list_add_tail(&dev->ibd_fail_list, diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c index 1d4e7efb53d4..b4607dad3712 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c @@ -52,8 +52,10 @@ static int timeout = 50; module_param(timeout, int, 0644); MODULE_PARM_DESC(timeout, "timeout (seconds)"); -/* Number of threads in each scheduler pool which is percpt, - * we will estimate reasonable value based on CPUs if it's set to zero. */ +/* + * Number of threads in each scheduler pool which is percpt, + * we will estimate reasonable value based on CPUs if it's set to zero. 
+ */ static int nscheds; module_param(nscheds, int, 0444); MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool"); @@ -200,7 +202,7 @@ kiblnd_tunables_init(void) if (*kiblnd_tunables.kib_map_on_demand == 1) *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */ - if (*kiblnd_tunables.kib_concurrent_sends == 0) { + if (!*kiblnd_tunables.kib_concurrent_sends) { if (*kiblnd_tunables.kib_map_on_demand > 0 && *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index 05aa90ea597a..cca7b2f7f1a7 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c @@ -70,7 +70,7 @@ ksocknal_create_route(__u32 ipaddr, int port) ksock_route_t *route; LIBCFS_ALLOC(route, sizeof(*route)); - if (route == NULL) + if (!route) return NULL; atomic_set(&route->ksnr_refcount, 1); @@ -91,9 +91,9 @@ ksocknal_create_route(__u32 ipaddr, int port) void ksocknal_destroy_route(ksock_route_t *route) { - LASSERT(atomic_read(&route->ksnr_refcount) == 0); + LASSERT(!atomic_read(&route->ksnr_refcount)); - if (route->ksnr_peer != NULL) + if (route->ksnr_peer) ksocknal_peer_decref(route->ksnr_peer); LIBCFS_FREE(route, sizeof(*route)); @@ -102,6 +102,7 @@ ksocknal_destroy_route(ksock_route_t *route) static int ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) { + int cpt = lnet_cpt_of_nid(id.nid); ksock_net_t *net = ni->ni_data; ksock_peer_t *peer; @@ -109,8 +110,8 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) LASSERT(id.pid != LNET_PID_ANY); LASSERT(!in_interrupt()); - LIBCFS_ALLOC(peer, sizeof(*peer)); - if (peer == NULL) + LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); + if (!peer) return -ENOMEM; peer->ksnp_ni = ni; @@ -152,10 +153,10 @@ ksocknal_destroy_peer(ksock_peer_t *peer) ksock_net_t *net = peer->ksnp_ni->ni_data; CDEBUG(D_NET, "peer %s %p deleted\n", - libcfs_id2str(peer->ksnp_id), peer); + libcfs_id2str(peer->ksnp_id), peer); - LASSERT(atomic_read(&peer->ksnp_refcount) == 0); - LASSERT(peer->ksnp_accepting == 0); + LASSERT(!atomic_read(&peer->ksnp_refcount)); + LASSERT(!peer->ksnp_accepting); LASSERT(list_empty(&peer->ksnp_conns)); LASSERT(list_empty(&peer->ksnp_routes)); LASSERT(list_empty(&peer->ksnp_tx_queue)); @@ -163,10 +164,12 @@ ksocknal_destroy_peer(ksock_peer_t *peer) LIBCFS_FREE(peer, sizeof(*peer)); - /* NB a peer's connections and routes keep a reference on their peer + /* + * NB a peer's connections and routes keep a reference on their peer * until they are destroyed, so we can be assured that _all_ state to * do with this peer has been cleaned up when its refcount drops to - * zero. */ + * zero. + */ spin_lock_bh(&net->ksnn_lock); net->ksnn_npeers--; spin_unlock_bh(&net->ksnn_lock); @@ -180,7 +183,6 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) ksock_peer_t *peer; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, ksock_peer_t, ksnp_list); LASSERT(!peer->ksnp_closing); @@ -207,7 +209,7 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) read_lock(&ksocknal_data.ksnd_global_lock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) /* +1 ref for caller? */ + if (peer) /* +1 ref for caller? 
*/ ksocknal_peer_addref(peer); read_unlock(&ksocknal_data.ksnd_global_lock); @@ -226,9 +228,11 @@ ksocknal_unlink_peer_locked(ksock_peer_t *peer) ip = peer->ksnp_passive_ips[i]; iface = ksocknal_ip2iface(peer->ksnp_ni, ip); - /* All IPs in peer->ksnp_passive_ips[] come from the - * interface list, therefore the call must succeed. */ - LASSERT(iface != NULL); + /* + * All IPs in peer->ksnp_passive_ips[] come from the + * interface list, therefore the call must succeed. + */ + LASSERT(iface); CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n", peer, iface, iface->ksni_nroutes); @@ -246,8 +250,8 @@ ksocknal_unlink_peer_locked(ksock_peer_t *peer) static int ksocknal_get_peer_info(lnet_ni_t *ni, int index, - lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, - int *port, int *conn_count, int *share_count) + lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, + int *port, int *conn_count, int *share_count) { ksock_peer_t *peer; struct list_head *ptmp; @@ -260,14 +264,13 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, read_lock(&ksocknal_data.ksnd_global_lock); for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) continue; - if (peer->ksnp_n_passive_ips == 0 && + if (!peer->ksnp_n_passive_ips && list_empty(&peer->ksnp_routes)) { if (index-- > 0) continue; @@ -301,7 +304,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, continue; route = list_entry(rtmp, ksock_route_t, - ksnr_list); + ksnr_list); *id = peer->ksnp_id; *myip = route->ksnr_myipaddr; @@ -330,7 +333,7 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) ksocknal_route_addref(route); if (route->ksnr_myipaddr != conn->ksnc_myipaddr) { - if (route->ksnr_myipaddr == 0) { + if (!route->ksnr_myipaddr) { /* route wasn't bound locally yet (the initial route) */ CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n", libcfs_id2str(peer->ksnp_id), @@ -345,21 +348,23 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, route->ksnr_myipaddr); - if (iface != NULL) + if (iface) iface->ksni_nroutes--; } route->ksnr_myipaddr = conn->ksnc_myipaddr; iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, route->ksnr_myipaddr); - if (iface != NULL) + if (iface) iface->ksni_nroutes++; } - route->ksnr_connected |= (1<<type); + route->ksnr_connected |= (1 << type); route->ksnr_conn_count++; - /* Successful connection => further attempts can - * proceed immediately */ + /* + * Successful connection => further attempts can + * proceed immediately + */ route->ksnr_retry_interval = 0; } @@ -371,10 +376,10 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) ksock_route_t *route2; LASSERT(!peer->ksnp_closing); - LASSERT(route->ksnr_peer == NULL); + LASSERT(!route->ksnr_peer); LASSERT(!route->ksnr_scheduled); LASSERT(!route->ksnr_connecting); - LASSERT(route->ksnr_connected == 0); + LASSERT(!route->ksnr_connected); /* LASSERT(unique) */ list_for_each(tmp, &peer->ksnp_routes) { @@ -382,8 +387,8 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) if (route2->ksnr_ipaddr == route->ksnr_ipaddr) { CERROR("Duplicate route %s %pI4h\n", - libcfs_id2str(peer->ksnp_id), - &route->ksnr_ipaddr); + libcfs_id2str(peer->ksnp_id), + &route->ksnr_ipaddr); LBUG(); } } @@ -425,10 +430,10 @@ ksocknal_del_route_locked(ksock_route_t *route) ksocknal_close_conn_locked(conn, 0); } - if (route->ksnr_myipaddr != 0) { 
+ if (route->ksnr_myipaddr) { iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, route->ksnr_myipaddr); - if (iface != NULL) + if (iface) iface->ksni_nroutes--; } @@ -438,8 +443,10 @@ ksocknal_del_route_locked(ksock_route_t *route) if (list_empty(&peer->ksnp_routes) && list_empty(&peer->ksnp_conns)) { - /* I've just removed the last route to a peer with no active - * connections */ + /* + * I've just removed the last route to a peer with no active + * connections + */ ksocknal_unlink_peer_locked(peer); } } @@ -460,11 +467,11 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) /* Have a brand new peer ready... */ rc = ksocknal_create_peer(&peer, ni, id); - if (rc != 0) + if (rc) return rc; route = ksocknal_create_route(ipaddr, port); - if (route == NULL) { + if (!route) { ksocknal_peer_decref(peer); return -ENOMEM; } @@ -472,16 +479,16 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) write_lock_bh(&ksocknal_data.ksnd_global_lock); /* always called with a ref on ni, so shutdown can't have started */ - LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); + LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown); peer2 = ksocknal_find_peer_locked(ni, id); - if (peer2 != NULL) { + if (peer2) { ksocknal_peer_decref(peer); peer = peer2; } else { /* peer table takes my ref on peer */ list_add_tail(&peer->ksnp_list, - ksocknal_nid2peerlist(id.nid)); + ksocknal_nid2peerlist(id.nid)); } route2 = NULL; @@ -493,7 +500,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) route2 = NULL; } - if (route2 == NULL) { + if (!route2) { ksocknal_add_route_locked(peer, route); route->ksnr_share_count++; } else { @@ -524,7 +531,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) route = list_entry(tmp, ksock_route_t, ksnr_list); /* no match */ - if (!(ip == 0 || route->ksnr_ipaddr == ip)) + if (!(!ip || route->ksnr_ipaddr == ip)) continue; route->ksnr_share_count = 0; @@ -538,15 +545,16 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) nshared += route->ksnr_share_count; } - if (nshared == 0) { - /* remove everything else if there are no explicit entries - * left */ - + if (!nshared) { + /* + * remove everything else if there are no explicit entries + * left + */ list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); /* we should only be removing auto-entries */ - LASSERT(route->ksnr_share_count == 0); + LASSERT(!route->ksnr_share_count); ksocknal_del_route_locked(route); } @@ -575,16 +583,16 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) write_lock_bh(&ksocknal_data.ksnd_global_lock); - if (id.nid != LNET_NID_ANY) - lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - else { + if (id.nid != LNET_NID_ANY) { + lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); + hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); + } else { lo = 0; hi = ksocknal_data.ksnd_peer_hash_size - 1; } for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, - &ksocknal_data.ksnd_peers[i]) { + list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) @@ -604,7 +612,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) LASSERT(list_empty(&peer->ksnp_routes)); list_splice_init(&peer->ksnp_tx_queue, - &zombies); + &zombies); } ksocknal_peer_decref(peer); /* ...till here */ @@ -645,7 +653,7 @@ 
ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) continue; conn = list_entry(ctmp, ksock_conn_t, - ksnc_list); + ksnc_list); ksocknal_conn_addref(conn); read_unlock(&ksocknal_data.ksnd_global_lock); return conn; @@ -692,8 +700,10 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) nip = net->ksnn_ninterfaces; LASSERT(nip <= LNET_MAX_INTERFACES); - /* Only offer interfaces for additional connections if I have - * more than one. */ + /* + * Only offer interfaces for additional connections if I have + * more than one. + */ if (nip < 2) { read_unlock(&ksocknal_data.ksnd_global_lock); return 0; @@ -701,7 +711,7 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) for (i = 0; i < nip; i++) { ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr; - LASSERT(ipaddrs[i] != 0); + LASSERT(ipaddrs[i]); } read_unlock(&ksocknal_data.ksnd_global_lock); @@ -719,11 +729,11 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) int i; for (i = 0; i < nips; i++) { - if (ips[i] == 0) + if (!ips[i]) continue; this_xor = ips[i] ^ iface->ksni_ipaddr; - this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0; + this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0; if (!(best < 0 || best_netmatch < this_netmatch || @@ -757,38 +767,45 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) int best_netmatch; int best_npeers; - /* CAVEAT EMPTOR: We do all our interface matching with an + /* + * CAVEAT EMPTOR: We do all our interface matching with an * exclusive hold of global lock at IRQ priority. We're only * expecting to be dealing with small numbers of interfaces, so the - * O(n**3)-ness shouldn't matter */ - - /* Also note that I'm not going to return more than n_peerips - * interfaces, even if I have more myself */ - + * O(n**3)-ness shouldn't matter + */ + /* + * Also note that I'm not going to return more than n_peerips + * interfaces, even if I have more myself + */ write_lock_bh(global_lock); LASSERT(n_peerips <= LNET_MAX_INTERFACES); LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES); - /* Only match interfaces for additional connections - * if I have > 1 interface */ + /* + * Only match interfaces for additional connections + * if I have > 1 interface + */ n_ips = (net->ksnn_ninterfaces < 2) ? 0 : min(n_peerips, net->ksnn_ninterfaces); for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) { /* ^ yes really... */ - /* If we have any new interfaces, first tick off all the + /* + * If we have any new interfaces, first tick off all the * peer IPs that match old interfaces, then choose new * interfaces to match the remaining peer IPS. * We don't forget interfaces we've stopped using; we might - * start using them again... */ - + * start using them again... + */ if (i < peer->ksnp_n_passive_ips) { /* Old interface. */ ip = peer->ksnp_passive_ips[i]; best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip); + /* peer passive ips are kept up to date */ + LASSERT(best_iface); } else { /* choose a new interface */ LASSERT(i == peer->ksnp_n_passive_ips); @@ -810,9 +827,9 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) k = ksocknal_match_peerip(iface, peerips, n_peerips); xor = ip ^ peerips[k]; - this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0; + this_netmatch = !(xor & iface->ksni_netmask) ? 
1 : 0; - if (!(best_iface == NULL || + if (!(!best_iface || best_netmatch < this_netmatch || (best_netmatch == this_netmatch && best_npeers > iface->ksni_npeers))) @@ -823,10 +840,12 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) best_npeers = iface->ksni_npeers; } + LASSERT(best_iface); + best_iface->ksni_npeers++; ip = best_iface->ksni_ipaddr; peer->ksnp_passive_ips[i] = ip; - peer->ksnp_n_passive_ips = i+1; + peer->ksnp_n_passive_ips = i + 1; } /* mark the best matching peer IP used */ @@ -860,16 +879,19 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, int i; int j; - /* CAVEAT EMPTOR: We do all our interface matching with an + /* + * CAVEAT EMPTOR: We do all our interface matching with an * exclusive hold of global lock at IRQ priority. We're only * expecting to be dealing with small numbers of interfaces, so the - * O(n**3)-ness here shouldn't matter */ - + * O(n**3)-ness here shouldn't matter + */ write_lock_bh(global_lock); if (net->ksnn_ninterfaces < 2) { - /* Only create additional connections - * if I have > 1 interface */ + /* + * Only create additional connections + * if I have > 1 interface + */ write_unlock_bh(global_lock); return; } @@ -877,13 +899,13 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES); for (i = 0; i < npeer_ipaddrs; i++) { - if (newroute != NULL) { + if (newroute) { newroute->ksnr_ipaddr = peer_ipaddrs[i]; } else { write_unlock_bh(global_lock); newroute = ksocknal_create_route(peer_ipaddrs[i], port); - if (newroute == NULL) + if (!newroute) return; write_lock_bh(global_lock); @@ -904,7 +926,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, route = NULL; } - if (route != NULL) + if (route) continue; best_iface = NULL; @@ -920,21 +942,21 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, /* Using this interface already? */ list_for_each(rtmp, &peer->ksnp_routes) { route = list_entry(rtmp, ksock_route_t, - ksnr_list); + ksnr_list); if (route->ksnr_myipaddr == iface->ksni_ipaddr) break; route = NULL; } - if (route != NULL) + if (route) continue; - this_netmatch = (((iface->ksni_ipaddr ^ + this_netmatch = (!((iface->ksni_ipaddr ^ newroute->ksnr_ipaddr) & - iface->ksni_netmask) == 0) ? 1 : 0; + iface->ksni_netmask)) ? 
1 : 0; - if (!(best_iface == NULL || + if (!(!best_iface || best_netmatch < this_netmatch || (best_netmatch == this_netmatch && best_nroutes > iface->ksni_nroutes))) @@ -945,7 +967,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, best_nroutes = iface->ksni_nroutes; } - if (best_iface == NULL) + if (!best_iface) continue; newroute->ksnr_myipaddr = best_iface->ksni_ipaddr; @@ -956,7 +978,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, } write_unlock_bh(global_lock); - if (newroute != NULL) + if (newroute) ksocknal_route_decref(newroute); } @@ -969,10 +991,10 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock) int peer_port; rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); - LASSERT(rc == 0); /* we succeeded before */ + LASSERT(!rc); /* we succeeded before */ LIBCFS_ALLOC(cr, sizeof(*cr)); - if (cr == NULL) { + if (!cr) { LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n", &peer_ip); return -ENOMEM; @@ -997,7 +1019,6 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) ksock_route_t *route; list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) { - if (route->ksnr_ipaddr == ipaddr) return route->ksnr_connecting; } @@ -1006,7 +1027,7 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, - struct socket *sock, int type) + struct socket *sock, int type) { rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; LIST_HEAD(zombies); @@ -1026,12 +1047,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, int active; char *warn = NULL; - active = (route != NULL); + active = !!route; LASSERT(active == (type != SOCKLND_CONN_NONE)); LIBCFS_ALLOC(conn, sizeof(*conn)); - if (conn == NULL) { + if (!conn) { rc = -ENOMEM; goto failed_0; } @@ -1039,8 +1060,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, conn->ksnc_peer = NULL; conn->ksnc_route = NULL; conn->ksnc_sock = sock; - /* 2 ref, 1 for conn, another extra ref prevents socket - * being closed before establishment of connection */ + /* + * 2 ref, 1 for conn, another extra ref prevents socket + * being closed before establishment of connection + */ atomic_set(&conn->ksnc_sock_refcount, 2); conn->ksnc_type = type; ksocknal_lib_save_callback(sock, conn); @@ -1057,21 +1080,22 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); - if (hello == NULL) { + if (!hello) { rc = -ENOMEM; goto failed_1; } /* stash conn's local and remote addrs */ rc = ksocknal_lib_get_conn_addrs(conn); - if (rc != 0) + if (rc) goto failed_1; - /* Find out/confirm peer's NID and connection type and get the + /* + * Find out/confirm peer's NID and connection type and get the * vector of interfaces she's willing to let me connect to. 
* Passive connections use the listener timeout since the peer sends - * eagerly */ - + * eagerly + */ if (active) { peer = route->ksnr_peer; LASSERT(ni == peer->ksnp_ni); @@ -1084,7 +1108,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, conn->ksnc_proto = peer->ksnp_proto; write_unlock_bh(global_lock); - if (conn->ksnc_proto == NULL) { + if (!conn->ksnc_proto) { conn->ksnc_proto = &ksocknal_protocol_v3x; #if SOCKNAL_VERSION_DEBUG if (*ksocknal_tunables.ksnd_protocol == 2) @@ -1095,7 +1119,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, } rc = ksocknal_send_hello(ni, conn, peerid.nid, hello); - if (rc != 0) + if (rc) goto failed_1; } else { peerid.nid = LNET_NID_ANY; @@ -1109,8 +1133,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, if (rc < 0) goto failed_1; - LASSERT(rc == 0 || active); - LASSERT(conn->ksnc_proto != NULL); + LASSERT(!rc || active); + LASSERT(conn->ksnc_proto); LASSERT(peerid.nid != LNET_NID_ANY); cpt = lnet_cpt_of_nid(peerid.nid); @@ -1120,20 +1144,22 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, write_lock_bh(global_lock); } else { rc = ksocknal_create_peer(&peer, ni, peerid); - if (rc != 0) + if (rc) goto failed_1; write_lock_bh(global_lock); /* called with a ref on ni, so shutdown can't have started */ - LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); + LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown); peer2 = ksocknal_find_peer_locked(ni, peerid); - if (peer2 == NULL) { - /* NB this puts an "empty" peer in the peer - * table (which takes my ref) */ + if (!peer2) { + /* + * NB this puts an "empty" peer in the peer + * table (which takes my ref) + */ list_add_tail(&peer->ksnp_list, - ksocknal_nid2peerlist(peerid.nid)); + ksocknal_nid2peerlist(peerid.nid)); } else { ksocknal_peer_decref(peer); peer = peer2; @@ -1143,8 +1169,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, ksocknal_peer_addref(peer); peer->ksnp_accepting++; - /* Am I already connecting to this guy? Resolve in - * favour of higher NID... */ + /* + * Am I already connecting to this guy? Resolve in + * favour of higher NID... + */ if (peerid.nid < ni->ni_nid && ksocknal_connecting(peer, conn->ksnc_ipaddr)) { rc = EALREADY; @@ -1161,8 +1189,9 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, goto failed_2; } - if (peer->ksnp_proto == NULL) { - /* Never connected before. + if (!peer->ksnp_proto) { + /* + * Never connected before. * NB recv_hello may have returned EPROTO to signal my peer * wants a different protocol than the one I asked for. */ @@ -1198,8 +1227,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, goto failed_2; } - /* Refuse to duplicate an existing connection, unless this is a - * loopback connection */ + /* + * Refuse to duplicate an existing connection, unless this is a + * loopback connection + */ if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) { list_for_each(tmp, &peer->ksnp_conns) { conn2 = list_entry(tmp, ksock_conn_t, ksnc_list); @@ -1209,9 +1240,11 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, conn2->ksnc_type != conn->ksnc_type) continue; - /* Reply on a passive connection attempt so the peer - * realises we're connected. */ - LASSERT(rc == 0); + /* + * Reply on a passive connection attempt so the peer + * realises we're connected. 
+ */ + LASSERT(!rc); if (!active) rc = EALREADY; @@ -1220,9 +1253,11 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, } } - /* If the connection created by this route didn't bind to the IP + /* + * If the connection created by this route didn't bind to the IP * address the route connected to, the connection/route matching - * code below probably isn't going to work. */ + * code below probably isn't going to work. + */ if (active && route->ksnr_ipaddr != conn->ksnc_ipaddr) { CERROR("Route %s %pI4h connected to %pI4h\n", @@ -1231,10 +1266,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, &conn->ksnc_ipaddr); } - /* Search for a route corresponding to the new connection and + /* + * Search for a route corresponding to the new connection and * create an association. This allows incoming connections created * by routes in my peer to match my own route entries so I don't - * continually create duplicate routes. */ + * continually create duplicate routes. + */ list_for_each(tmp, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); @@ -1278,14 +1315,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, write_unlock_bh(global_lock); - /* We've now got a new connection. Any errors from here on are just + /* + * We've now got a new connection. Any errors from here on are just * like "normal" comms errors and we close the connection normally. * NB (a) we still have to send the reply HELLO for passive * connections, * (b) normal I/O on the conn is blocked until I setup and call the * socket callbacks. */ - CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n", libcfs_id2str(peerid), conn->ksnc_proto->pro_version, &conn->ksnc_myipaddr, &conn->ksnc_ipaddr, @@ -1305,12 +1342,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); - /* setup the socket AFTER I've received hello (it disables + /* + * setup the socket AFTER I've received hello (it disables * SO_LINGER). I might call back to the acceptor who may want * to send a protocol version response and then close the * socket; this ensures the socket only tears down after the - * response has been sent. */ - if (rc == 0) + * response has been sent. + */ + if (!rc) rc = ksocknal_lib_setup_sock(sock); write_lock_bh(global_lock); @@ -1323,14 +1362,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, write_unlock_bh(global_lock); - if (rc != 0) { + if (rc) { write_lock_bh(global_lock); if (!conn->ksnc_closing) { /* could be closed by another thread */ ksocknal_close_conn_locked(conn, rc); } write_unlock_bh(global_lock); - } else if (ksocknal_connsock_addref(conn) == 0) { + } else if (!ksocknal_connsock_addref(conn)) { /* Allow I/O to proceed. 
*/
ksocknal_read_callback(conn);
ksocknal_write_callback(conn);
@@ -1352,19 +1391,21 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_unlock_bh(global_lock);
- if (warn != NULL) {
+ if (warn) {
if (rc < 0)
CERROR("Not creating conn %s type %d: %s\n",
libcfs_id2str(peerid), conn->ksnc_type, warn);
else
CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
+ libcfs_id2str(peerid), conn->ksnc_type, warn);
}
if (!active) {
if (rc > 0) {
- /* Request retry by replying with CONN_NONE
- * ksnc_proto has been set already */
+ /*
+ * Request retry by replying with CONN_NONE
+ * ksnc_proto has been set already
+ */
conn->ksnc_type = SOCKLND_CONN_NONE;
hello->kshm_nips = 0;
ksocknal_send_hello(ni, conn, peerid.nid, hello);
@@ -1379,7 +1420,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
ksocknal_peer_decref(peer);
failed_1:
- if (hello != NULL)
+ if (hello)
LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
@@ -1393,15 +1434,17 @@ failed_0:
void
ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
{
- /* This just does the immmediate housekeeping, and queues the
+ /*
+ * This just does the immediate housekeeping, and queues the
* connection for the reaper to terminate.
- * Caller holds ksnd_global_lock exclusively in irq context */
+ * Caller holds ksnd_global_lock exclusively in irq context
+ */
ksock_peer_t *peer = conn->ksnc_peer;
ksock_route_t *route;
ksock_conn_t *conn2;
struct list_head *tmp;
- LASSERT(peer->ksnp_error == 0);
+ LASSERT(!peer->ksnp_error);
LASSERT(!conn->ksnc_closing);
conn->ksnc_closing = 1;
@@ -1409,10 +1452,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
list_del(&conn->ksnc_list);
route = conn->ksnc_route;
- if (route != NULL) {
+ if (route) {
/* dissociate conn from route...
*/
LASSERT(!route->ksnr_deleted);
- LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+ LASSERT(route->ksnr_connected & (1 << conn->ksnc_type));
conn2 = NULL;
list_for_each(tmp, &peer->ksnp_conns) {
@@ -1424,7 +1467,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
conn2 = NULL;
}
- if (conn2 == NULL)
+ if (!conn2)
route->ksnr_connected &= ~(1 << conn->ksnc_type);
conn->ksnc_route = NULL;
@@ -1445,15 +1488,17 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
- /* throw them to the last connection...,
- * these TXs will be send to /dev/null by scheduler */
+ /*
+ * throw them to the last connection...,
+ * these TXs will be sent to /dev/null by scheduler
+ */
list_for_each_entry(tx, &peer->ksnp_tx_queue,
- tx_list)
+ tx_list)
ksocknal_tx_prep(conn, tx);
spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
list_splice_init(&peer->ksnp_tx_queue,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
@@ -1461,8 +1506,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
peer->ksnp_error = error; /* stash last conn close reason */
if (list_empty(&peer->ksnp_routes)) {
- /* I've just closed last conn belonging to a
- * peer with no routes to it */
+ /*
+ * I've just closed last conn belonging to a
+ * peer with no routes to it
+ */
ksocknal_unlink_peer_locked(peer);
}
}
@@ -1470,7 +1517,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
list_add_tail(&conn->ksnc_list,
- &ksocknal_data.ksnd_deathrow_conns);
+ &ksocknal_data.ksnd_deathrow_conns);
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -1482,16 +1529,17 @@ ksocknal_peer_failed(ksock_peer_t *peer)
int notify = 0;
unsigned long last_alive = 0;
- /* There has been a connection failure or comms error; but I'll only
+ /*
+ * There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer is dead if it's to another kernel and
- * there are no connections or connection attempts in existence. */
-
+ * there are no connections or connection attempts in existence.
+ */ read_lock(&ksocknal_data.ksnd_global_lock); - if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 && + if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) && list_empty(&peer->ksnp_conns) && - peer->ksnp_accepting == 0 && - ksocknal_find_connecting_route_locked(peer) == NULL) { + !peer->ksnp_accepting && + !ksocknal_find_connecting_route_locked(peer)) { notify = 1; last_alive = peer->ksnp_last_alive; } @@ -1500,7 +1548,7 @@ ksocknal_peer_failed(ksock_peer_t *peer) if (notify) lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0, - last_alive); + last_alive); } void @@ -1508,12 +1556,15 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) { ksock_peer_t *peer = conn->ksnc_peer; ksock_tx_t *tx; + ksock_tx_t *temp; ksock_tx_t *tmp; LIST_HEAD(zlist); - /* NB safe to finalize TXs because closing of socket will - * abort all buffered data */ - LASSERT(conn->ksnc_sock == NULL); + /* + * NB safe to finalize TXs because closing of socket will + * abort all buffered data + */ + LASSERT(!conn->ksnc_sock); spin_lock(&peer->ksnp_lock); @@ -1521,7 +1572,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) if (tx->tx_conn != conn) continue; - LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0); + LASSERT(tx->tx_msg.ksm_zc_cookies[0]); tx->tx_msg.ksm_zc_cookies[0] = 0; tx->tx_zc_aborted = 1; /* mark it as not-acked */ @@ -1531,9 +1582,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) spin_unlock(&peer->ksnp_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list); - + list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) { list_del(&tx->tx_zc_list); ksocknal_tx_decref(tx); } @@ -1542,10 +1591,12 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) void ksocknal_terminate_conn(ksock_conn_t *conn) { - /* This gets called by the reaper (guaranteed thread context) to + /* + * This gets called by the reaper (guaranteed thread context) to * disengage the socket from its callbacks and close it. * ksnc_refcount will eventually hit zero, and then the reaper will - * destroy it. */ + * destroy it. + */ ksock_peer_t *peer = conn->ksnc_peer; ksock_sched_t *sched = conn->ksnc_scheduler; int failed = 0; @@ -1561,7 +1612,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn) if (!conn->ksnc_tx_scheduled && !list_empty(&conn->ksnc_tx_queue)) { list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); + &sched->kss_tx_conns); conn->ksnc_tx_scheduled = 1; /* extra ref for scheduler */ ksocknal_conn_addref(conn); @@ -1576,11 +1627,13 @@ ksocknal_terminate_conn(ksock_conn_t *conn) ksocknal_lib_reset_callback(conn->ksnc_sock, conn); - /* OK, so this conn may not be completely disengaged from its - * scheduler yet, but it _has_ committed to terminate... */ + /* + * OK, so this conn may not be completely disengaged from its + * scheduler yet, but it _has_ committed to terminate... + */ conn->ksnc_scheduler->kss_nconns--; - if (peer->ksnp_error != 0) { + if (peer->ksnp_error) { /* peer's last conn closed in error */ LASSERT(list_empty(&peer->ksnp_conns)); failed = 1; @@ -1592,11 +1645,13 @@ ksocknal_terminate_conn(ksock_conn_t *conn) if (failed) ksocknal_peer_failed(peer); - /* The socket is closed on the final put; either here, or in + /* + * The socket is closed on the final put; either here, or in * ksocknal_{send,recv}msg(). Since we set up the linger2 option * when the connection was established, this will close the socket * immediately, aborting anything buffered in it. Any hung - * zero-copy transmits will therefore complete in finite time. */ + * zero-copy transmits will therefore complete in finite time. 
+ */ ksocknal_connsock_decref(conn); } @@ -1605,7 +1660,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn) { /* Queue the conn for the reaper to destroy */ - LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0); + LASSERT(!atomic_read(&conn->ksnc_conn_refcount)); spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns); @@ -1622,10 +1677,10 @@ ksocknal_destroy_conn(ksock_conn_t *conn) /* Final coup-de-grace of the reaper */ CDEBUG(D_NET, "connection %p\n", conn); - LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0); - LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0); - LASSERT(conn->ksnc_sock == NULL); - LASSERT(conn->ksnc_route == NULL); + LASSERT(!atomic_read(&conn->ksnc_conn_refcount)); + LASSERT(!atomic_read(&conn->ksnc_sock_refcount)); + LASSERT(!conn->ksnc_sock); + LASSERT(!conn->ksnc_route); LASSERT(!conn->ksnc_tx_scheduled); LASSERT(!conn->ksnc_rx_scheduled); LASSERT(list_empty(&conn->ksnc_tx_queue)); @@ -1642,7 +1697,7 @@ ksocknal_destroy_conn(ksock_conn_t *conn) cfs_duration_sec(cfs_time_sub(cfs_time_current(), last_rcv))); lnet_finalize(conn->ksnc_peer->ksnp_ni, - conn->ksnc_cookie, -EIO); + conn->ksnc_cookie, -EIO); break; case SOCKNAL_RX_LNET_HEADER: if (conn->ksnc_rx_started) @@ -1685,8 +1740,7 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { conn = list_entry(ctmp, ksock_conn_t, ksnc_list); - if (ipaddr == 0 || - conn->ksnc_ipaddr == ipaddr) { + if (!ipaddr || conn->ksnc_ipaddr == ipaddr) { count++; ksocknal_close_conn_locked(conn, why); } @@ -1724,17 +1778,17 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) write_lock_bh(&ksocknal_data.ksnd_global_lock); - if (id.nid != LNET_NID_ANY) - lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - else { + if (id.nid != LNET_NID_ANY) { + lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); + hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); + } else { lo = 0; hi = ksocknal_data.ksnd_peer_hash_size - 1; } for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, - &ksocknal_data.ksnd_peers[i]) { - + &ksocknal_data.ksnd_peers[i]) { peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) && @@ -1748,10 +1802,10 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) write_unlock_bh(&ksocknal_data.ksnd_global_lock); /* wildcards always succeed */ - if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0) + if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr) return 0; - if (count == 0) + if (!count) return -ENOENT; else return 0; @@ -1760,15 +1814,17 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) { - /* The router is telling me she's been notified of a change in - * gateway state.... */ + /* + * The router is telling me she's been notified of a change in + * gateway state.... + */ lnet_process_id_t id = {0}; id.nid = gw_nid; id.pid = LNET_PID_ANY; CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid), - alive ? "up" : "down"); + alive ? "up" : "down"); if (!alive) { /* If the gateway crashed, close all open connections... */ @@ -1776,8 +1832,10 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) return; } - /* ...otherwise do nothing. We can only establish new connections - * if we have autroutes, and these connect on demand. 
*/ + /* + * ...otherwise do nothing. We can only establish new connections + * if we have autroutes, and these connect on demand. + */ } void @@ -1788,12 +1846,15 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) unsigned long now = cfs_time_current(); ksock_peer_t *peer = NULL; rwlock_t *glock = &ksocknal_data.ksnd_global_lock; - lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID}; + lnet_process_id_t id = { + .nid = nid, + .pid = LNET_PID_LUSTRE, + }; read_lock(glock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) { + if (peer) { struct list_head *tmp; ksock_conn_t *conn; int bufnob; @@ -1812,13 +1873,13 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) } last_alive = peer->ksnp_last_alive; - if (ksocknal_find_connectable_route_locked(peer) == NULL) + if (!ksocknal_find_connectable_route_locked(peer)) connect = 0; } read_unlock(glock); - if (last_alive != 0) + if (last_alive) *when = last_alive; CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n", @@ -1834,7 +1895,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) write_lock_bh(glock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) + if (peer) ksocknal_launch_all_connections_locked(peer); write_unlock_bh(glock); @@ -1857,7 +1918,7 @@ ksocknal_push_peer(ksock_peer_t *peer) list_for_each(tmp, &peer->ksnp_conns) { if (i++ == index) { conn = list_entry(tmp, ksock_conn_t, - ksnc_list); + ksnc_list); ksocknal_conn_addref(conn); break; } @@ -1865,7 +1926,7 @@ ksocknal_push_peer(ksock_peer_t *peer) read_unlock(&ksocknal_data.ksnd_global_lock); - if (conn == NULL) + if (!conn) break; ksocknal_lib_push_conn(conn); @@ -1885,7 +1946,8 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) start = &ksocknal_data.ksnd_peers[0]; end = &ksocknal_data.ksnd_peers[hsize - 1]; } else { - start = end = ksocknal_nid2peerlist(id.nid); + start = ksocknal_nid2peerlist(id.nid); + end = ksocknal_nid2peerlist(id.nid); } for (tmp = start; tmp <= end; tmp++) { @@ -1910,7 +1972,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) } read_unlock(&ksocknal_data.ksnd_global_lock); - if (i == 0) /* no match */ + if (!i) /* no match */ break; rc = 0; @@ -1934,14 +1996,13 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) struct list_head *rtmp; ksock_route_t *route; - if (ipaddress == 0 || - netmask == 0) + if (!ipaddress || !netmask) return -EINVAL; write_lock_bh(&ksocknal_data.ksnd_global_lock); iface = ksocknal_ip2iface(ni, ipaddress); - if (iface != NULL) { + if (iface) { /* silently ignore dups */ rc = 0; } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) { @@ -1957,16 +2018,15 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { peer = list_entry(ptmp, ksock_peer_t, - ksnp_list); + ksnp_list); for (j = 0; j < peer->ksnp_n_passive_ips; j++) if (peer->ksnp_passive_ips[j] == ipaddress) iface->ksni_npeers++; list_for_each(rtmp, &peer->ksnp_routes) { - route = list_entry(rtmp, - ksock_route_t, - ksnr_list); + route = list_entry(rtmp, ksock_route_t, + ksnr_list); if (route->ksnr_myipaddr == ipaddress) iface->ksni_nroutes++; @@ -1995,8 +2055,8 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) for (i = 0; i < peer->ksnp_n_passive_ips; i++) if (peer->ksnp_passive_ips[i] == ipaddr) { - for (j = i+1; j < peer->ksnp_n_passive_ips; j++) - peer->ksnp_passive_ips[j-1] = + 
for (j = i + 1; j < peer->ksnp_n_passive_ips; j++) + peer->ksnp_passive_ips[j - 1] = peer->ksnp_passive_ips[j]; peer->ksnp_n_passive_ips--; break; @@ -2008,7 +2068,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) if (route->ksnr_myipaddr != ipaddr) continue; - if (route->ksnr_share_count != 0) { + if (route->ksnr_share_count) { /* Manually created; keep, but unbind */ route->ksnr_myipaddr = 0; } else { @@ -2041,23 +2101,21 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress) for (i = 0; i < net->ksnn_ninterfaces; i++) { this_ip = net->ksnn_interfaces[i].ksni_ipaddr; - if (!(ipaddress == 0 || - ipaddress == this_ip)) + if (!(!ipaddress || ipaddress == this_ip)) continue; rc = 0; - for (j = i+1; j < net->ksnn_ninterfaces; j++) - net->ksnn_interfaces[j-1] = + for (j = i + 1; j < net->ksnn_ninterfaces; j++) + net->ksnn_interfaces[j - 1] = net->ksnn_interfaces[j]; net->ksnn_ninterfaces--; for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) { list_for_each_safe(tmp, nxt, - &ksocknal_data.ksnd_peers[j]) { - peer = list_entry(tmp, ksock_peer_t, - ksnp_list); + &ksocknal_data.ksnd_peers[j]) { + peer = list_entry(tmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) continue; @@ -2121,7 +2179,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) rc = ksocknal_get_peer_info(ni, data->ioc_count, &id, &myip, &ip, &port, &conn_count, &share_count); - if (rc != 0) + if (rc) return rc; data->ioc_nid = id.nid; @@ -2136,7 +2194,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) case IOC_LIBCFS_ADD_PEER: id.nid = data->ioc_nid; - id.pid = LUSTRE_SRV_LNET_PID; + id.pid = LNET_PID_LUSTRE; return ksocknal_add_peer(ni, id, data->ioc_u32[0], /* IP */ data->ioc_u32[1]); /* port */ @@ -2153,7 +2211,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) int nagle; ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); - if (conn == NULL) + if (!conn) return -ENOENT; ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle); @@ -2202,14 +2260,14 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) static void ksocknal_free_buffers(void) { - LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0); + LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs)); - if (ksocknal_data.ksnd_sched_info != NULL) { + if (ksocknal_data.ksnd_sched_info) { struct ksock_sched_info *info; int i; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - if (info->ksi_scheds != NULL) { + if (info->ksi_scheds) { LIBCFS_FREE(info->ksi_scheds, info->ksi_nthreads_max * sizeof(info->ksi_scheds[0])); @@ -2219,21 +2277,21 @@ ksocknal_free_buffers(void) } LIBCFS_FREE(ksocknal_data.ksnd_peers, - sizeof(struct list_head) * - ksocknal_data.ksnd_peer_hash_size); + sizeof(struct list_head) * + ksocknal_data.ksnd_peer_hash_size); spin_lock(&ksocknal_data.ksnd_tx_lock); if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { struct list_head zlist; ksock_tx_t *tx; + ksock_tx_t *temp; list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs); list_del_init(&ksocknal_data.ksnd_idle_noop_txs); spin_unlock(&ksocknal_data.ksnd_tx_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, ksock_tx_t, tx_list); + list_for_each_entry_safe(tx, temp, &zlist, tx_list) { list_del(&tx->tx_list); LIBCFS_FREE(tx, tx->tx_desc_size); } @@ -2250,7 +2308,7 @@ ksocknal_base_shutdown(void) int i; int j; - LASSERT(ksocknal_data.ksnd_nnets == 0); + LASSERT(!ksocknal_data.ksnd_nnets); switch (ksocknal_data.ksnd_init) { default: @@ -2258,7 +2316,7 @@ ksocknal_base_shutdown(void) case 
SOCKNAL_INIT_ALL: case SOCKNAL_INIT_DATA: - LASSERT(ksocknal_data.ksnd_peers != NULL); + LASSERT(ksocknal_data.ksnd_peers); for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) LASSERT(list_empty(&ksocknal_data.ksnd_peers[i])); @@ -2268,14 +2326,13 @@ ksocknal_base_shutdown(void) LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs)); LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes)); - if (ksocknal_data.ksnd_sched_info != NULL) { + if (ksocknal_data.ksnd_sched_info) { cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - if (info->ksi_scheds == NULL) + if (!info->ksi_scheds) continue; for (j = 0; j < info->ksi_nthreads_max; j++) { - sched = &info->ksi_scheds[j]; LASSERT(list_empty( &sched->kss_tx_conns)); @@ -2283,7 +2340,7 @@ ksocknal_base_shutdown(void) &sched->kss_rx_conns)); LASSERT(list_empty( &sched->kss_zombie_noop_txs)); - LASSERT(sched->kss_nconns == 0); + LASSERT(!sched->kss_nconns); } } } @@ -2293,10 +2350,10 @@ ksocknal_base_shutdown(void) wake_up_all(&ksocknal_data.ksnd_connd_waitq); wake_up_all(&ksocknal_data.ksnd_reaper_waitq); - if (ksocknal_data.ksnd_sched_info != NULL) { + if (ksocknal_data.ksnd_sched_info) { cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - if (info->ksi_scheds == NULL) + if (!info->ksi_scheds) continue; for (j = 0; j < info->ksi_nthreads_max; j++) { @@ -2308,7 +2365,7 @@ ksocknal_base_shutdown(void) i = 4; read_lock(&ksocknal_data.ksnd_global_lock); - while (ksocknal_data.ksnd_nthreads != 0) { + while (ksocknal_data.ksnd_nthreads) { i++; CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */ "waiting for %d threads to terminate\n", @@ -2332,7 +2389,6 @@ ksocknal_base_shutdown(void) static __u64 ksocknal_new_incarnation(void) { - /* The incarnation number is the time this module loaded and it * identifies this particular instance of the socknal. 
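The shutdown wait loop above keeps the ((i & (-i)) == i) test so the "waiting for N threads" message is promoted to a warning only when the iteration count is a power of two, i.e. the log rate decays as the wait drags on. A self-contained userspace sketch of that throttle, for illustration only:

    #include <stdio.h>

    /* True when i is a power of two: i & -i isolates the lowest set bit,
     * which equals i itself only when exactly one bit is set. */
    static int is_pow2(unsigned int i)
    {
            return i && (i & (0U - i)) == i;
    }

    int main(void)
    {
            unsigned int i;

            for (i = 1; i <= 32; i++)
                    if (is_pow2(i))
                            printf("would warn at iteration %u\n", i);  /* 1 2 4 8 16 32 */
            return 0;
    }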
*/ @@ -2347,15 +2403,15 @@ ksocknal_base_startup(void) int i; LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); - LASSERT(ksocknal_data.ksnd_nnets == 0); + LASSERT(!ksocknal_data.ksnd_nnets); memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */ ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE; LIBCFS_ALLOC(ksocknal_data.ksnd_peers, - sizeof(struct list_head) * - ksocknal_data.ksnd_peer_hash_size); - if (ksocknal_data.ksnd_peers == NULL) + sizeof(struct list_head) * + ksocknal_data.ksnd_peer_hash_size); + if (!ksocknal_data.ksnd_peers) return -ENOMEM; for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) @@ -2386,7 +2442,7 @@ ksocknal_base_startup(void) ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*info)); - if (ksocknal_data.ksnd_sched_info == NULL) + if (!ksocknal_data.ksnd_sched_info) goto failed; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { @@ -2397,8 +2453,10 @@ ksocknal_base_startup(void) if (*ksocknal_tunables.ksnd_nscheds > 0) { nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds); } else { - /* max to half of CPUs, assume another half should be - * reserved for upper layer modules */ + /* + * max to half of CPUs, assume another half should be + * reserved for upper layer modules + */ nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs); } @@ -2407,7 +2465,7 @@ ksocknal_base_startup(void) LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i, info->ksi_nthreads_max * sizeof(*sched)); - if (info->ksi_scheds == NULL) + if (!info->ksi_scheds) goto failed; for (; nthrs > 0; nthrs--) { @@ -2425,8 +2483,10 @@ ksocknal_base_startup(void) ksocknal_data.ksnd_connd_starting = 0; ksocknal_data.ksnd_connd_failed_stamp = 0; ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds(); - /* must have at least 2 connds to remain responsive to accepts while - * connecting */ + /* + * must have at least 2 connds to remain responsive to accepts while + * connecting + */ if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1) *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1; @@ -2446,7 +2506,7 @@ ksocknal_base_startup(void) snprintf(name, sizeof(name), "socknal_cd%02d", i); rc = ksocknal_thread_start(ksocknal_connd, (void *)((ulong_ptr_t)i), name); - if (rc != 0) { + if (rc) { spin_lock_bh(&ksocknal_data.ksnd_connd_lock); ksocknal_data.ksnd_connd_starting--; spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); @@ -2456,7 +2516,7 @@ ksocknal_base_startup(void) } rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper"); - if (rc != 0) { + if (rc) { CERROR("Can't spawn socknal reaper: %d\n", rc); goto failed; } @@ -2491,7 +2551,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) } } - if (peer != NULL) { + if (peer) { ksock_route_t *route; ksock_conn_t *conn; @@ -2515,9 +2575,9 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) list_for_each(tmp, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); CWARN("Conn: ref %d, sref %d, t %d, c %d\n", - atomic_read(&conn->ksnc_conn_refcount), - atomic_read(&conn->ksnc_sock_refcount), - conn->ksnc_type, conn->ksnc_closing); + atomic_read(&conn->ksnc_conn_refcount), + atomic_read(&conn->ksnc_sock_refcount), + conn->ksnc_type, conn->ksnc_closing); } } @@ -2548,7 +2608,7 @@ ksocknal_shutdown(lnet_ni_t *ni) /* Wait for all peer state to clean up */ i = 2; spin_lock_bh(&net->ksnn_lock); - while (net->ksnn_npeers != 0) { + while (net->ksnn_npeers) { spin_unlock_bh(&net->ksnn_lock); i++; @@ -2565,15 +2625,15 @@ ksocknal_shutdown(lnet_ni_t *ni) 
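ksocknal_base_startup() above sizes scheduler threads per CPU partition: an explicit nscheds module parameter wins, otherwise roughly half the CPUs in the partition are used, bounded below by a small floor and above by the partition size. A sketch of that clamp; the floor constant and function name are illustrative rather than the driver's exact symbols:

    /* floor on scheduler threads per partition; illustrative value only */
    #define SCHED_THREADS_FLOOR 3

    static int sched_nthreads(int cpus_in_cpt, int tunable)
    {
            int nthrs = cpus_in_cpt;

            if (tunable > 0)
                    return nthrs < tunable ? nthrs : tunable;  /* module parameter wins */

            nthrs = nthrs >> 1;                 /* leave ~half the CPUs to upper layers */
            if (nthrs < SCHED_THREADS_FLOOR)
                    nthrs = SCHED_THREADS_FLOOR;
            if (nthrs > cpus_in_cpt)
                    nthrs = cpus_in_cpt;        /* never more threads than CPUs in the partition */

            return nthrs;
    }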
spin_unlock_bh(&net->ksnn_lock); for (i = 0; i < net->ksnn_ninterfaces; i++) { - LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0); - LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0); + LASSERT(!net->ksnn_interfaces[i].ksni_npeers); + LASSERT(!net->ksnn_interfaces[i].ksni_nroutes); } list_del(&net->ksnn_list); LIBCFS_FREE(net, sizeof(*net)); ksocknal_data.ksnd_nnets--; - if (ksocknal_data.ksnd_nnets == 0) + if (!ksocknal_data.ksnd_nnets) ksocknal_base_shutdown(); } @@ -2601,7 +2661,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) continue; rc = lnet_ipif_query(names[i], &up, &ip, &mask); - if (rc != 0) { + if (rc) { CWARN("Can't get interface %s info: %d\n", names[i], rc); continue; @@ -2628,7 +2688,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) lnet_ipif_free_enumeration(names, n); - if (j == 0) + if (!j) CERROR("Can't find any usable interfaces\n"); return j; @@ -2647,21 +2707,20 @@ ksocknal_search_new_ipif(ksock_net_t *net) ksock_net_t *tmp; int j; - if (colon != NULL) /* ignore alias device */ + if (colon) /* ignore alias device */ *colon = 0; - list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, - ksnn_list) { + list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) { for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) { char *ifnam2 = &tmp->ksnn_interfaces[j].ksni_name[0]; char *colon2 = strchr(ifnam2, ':'); - if (colon2 != NULL) + if (colon2) *colon2 = 0; - found = strcmp(ifnam, ifnam2) == 0; - if (colon2 != NULL) + found = !strcmp(ifnam, ifnam2); + if (colon2) *colon2 = ':'; } if (found) @@ -2669,7 +2728,7 @@ ksocknal_search_new_ipif(ksock_net_t *net) } new_ipif += !found; - if (colon != NULL) + if (colon) *colon = ':'; } @@ -2683,7 +2742,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) int rc = 0; int i; - if (info->ksi_nthreads == 0) { + if (!info->ksi_nthreads) { if (*ksocknal_tunables.ksnd_nscheds > 0) { nthrs = info->ksi_nthreads_max; } else { @@ -2711,7 +2770,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id, name); - if (rc == 0) + if (!rc) continue; CERROR("Can't spawn thread %d for scheduler[%d]: %d\n", @@ -2734,7 +2793,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) for (i = 0; i < ncpts; i++) { struct ksock_sched_info *info; - int cpt = (cpts == NULL) ? i : cpts[i]; + int cpt = !cpts ? 
i : cpts[i]; LASSERT(cpt < cfs_cpt_number(lnet_cpt_table())); info = ksocknal_data.ksnd_sched_info[cpt]; @@ -2743,7 +2802,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) continue; rc = ksocknal_start_schedulers(info); - if (rc != 0) + if (rc) return rc; } return 0; @@ -2760,12 +2819,12 @@ ksocknal_startup(lnet_ni_t *ni) if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) { rc = ksocknal_base_startup(); - if (rc != 0) + if (rc) return rc; } LIBCFS_ALLOC(net, sizeof(*net)); - if (net == NULL) + if (!net) goto fail_0; spin_lock_init(&net->ksnn_lock); @@ -2776,7 +2835,7 @@ ksocknal_startup(lnet_ni_t *ni) ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits; ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits; - if (ni->ni_interfaces[0] == NULL) { + if (!ni->ni_interfaces[0]) { rc = ksocknal_enumerate_interfaces(net); if (rc <= 0) goto fail_1; @@ -2786,14 +2845,14 @@ ksocknal_startup(lnet_ni_t *ni) for (i = 0; i < LNET_MAX_INTERFACES; i++) { int up; - if (ni->ni_interfaces[i] == NULL) + if (!ni->ni_interfaces[i]) break; rc = lnet_ipif_query(ni->ni_interfaces[i], &up, - &net->ksnn_interfaces[i].ksni_ipaddr, - &net->ksnn_interfaces[i].ksni_netmask); + &net->ksnn_interfaces[i].ksni_ipaddr, + &net->ksnn_interfaces[i].ksni_netmask); - if (rc != 0) { + if (rc) { CERROR("Can't get interface %s info: %d\n", ni->ni_interfaces[i], rc); goto fail_1; @@ -2814,7 +2873,7 @@ ksocknal_startup(lnet_ni_t *ni) /* call it before add it to ksocknal_data.ksnd_nets */ rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) + if (rc) goto fail_1; ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), @@ -2828,20 +2887,18 @@ ksocknal_startup(lnet_ni_t *ni) fail_1: LIBCFS_FREE(net, sizeof(*net)); fail_0: - if (ksocknal_data.ksnd_nnets == 0) + if (!ksocknal_data.ksnd_nnets) ksocknal_base_shutdown(); return -ENETDOWN; } -static void __exit -ksocknal_module_fini(void) +static void __exit ksocklnd_exit(void) { lnet_unregister_lnd(&the_ksocklnd); } -static int __init -ksocknal_module_init(void) +static int __init ksocklnd_init(void) { int rc; @@ -2861,7 +2918,7 @@ ksocknal_module_init(void) the_ksocklnd.lnd_accept = ksocknal_accept; rc = ksocknal_tunables_init(); - if (rc != 0) + if (rc) return rc; lnet_register_lnd(&the_ksocklnd); @@ -2870,9 +2927,9 @@ ksocknal_module_init(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0"); +MODULE_DESCRIPTION("TCP Socket LNet Network Driver"); +MODULE_VERSION("2.7.0"); MODULE_LICENSE("GPL"); -MODULE_VERSION("3.0.0"); -module_init(ksocknal_module_init); -module_exit(ksocknal_module_fini); +module_init(ksocklnd_init); +module_exit(ksocklnd_exit); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index f4fa72550657..a60d72f9432f 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -19,10 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #ifndef _SOCKLND_SOCKLND_H_ @@ -69,8 +65,10 @@ #define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ -/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled). 
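The closing socklnd.c hunks above only rename the module entry points to ksocklnd_init()/ksocklnd_exit() and update the MODULE_* strings; the shape remains the standard register-on-load, unregister-on-unload pair. A generic skeleton of that shape, with a hypothetical registration rather than the LNet LND API:

    #include <linux/init.h>
    #include <linux/module.h>

    static int __init example_init(void)
    {
            /* register with the upper layer here; 0 on success, -errno on failure */
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* unregister whatever example_init() set up */
    }

    module_init(example_init);
    module_exit(example_exit);

    MODULE_DESCRIPTION("Skeleton mirroring the ksocklnd entry-point layout");
    MODULE_LICENSE("GPL");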
- * no risk if we're not running on a CONFIG_HIGHMEM platform. */ +/* + * risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled). + * no risk if we're not running on a CONFIG_HIGHMEM platform. + */ #ifdef CONFIG_HIGHMEM # define SOCKNAL_RISK_KMAP_DEADLOCK 0 #else @@ -237,15 +235,16 @@ typedef struct { #define SOCKNAL_INIT_DATA 1 #define SOCKNAL_INIT_ALL 2 -/* A packet just assembled for transmission is represented by 1 or more +/* + * A packet just assembled for transmission is represented by 1 or more * struct iovec fragments (the first frag contains the portals header), * followed by 0 or more lnet_kiov_t fragments. * * On the receive side, initially 1 struct iovec fragment is posted for * receive (the header). Once the header has been received, the payload is * received into either struct iovec or lnet_kiov_t fragments, depending on - * what the header matched or whether the message needs forwarding. */ - + * what the header matched or whether the message needs forwarding. + */ struct ksock_conn; /* forward ref */ struct ksock_peer; /* forward ref */ struct ksock_route; /* forward ref */ @@ -284,12 +283,14 @@ typedef struct /* transmit packet */ } tx_frags; } ksock_tx_t; -#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) +#define KSOCK_NOOP_TX_SIZE (offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) /* network zero copy callback descriptor embedded in ksock_tx_t */ -/* space for the rx frag descriptors; we either read a single contiguous - * header, or up to LNET_MAX_IOV frags of payload of either type. */ +/* + * space for the rx frag descriptors; we either read a single contiguous + * header, or up to LNET_MAX_IOV frags of payload of either type. + */ typedef union { struct kvec iov[LNET_MAX_IOV]; lnet_kiov_t kiov[LNET_MAX_IOV]; @@ -463,11 +464,13 @@ typedef struct ksock_proto { /* handle ZC ACK */ int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); - /* msg type matches the connection type: + /* + * msg type matches the connection type: * return value: * return MATCH_NO : no * return MATCH_YES : matching type - * return MATCH_MAY : can be backup */ + * return MATCH_MAY : can be backup + */ int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); } ksock_proto_t; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index 477b385f15e0..976fd78926e0 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -19,9 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
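KSOCK_NOOP_TX_SIZE in socklnd.h above stays an offsetof() into the tx descriptor: a NOOP message carries no payload fragments, so only the part of the descriptor that precedes the fragment array is allocated for it. A standalone sketch of that sizing trick, using a hypothetical descriptor layout:

    #include <stddef.h>
    #include <stdlib.h>

    struct frag { void *addr; size_t len; };

    struct tx_desc {
            int refcount;
            int nfrags;
            struct frag frags[16];   /* only payload-bearing messages use these */
    };

    /* A NOOP tx never touches frags[], so allocate only up to its start. */
    #define NOOP_TX_SIZE offsetof(struct tx_desc, frags[0])

    static struct tx_desc *alloc_noop_tx(void)
    {
            return malloc(NOOP_TX_SIZE);   /* header fields only, no fragment array */
    }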
*/ #include "socklnd.h" @@ -47,10 +44,10 @@ ksocknal_alloc_tx(int type, int size) spin_unlock(&ksocknal_data.ksnd_tx_lock); } - if (tx == NULL) + if (!tx) LIBCFS_ALLOC(tx, size); - if (tx == NULL) + if (!tx) return NULL; atomic_set(&tx->tx_refcount, 1); @@ -70,7 +67,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) ksock_tx_t *tx; tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate noop tx desc\n"); return NULL; } @@ -90,11 +87,11 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) } void -ksocknal_free_tx (ksock_tx_t *tx) +ksocknal_free_tx(ksock_tx_t *tx) { atomic_dec(&ksocknal_data.ksnd_nactive_txs); - if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { + if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { /* it's a noop tx */ spin_lock(&ksocknal_data.ksnd_tx_lock); @@ -107,7 +104,7 @@ ksocknal_free_tx (ksock_tx_t *tx) } static int -ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) { struct kvec *iov = tx->tx_iov; int nob; @@ -122,7 +119,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) return rc; nob = rc; - LASSERT (nob <= tx->tx_resid); + LASSERT(nob <= tx->tx_resid); tx->tx_resid -= nob; /* "consume" iov */ @@ -138,19 +135,19 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) nob -= iov->iov_len; tx->tx_iov = ++iov; tx->tx_niov--; - } while (nob != 0); + } while (nob); return rc; } static int -ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) { lnet_kiov_t *kiov = tx->tx_kiov; int nob; int rc; - LASSERT(tx->tx_niov == 0); + LASSERT(!tx->tx_niov); LASSERT(tx->tx_nkiov > 0); /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */ @@ -160,7 +157,7 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) return rc; nob = rc; - LASSERT (nob <= tx->tx_resid); + LASSERT(nob <= tx->tx_resid); tx->tx_resid -= nob; /* "consume" kiov */ @@ -176,27 +173,27 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) nob -= (int)kiov->kiov_len; tx->tx_kiov = ++kiov; tx->tx_nkiov--; - } while (nob != 0); + } while (nob); return rc; } static int -ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx) { int rc; int bufnob; - if (ksocknal_data.ksnd_stall_tx != 0) { + if (ksocknal_data.ksnd_stall_tx) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx)); } - LASSERT(tx->tx_resid != 0); + LASSERT(tx->tx_resid); rc = ksocknal_connsock_addref(conn); - if (rc != 0) { - LASSERT (conn->ksnc_closing); + if (rc) { + LASSERT(conn->ksnc_closing); return -ESHUTDOWN; } @@ -205,10 +202,10 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) /* testing... 
*/ ksocknal_data.ksnd_enomem_tx--; rc = -EAGAIN; - } else if (tx->tx_niov != 0) { - rc = ksocknal_send_iov (conn, tx); + } else if (tx->tx_niov) { + rc = ksocknal_send_iov(conn, tx); } else { - rc = ksocknal_send_kiov (conn, tx); + rc = ksocknal_send_kiov(conn, tx); } bufnob = conn->ksnc_sock->sk->sk_wmem_queued; @@ -216,8 +213,10 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) conn->ksnc_tx_bufnob += rc; /* account it */ if (bufnob < conn->ksnc_tx_bufnob) { - /* allocated send buffer bytes < computed; infer - * something got ACKed */ + /* + * allocated send buffer bytes < computed; infer + * something got ACKed + */ conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); conn->ksnc_peer->ksnp_last_alive = cfs_time_current(); @@ -227,7 +226,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) if (rc <= 0) { /* Didn't write anything? */ - if (rc == 0) /* some stacks return 0 instead of -EAGAIN */ + if (!rc) /* some stacks return 0 instead of -EAGAIN */ rc = -EAGAIN; /* Check if EAGAIN is due to memory pressure */ @@ -238,17 +237,17 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) } /* socket's wmem_queued now includes 'rc' bytes */ - atomic_sub (rc, &conn->ksnc_tx_nob); + atomic_sub(rc, &conn->ksnc_tx_nob); rc = 0; - } while (tx->tx_resid != 0); + } while (tx->tx_resid); ksocknal_connsock_decref(conn); return rc; } static int -ksocknal_recv_iov (ksock_conn_t *conn) +ksocknal_recv_iov(ksock_conn_t *conn) { struct kvec *iov = conn->ksnc_rx_iov; int nob; @@ -256,8 +255,10 @@ ksocknal_recv_iov (ksock_conn_t *conn) LASSERT(conn->ksnc_rx_niov > 0); - /* Never touch conn->ksnc_rx_iov or change connection - * status inside ksocknal_lib_recv_iov */ + /* + * Never touch conn->ksnc_rx_iov or change connection + * status inside ksocknal_lib_recv_iov + */ rc = ksocknal_lib_recv_iov(conn); if (rc <= 0) @@ -287,13 +288,13 @@ ksocknal_recv_iov (ksock_conn_t *conn) nob -= iov->iov_len; conn->ksnc_rx_iov = ++iov; conn->ksnc_rx_niov--; - } while (nob != 0); + } while (nob); return rc; } static int -ksocknal_recv_kiov (ksock_conn_t *conn) +ksocknal_recv_kiov(ksock_conn_t *conn) { lnet_kiov_t *kiov = conn->ksnc_rx_kiov; int nob; @@ -301,8 +302,10 @@ ksocknal_recv_kiov (ksock_conn_t *conn) LASSERT(conn->ksnc_rx_nkiov > 0); - /* Never touch conn->ksnc_rx_kiov or change connection - * status inside ksocknal_lib_recv_iov */ + /* + * Never touch conn->ksnc_rx_kiov or change connection + * status inside ksocknal_lib_recv_iov + */ rc = ksocknal_lib_recv_kiov(conn); if (rc <= 0) @@ -332,41 +335,43 @@ ksocknal_recv_kiov (ksock_conn_t *conn) nob -= kiov->kiov_len; conn->ksnc_rx_kiov = ++kiov; conn->ksnc_rx_nkiov--; - } while (nob != 0); + } while (nob); return 1; } static int -ksocknal_receive (ksock_conn_t *conn) +ksocknal_receive(ksock_conn_t *conn) { - /* Return 1 on success, 0 on EOF, < 0 on error. + /* + * Return 1 on success, 0 on EOF, < 0 on error. * Caller checks ksnc_rx_nob_wanted to determine - * progress/completion. */ + * progress/completion. 
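The send path above "consumes" its iovec after every partial send: fully transmitted fragments are dropped from the front and the first unfinished one is advanced in place, so the next call resumes exactly where the socket stopped accepting data. A standalone sketch of that bookkeeping with a plain struct iovec and a hypothetical helper name:

    #include <sys/uio.h>
    #include <stddef.h>
    #include <assert.h>

    /* Advance (*iovp, *niovp) past 'sent' bytes the socket accepted. */
    static void consume_iov(struct iovec **iovp, unsigned int *niovp, size_t sent)
    {
            struct iovec *iov = *iovp;
            unsigned int niov = *niovp;

            while (sent) {
                    assert(niov > 0);
                    if (sent < iov->iov_len) {
                            /* partial fragment: advance it in place */
                            iov->iov_base = (char *)iov->iov_base + sent;
                            iov->iov_len -= sent;
                            break;
                    }
                    /* whole fragment sent: drop it from the front */
                    sent -= iov->iov_len;
                    iov++;
                    niov--;
            }
            *iovp = iov;
            *niovp = niov;
    }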
+ */ int rc; - if (ksocknal_data.ksnd_stall_rx != 0) { + if (ksocknal_data.ksnd_stall_rx) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx)); } rc = ksocknal_connsock_addref(conn); - if (rc != 0) { - LASSERT (conn->ksnc_closing); + if (rc) { + LASSERT(conn->ksnc_closing); return -ESHUTDOWN; } for (;;) { - if (conn->ksnc_rx_niov != 0) - rc = ksocknal_recv_iov (conn); + if (conn->ksnc_rx_niov) + rc = ksocknal_recv_iov(conn); else - rc = ksocknal_recv_kiov (conn); + rc = ksocknal_recv_kiov(conn); if (rc <= 0) { /* error/EOF or partial receive */ if (rc == -EAGAIN) { rc = 1; - } else if (rc == 0 && conn->ksnc_rx_started) { + } else if (!rc && conn->ksnc_rx_started) { /* EOF in the middle of a message */ rc = -EPROTO; } @@ -375,7 +380,7 @@ ksocknal_receive (ksock_conn_t *conn) /* Completed a fragment */ - if (conn->ksnc_rx_nob_wanted == 0) { + if (!conn->ksnc_rx_nob_wanted) { rc = 1; break; } @@ -386,36 +391,36 @@ ksocknal_receive (ksock_conn_t *conn) } void -ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx) +ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx) { lnet_msg_t *lnetmsg = tx->tx_lnetmsg; - int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO; + int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO; - LASSERT(ni != NULL || tx->tx_conn != NULL); + LASSERT(ni || tx->tx_conn); - if (tx->tx_conn != NULL) + if (tx->tx_conn) ksocknal_conn_decref(tx->tx_conn); - if (ni == NULL && tx->tx_conn != NULL) + if (!ni && tx->tx_conn) ni = tx->tx_conn->ksnc_peer->ksnp_ni; - ksocknal_free_tx (tx); - if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */ - lnet_finalize (ni, lnetmsg, rc); + ksocknal_free_tx(tx); + if (lnetmsg) /* KSOCK_MSG_NOOP go without lnetmsg */ + lnet_finalize(ni, lnetmsg, rc); } void -ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error) +ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error) { ksock_tx_t *tx; - while (!list_empty (txlist)) { + while (!list_empty(txlist)) { tx = list_entry(txlist->next, ksock_tx_t, tx_list); - if (error && tx->tx_lnetmsg != NULL) { + if (error && tx->tx_lnetmsg) { CNETERR("Deleting packet type %d len %d %s->%s\n", - le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type), - le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length), + le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type), + le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length), libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)), libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid))); } else if (error) { @@ -435,12 +440,14 @@ ksocknal_check_zc_req(ksock_tx_t *tx) ksock_conn_t *conn = tx->tx_conn; ksock_peer_t *peer = conn->ksnc_peer; - /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx + /* + * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx * to ksnp_zc_req_list if some fragment of this message should be sent * zero-copy. Our peer will send an ACK containing this cookie when * she has received this message to tell us we can signal completion. * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on - * ksnp_zc_req_list. */ + * ksnp_zc_req_list. + */ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); LASSERT(tx->tx_zc_capable); @@ -450,9 +457,10 @@ ksocknal_check_zc_req(ksock_tx_t *tx) !conn->ksnc_zc_capable) return; - /* assign cookie and queue tx to pending list, it will be released when - * a matching ack is received. 
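ksocknal_check_zc_req() above stamps each zero-copy tx with a non-zero cookie from a per-peer counter; zero means no ACK is outstanding and the lowest values are reserved for keepalive pings, so the counter skips past them when it wraps. A minimal sketch of such an allocator; RESERVED_COOKIES and the struct layout are illustrative:

    #include <stdint.h>

    #define RESERVED_COOKIES 1ULL   /* e.g. the keepalive-ping cookie */

    struct peer_state {
            uint64_t zc_next_cookie;   /* initialised to RESERVED_COOKIES + 1 */
    };

    static uint64_t next_zc_cookie(struct peer_state *p)
    {
            uint64_t cookie = p->zc_next_cookie++;

            if (p->zc_next_cookie == 0)                      /* wrapped */
                    p->zc_next_cookie = RESERVED_COOKIES + 1; /* skip 0 and reserved values */

            return cookie;
    }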
See ksocknal_handle_zcack() */ - + /* + * assign cookie and queue tx to pending list, it will be released when + * a matching ack is received. See ksocknal_handle_zcack() + */ ksocknal_tx_addref(tx); spin_lock(&peer->ksnp_lock); @@ -461,11 +469,11 @@ ksocknal_check_zc_req(ksock_tx_t *tx) tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); - LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); + LASSERT(!tx->tx_msg.ksm_zc_cookies[0]); tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; - if (peer->ksnp_zc_next_cookie == 0) + if (!peer->ksnp_zc_next_cookie) peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list); @@ -485,7 +493,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx) spin_lock(&peer->ksnp_lock); - if (tx->tx_msg.ksm_zc_cookies[0] == 0) { + if (!tx->tx_msg.ksm_zc_cookies[0]) { /* Not waiting for an ACK */ spin_unlock(&peer->ksnp_lock); return; @@ -500,20 +508,20 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx) } static int -ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx) { int rc; if (tx->tx_zc_capable && !tx->tx_zc_checked) ksocknal_check_zc_req(tx); - rc = ksocknal_transmit (conn, tx); + rc = ksocknal_transmit(conn, tx); CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc); - if (tx->tx_resid == 0) { + if (!tx->tx_resid) { /* Sent everything OK */ - LASSERT (rc == 0); + LASSERT(!rc); return 0; } @@ -532,13 +540,13 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); /* enomem list takes over scheduler's ref... */ - LASSERT (conn->ksnc_tx_scheduled); + LASSERT(conn->ksnc_tx_scheduled); list_add_tail(&conn->ksnc_tx_list, - &ksocknal_data.ksnd_enomem_conns); + &ksocknal_data.ksnd_enomem_conns); if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(), SOCKNAL_ENOMEM_RETRY), ksocknal_data.ksnd_reaper_waketime)) - wake_up (&ksocknal_data.ksnd_reaper_waitq); + wake_up(&ksocknal_data.ksnd_reaper_waitq); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); return rc; @@ -569,21 +577,19 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_uncheck_zc_req(tx); /* it's not an error if conn is being closed */ - ksocknal_close_conn_and_siblings (conn, - (conn->ksnc_closing) ? 0 : rc); + ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 
0 : rc); return rc; } static void -ksocknal_launch_connection_locked (ksock_route_t *route) +ksocknal_launch_connection_locked(ksock_route_t *route) { - /* called holding write lock on ksnd_global_lock */ LASSERT(!route->ksnr_scheduled); LASSERT(!route->ksnr_connecting); - LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0); + LASSERT(ksocknal_route_mask() & ~route->ksnr_connected); route->ksnr_scheduled = 1; /* scheduling conn for connd */ ksocknal_route_addref(route); /* extra ref for connd */ @@ -591,14 +597,14 @@ ksocknal_launch_connection_locked (ksock_route_t *route) spin_lock_bh(&ksocknal_data.ksnd_connd_lock); list_add_tail(&route->ksnr_connd_list, - &ksocknal_data.ksnd_connd_routes); + &ksocknal_data.ksnd_connd_routes); wake_up(&ksocknal_data.ksnd_connd_waitq); spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); } void -ksocknal_launch_all_connections_locked (ksock_peer_t *peer) +ksocknal_launch_all_connections_locked(ksock_peer_t *peer) { ksock_route_t *route; @@ -606,7 +612,7 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer) for (;;) { /* launch any/all connections that need it */ route = ksocknal_find_connectable_route_locked(peer); - if (route == NULL) + if (!route) return; ksocknal_launch_connection_locked(route); @@ -623,15 +629,15 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) int tnob = 0; int fnob = 0; - list_for_each (tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer->ksnp_conns) { ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); int nob = atomic_read(&c->ksnc_tx_nob) + c->ksnc_sock->sk->sk_wmem_queued; int rc; LASSERT(!c->ksnc_closing); - LASSERT(c->ksnc_proto != NULL && - c->ksnc_proto->pro_match_tx != NULL); + LASSERT(c->ksnc_proto && + c->ksnc_proto->pro_match_tx); rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk); @@ -642,7 +648,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) continue; case SOCKNAL_MATCH_YES: /* typed connection */ - if (typed == NULL || tnob > nob || + if (!typed || tnob > nob || (tnob == nob && *ksocknal_tunables.ksnd_round_robin && cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) { typed = c; @@ -651,7 +657,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) break; case SOCKNAL_MATCH_MAY: /* fallback connection */ - if (fallback == NULL || fnob > nob || + if (!fallback || fnob > nob || (fnob == nob && *ksocknal_tunables.ksnd_round_robin && cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) { fallback = c; @@ -662,9 +668,9 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) } /* prefer the typed selection */ - conn = (typed != NULL) ? typed : fallback; + conn = (typed) ? 
typed : fallback; - if (conn != NULL) + if (conn) conn->ksnc_tx_last_post = cfs_time_current(); return conn; @@ -675,48 +681,51 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx) { conn->ksnc_proto->pro_pack(tx); - atomic_add (tx->tx_nob, &conn->ksnc_tx_nob); + atomic_add(tx->tx_nob, &conn->ksnc_tx_nob); ksocknal_conn_addref(conn); /* +1 ref for tx */ tx->tx_conn = conn; } void -ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) +ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn) { ksock_sched_t *sched = conn->ksnc_scheduler; ksock_msg_t *msg = &tx->tx_msg; ksock_tx_t *ztx = NULL; int bufnob = 0; - /* called holding global lock (read or irq-write) and caller may + /* + * called holding global lock (read or irq-write) and caller may * not have dropped this lock between finding conn and calling me, * so we don't need the {get,put}connsock dance to deref - * ksnc_sock... */ + * ksnc_sock... + */ LASSERT(!conn->ksnc_closing); CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); + libcfs_id2str(conn->ksnc_peer->ksnp_id), + &conn->ksnc_ipaddr, conn->ksnc_port); ksocknal_tx_prep(conn, tx); - /* Ensure the frags we've been given EXACTLY match the number of + /* + * Ensure the frags we've been given EXACTLY match the number of * bytes we want to send. Many TCP/IP stacks disregard any total * size parameters passed to them and just look at the frags. * * We always expect at least 1 mapped fragment containing the - * complete ksocknal message header. */ - LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) + + * complete ksocknal message header. + */ + LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) + lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == (unsigned int)tx->tx_nob); LASSERT(tx->tx_niov >= 1); LASSERT(tx->tx_resid == tx->tx_nob); - CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", - tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type : - KSOCK_MSG_NOOP, - tx->tx_nob, tx->tx_niov, tx->tx_nkiov); + CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", + tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type : + KSOCK_MSG_NOOP, + tx->tx_nob, tx->tx_niov, tx->tx_nkiov); /* * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__ @@ -725,7 +734,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) bufnob = conn->ksnc_sock->sk->sk_wmem_queued; spin_lock_bh(&sched->kss_lock); - if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) { + if (list_empty(&conn->ksnc_tx_queue) && !bufnob) { /* First packet starts the timeout */ conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); @@ -736,26 +745,30 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) } if (msg->ksm_type == KSOCK_MSG_NOOP) { - /* The packet is noop ZC ACK, try to piggyback the ack_cookie - * on a normal packet so I don't need to send it */ - LASSERT(msg->ksm_zc_cookies[1] != 0); - LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL); + /* + * The packet is noop ZC ACK, try to piggyback the ack_cookie + * on a normal packet so I don't need to send it + */ + LASSERT(msg->ksm_zc_cookies[1]); + LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) ztx = tx; /* ZC ACK piggybacked on ztx release tx later */ } else { - /* It's a normal packet - can it piggback a noop zc-ack that - * has been queued already? 
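ksocknal_find_conn_locked() above selects a connection in two tiers: connections whose protocol match says yes compete as "typed", those that merely may carry the message compete as "fallback", and within a tier the one with the fewest queued bytes wins, with typed preferred overall. A simplified sketch with hypothetical types; the real code also breaks ties round-robin on last-post time:

    #include <stddef.h>

    enum match { MATCH_NO, MATCH_MAY, MATCH_YES };

    struct conn {
            enum match (*match_tx)(struct conn *c);
            int queued_bytes;
    };

    static struct conn *pick_conn(struct conn *conns, int nconns)
    {
            struct conn *typed = NULL, *fallback = NULL;
            int tnob = 0, fnob = 0;
            int i;

            for (i = 0; i < nconns; i++) {
                    struct conn *c = &conns[i];
                    int nob = c->queued_bytes;

                    switch (c->match_tx(c)) {
                    case MATCH_NO:
                            continue;
                    case MATCH_YES:               /* preferred tier */
                            if (!typed || tnob > nob) {
                                    typed = c;
                                    tnob = nob;
                            }
                            break;
                    case MATCH_MAY:               /* acceptable as a backup */
                            if (!fallback || fnob > nob) {
                                    fallback = c;
                                    fnob = nob;
                            }
                            break;
                    }
            }

            return typed ? typed : fallback;      /* prefer the typed selection */
    }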
*/ - LASSERT(msg->ksm_zc_cookies[1] == 0); - LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL); + /* + * It's a normal packet - can it piggback a noop zc-ack that + * has been queued already? + */ + LASSERT(!msg->ksm_zc_cookies[1]); + LASSERT(conn->ksnc_proto->pro_queue_tx_msg); ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx); /* ztx will be released later */ } - if (ztx != NULL) { - atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob); + if (ztx) { + atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob); list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs); } @@ -763,24 +776,23 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) !conn->ksnc_tx_scheduled) { /* not scheduled to send */ /* +1 ref for scheduler */ ksocknal_conn_addref(conn); - list_add_tail (&conn->ksnc_tx_list, - &sched->kss_tx_conns); + list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); conn->ksnc_tx_scheduled = 1; - wake_up (&sched->kss_waitq); + wake_up(&sched->kss_waitq); } spin_unlock_bh(&sched->kss_lock); } ksock_route_t * -ksocknal_find_connectable_route_locked (ksock_peer_t *peer) +ksocknal_find_connectable_route_locked(ksock_peer_t *peer) { unsigned long now = cfs_time_current(); struct list_head *tmp; ksock_route_t *route; - list_for_each (tmp, &peer->ksnp_routes) { - route = list_entry (tmp, ksock_route_t, ksnr_list); + list_for_each(tmp, &peer->ksnp_routes) { + route = list_entry(tmp, ksock_route_t, ksnr_list); LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); @@ -788,10 +800,10 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer) continue; /* all route types connected ? */ - if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0) + if (!(ksocknal_route_mask() & ~route->ksnr_connected)) continue; - if (!(route->ksnr_retry_interval == 0 || /* first attempt */ + if (!(!route->ksnr_retry_interval || /* first attempt */ cfs_time_aftereq(now, route->ksnr_timeout))) { CDEBUG(D_NET, "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n", @@ -809,13 +821,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer) } ksock_route_t * -ksocknal_find_connecting_route_locked (ksock_peer_t *peer) +ksocknal_find_connecting_route_locked(ksock_peer_t *peer) { struct list_head *tmp; ksock_route_t *route; - list_for_each (tmp, &peer->ksnp_routes) { - route = list_entry (tmp, ksock_route_t, ksnr_list); + list_for_each(tmp, &peer->ksnp_routes) { + route = list_entry(tmp, ksock_route_t, ksnr_list); LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); @@ -827,7 +839,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer) } int -ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) +ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) { ksock_peer_t *peer; ksock_conn_t *conn; @@ -835,21 +847,23 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) int retry; int rc; - LASSERT(tx->tx_conn == NULL); + LASSERT(!tx->tx_conn); g_lock = &ksocknal_data.ksnd_global_lock; for (retry = 0;; retry = 1) { read_lock(g_lock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) { - if (ksocknal_find_connectable_route_locked(peer) == NULL) { + if (peer) { + if (!ksocknal_find_connectable_route_locked(peer)) { conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); - if (conn != NULL) { - /* I've got no routes that need to be + if (conn) { + /* + * I've got no routes that need to be * connecting and I do have an actual - * connection... */ - ksocknal_queue_tx_locked (tx, conn); + * connection... 
+ */ + ksocknal_queue_tx_locked(tx, conn); read_unlock(g_lock); return 0; } @@ -862,12 +876,12 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) write_lock_bh(g_lock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) + if (peer) break; write_unlock_bh(g_lock); - if ((id.pid & LNET_PID_USERFLAG) != 0) { + if (id.pid & LNET_PID_USERFLAG) { CERROR("Refusing to create a connection to userspace process %s\n", libcfs_id2str(id)); return -EHOSTUNREACH; @@ -881,7 +895,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) rc = ksocknal_add_peer(ni, id, LNET_NIDADDR(id.nid), lnet_acceptor_port()); - if (rc != 0) { + if (rc) { CERROR("Can't add peer %s: %d\n", libcfs_id2str(id), rc); return rc; @@ -891,21 +905,21 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) ksocknal_launch_all_connections_locked(peer); conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); - if (conn != NULL) { + if (conn) { /* Connection exists; queue message on it */ - ksocknal_queue_tx_locked (tx, conn); + ksocknal_queue_tx_locked(tx, conn); write_unlock_bh(g_lock); return 0; } if (peer->ksnp_accepting > 0 || - ksocknal_find_connecting_route_locked (peer) != NULL) { + ksocknal_find_connecting_route_locked(peer)) { /* the message is going to be pinned to the peer */ tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); /* Queue the message until a connection is established */ - list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); + list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue); write_unlock_bh(g_lock); return 0; } @@ -932,19 +946,20 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) int desc_size; int rc; - /* NB 'private' is different depending on what we're sending. - * Just ignore it... */ - + /* + * NB 'private' is different depending on what we're sending. + * Just ignore it... 
+ */ CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n", payload_nob, payload_niov, libcfs_id2str(target)); - LASSERT(payload_nob == 0 || payload_niov > 0); + LASSERT(!payload_nob || payload_niov > 0); LASSERT(payload_niov <= LNET_MAX_IOV); /* payload is either all vaddrs or all pages */ - LASSERT (!(payload_kiov != NULL && payload_iov != NULL)); - LASSERT (!in_interrupt ()); + LASSERT(!(payload_kiov && payload_iov)); + LASSERT(!in_interrupt()); - if (payload_iov != NULL) + if (payload_iov) desc_size = offsetof(ksock_tx_t, tx_frags.virt.iov[1 + payload_niov]); else @@ -954,7 +969,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) if (lntmsg->msg_vmflush) mpflag = cfs_memory_pressure_get_and_set(); tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate tx desc type %d size %d\n", type, desc_size); if (lntmsg->msg_vmflush) @@ -965,7 +980,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) tx->tx_conn = NULL; /* set when assigned a conn */ tx->tx_lnetmsg = lntmsg; - if (payload_iov != NULL) { + if (payload_iov) { tx->tx_kiov = NULL; tx->tx_nkiov = 0; tx->tx_iov = tx->tx_frags.virt.iov; @@ -992,7 +1007,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) if (!mpflag) cfs_memory_pressure_restore(mpflag); - if (rc == 0) + if (!rc) return 0; ksocknal_free_tx(tx); @@ -1014,7 +1029,7 @@ ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name) } void -ksocknal_thread_fini (void) +ksocknal_thread_fini(void) { write_lock_bh(&ksocknal_data.ksnd_global_lock); ksocknal_data.ksnd_nthreads--; @@ -1022,7 +1037,7 @@ ksocknal_thread_fini (void) } int -ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) +ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip) { static char ksocknal_slop_buffer[4096]; @@ -1030,14 +1045,14 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) unsigned int niov; int skipped; - LASSERT(conn->ksnc_proto != NULL); + LASSERT(conn->ksnc_proto); - if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) { + if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) { /* Remind the socket to ack eagerly... */ ksocknal_lib_eager_ack(conn); } - if (nob_to_skip == 0) { /* right at next packet boundary now */ + if (!nob_to_skip) { /* right at next packet boundary now */ conn->ksnc_rx_started = 0; mb(); /* racing with timeout thread */ @@ -1061,11 +1076,11 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space; conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg; - conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t); + conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t); break; default: - LBUG (); + LBUG(); } conn->ksnc_rx_niov = 1; @@ -1075,9 +1090,10 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) return 1; } - /* Set up to skip as much as possible now. If there's more left - * (ran out of iov entries) we'll get called again */ - + /* + * Set up to skip as much as possible now. 
If there's more left + * (ran out of iov entries) we'll get called again + */ conn->ksnc_rx_state = SOCKNAL_RX_SLOP; conn->ksnc_rx_nob_left = nob_to_skip; conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space; @@ -1093,8 +1109,8 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) skipped += nob; nob_to_skip -= nob; - } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */ - niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec)); + } while (nob_to_skip && /* mustn't overflow conn's rx iov */ + niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec)); conn->ksnc_rx_niov = niov; conn->ksnc_rx_kiov = NULL; @@ -1104,13 +1120,13 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) } static int -ksocknal_process_receive (ksock_conn_t *conn) +ksocknal_process_receive(ksock_conn_t *conn) { lnet_hdr_t *lhdr; lnet_process_id_t *id; int rc; - LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0); + LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); /* NB: sched lock NOT held */ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */ @@ -1119,13 +1135,13 @@ ksocknal_process_receive (ksock_conn_t *conn) conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER || conn->ksnc_rx_state == SOCKNAL_RX_SLOP); again: - if (conn->ksnc_rx_nob_wanted != 0) { + if (conn->ksnc_rx_nob_wanted) { rc = ksocknal_receive(conn); if (rc <= 0) { - LASSERT (rc != -EAGAIN); + LASSERT(rc != -EAGAIN); - if (rc == 0) + if (!rc) CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n", conn, libcfs_id2str(conn->ksnc_peer->ksnp_id), @@ -1139,12 +1155,12 @@ ksocknal_process_receive (ksock_conn_t *conn) conn->ksnc_port); /* it's not an error if conn is being closed */ - ksocknal_close_conn_and_siblings (conn, - (conn->ksnc_closing) ? 0 : rc); - return (rc == 0 ? -ESHUTDOWN : rc); + ksocknal_close_conn_and_siblings(conn, + (conn->ksnc_closing) ? 0 : rc); + return (!rc ? 
-ESHUTDOWN : rc); } - if (conn->ksnc_rx_nob_wanted != 0) { + if (conn->ksnc_rx_nob_wanted) { /* short read */ return -EAGAIN; } @@ -1169,7 +1185,7 @@ ksocknal_process_receive (ksock_conn_t *conn) } if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP && - conn->ksnc_msg.ksm_csum != 0 && /* has checksum */ + conn->ksnc_msg.ksm_csum && /* has checksum */ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) { /* NOOP Checksum error */ CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n", @@ -1180,10 +1196,10 @@ ksocknal_process_receive (ksock_conn_t *conn) return -EIO; } - if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) { + if (conn->ksnc_msg.ksm_zc_cookies[1]) { __u64 cookie = 0; - LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x); + LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) cookie = conn->ksnc_msg.ksm_zc_cookies[0]; @@ -1191,7 +1207,7 @@ ksocknal_process_receive (ksock_conn_t *conn) rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie, conn->ksnc_msg.ksm_zc_cookies[1]); - if (rc != 0) { + if (rc) { CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie, conn->ksnc_msg.ksm_zc_cookies[1]); @@ -1202,7 +1218,7 @@ ksocknal_process_receive (ksock_conn_t *conn) } if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) { - ksocknal_new_packet (conn, 0); + ksocknal_new_packet(conn, 0); return 0; /* NOOP is done and just return */ } @@ -1224,7 +1240,7 @@ ksocknal_process_receive (ksock_conn_t *conn) /* unpack message header */ conn->ksnc_proto->pro_unpack(&conn->ksnc_msg); - if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) { + if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) { /* Userspace peer */ lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; id = &conn->ksnc_peer->ksnp_id; @@ -1243,14 +1259,14 @@ ksocknal_process_receive (ksock_conn_t *conn) if (rc < 0) { /* I just received garbage: give up on this conn */ ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings (conn, rc); + ksocknal_close_conn_and_siblings(conn, rc); ksocknal_conn_decref(conn); return -EPROTO; } /* I'm racing with ksocknal_recv() */ - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD); + LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE || + conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD); if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD) return 0; @@ -1262,8 +1278,8 @@ ksocknal_process_receive (ksock_conn_t *conn) /* payload all received */ rc = 0; - if (conn->ksnc_rx_nob_left == 0 && /* not truncating */ - conn->ksnc_msg.ksm_csum != 0 && /* has checksum */ + if (!conn->ksnc_rx_nob_left && /* not truncating */ + conn->ksnc_msg.ksm_csum && /* has checksum */ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) { CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), @@ -1271,7 +1287,7 @@ ksocknal_process_receive (ksock_conn_t *conn) rc = -EIO; } - if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) { + if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) { LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; @@ -1285,16 +1301,16 @@ ksocknal_process_receive (ksock_conn_t *conn) lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc); - if (rc != 0) { + if (rc) { ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings (conn, rc); + ksocknal_close_conn_and_siblings(conn, rc); return -EPROTO; } /* Fall through */ case SOCKNAL_RX_SLOP: /* starting new packet? 
*/ - if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left)) + if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left)) return 0; /* come back later */ goto again; /* try to finish reading slop now */ @@ -1308,9 +1324,9 @@ ksocknal_process_receive (ksock_conn_t *conn) } int -ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, - unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int mlen, unsigned int rlen) +ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, + unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) { ksock_conn_t *conn = private; ksock_sched_t *sched = conn->ksnc_scheduler; @@ -1322,7 +1338,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, conn->ksnc_rx_nob_wanted = mlen; conn->ksnc_rx_nob_left = rlen; - if (mlen == 0 || iov != NULL) { + if (!mlen || iov) { conn->ksnc_rx_nkiov = 0; conn->ksnc_rx_kiov = NULL; conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov; @@ -1349,8 +1365,8 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, switch (conn->ksnc_rx_state) { case SOCKNAL_RX_PARSE_WAIT: list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); - wake_up (&sched->kss_waitq); - LASSERT (conn->ksnc_rx_ready); + wake_up(&sched->kss_waitq); + LASSERT(conn->ksnc_rx_ready); break; case SOCKNAL_RX_PARSE: @@ -1396,7 +1412,7 @@ int ksocknal_scheduler(void *arg) cfs_block_allsigs(); rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt); - if (rc != 0) { + if (rc) { CERROR("Can't set CPT affinity to %d: %d\n", info->ksi_cpt, rc); } @@ -1408,18 +1424,20 @@ int ksocknal_scheduler(void *arg) /* Ensure I progress everything semi-fairly */ - if (!list_empty (&sched->kss_rx_conns)) { + if (!list_empty(&sched->kss_rx_conns)) { conn = list_entry(sched->kss_rx_conns.next, - ksock_conn_t, ksnc_rx_list); + ksock_conn_t, ksnc_rx_list); list_del(&conn->ksnc_rx_list); LASSERT(conn->ksnc_rx_scheduled); LASSERT(conn->ksnc_rx_ready); - /* clear rx_ready in case receive isn't complete. + /* + * clear rx_ready in case receive isn't complete. * Do it BEFORE we call process_recv, since * data_ready can set it any time after we release - * kss_lock. */ + * kss_lock. + */ conn->ksnc_rx_ready = 0; spin_unlock_bh(&sched->kss_lock); @@ -1431,18 +1449,20 @@ int ksocknal_scheduler(void *arg) LASSERT(conn->ksnc_rx_scheduled); /* Did process_receive get everything it wanted? 
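The scheduler loop above clears ksnc_rx_ready under kss_lock before processing a connection, so a data_ready callback that fires while the lock is dropped re-sets the flag and the connection gets requeued instead of the event being lost. A generic sketch of that ordering, using pthread locking and made-up flag names purely for illustration:

    #include <pthread.h>
    #include <stdbool.h>

    struct work {
            pthread_mutex_t lock;
            bool ready;    /* set by the event callback (e.g. data_ready) */
            bool queued;   /* currently on the scheduler's list */
    };

    static void process(struct work *w)
    {
            (void)w;       /* actual receive/transmit work runs here, lock dropped */
    }

    static void schedule_one(struct work *w)
    {
            pthread_mutex_lock(&w->lock);
            w->ready = false;             /* clear BEFORE processing, under the lock */
            pthread_mutex_unlock(&w->lock);

            process(w);                   /* new events during this re-set w->ready */

            pthread_mutex_lock(&w->lock);
            if (!w->ready)
                    w->queued = false;    /* done; the callback will requeue on the next event */
            /* else: leave it queued so the scheduler takes another pass */
            pthread_mutex_unlock(&w->lock);
    }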
*/ - if (rc == 0) + if (!rc) conn->ksnc_rx_ready = 1; if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) { - /* Conn blocked waiting for ksocknal_recv() + /* + * Conn blocked waiting for ksocknal_recv() * I change its state (under lock) to signal - * it can be rescheduled */ + * it can be rescheduled + */ conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT; } else if (conn->ksnc_rx_ready) { /* reschedule for rx */ - list_add_tail (&conn->ksnc_rx_list, - &sched->kss_rx_conns); + list_add_tail(&conn->ksnc_rx_list, + &sched->kss_rx_conns); } else { conn->ksnc_rx_scheduled = 0; /* drop my ref */ @@ -1452,25 +1472,24 @@ int ksocknal_scheduler(void *arg) did_something = 1; } - if (!list_empty (&sched->kss_tx_conns)) { + if (!list_empty(&sched->kss_tx_conns)) { LIST_HEAD(zlist); if (!list_empty(&sched->kss_zombie_noop_txs)) { - list_add(&zlist, - &sched->kss_zombie_noop_txs); + list_add(&zlist, &sched->kss_zombie_noop_txs); list_del_init(&sched->kss_zombie_noop_txs); } conn = list_entry(sched->kss_tx_conns.next, - ksock_conn_t, ksnc_tx_list); - list_del (&conn->ksnc_tx_list); + ksock_conn_t, ksnc_tx_list); + list_del(&conn->ksnc_tx_list); LASSERT(conn->ksnc_tx_scheduled); LASSERT(conn->ksnc_tx_ready); LASSERT(!list_empty(&conn->ksnc_tx_queue)); tx = list_entry(conn->ksnc_tx_queue.next, - ksock_tx_t, tx_list); + ksock_tx_t, tx_list); if (conn->ksnc_tx_carrier == tx) ksocknal_next_tx_carrier(conn); @@ -1478,16 +1497,20 @@ int ksocknal_scheduler(void *arg) /* dequeue now so empty list => more to send */ list_del(&tx->tx_list); - /* Clear tx_ready in case send isn't complete. Do + /* + * Clear tx_ready in case send isn't complete. Do * it BEFORE we call process_transmit, since * write_space can set it any time after we release - * kss_lock. */ + * kss_lock. + */ conn->ksnc_tx_ready = 0; spin_unlock_bh(&sched->kss_lock); if (!list_empty(&zlist)) { - /* free zombie noop txs, it's fast because - * noop txs are just put in freelist */ + /* + * free zombie noop txs, it's fast because + * noop txs are just put in freelist + */ ksocknal_txlist_done(NULL, &zlist, 0); } @@ -1496,8 +1519,7 @@ int ksocknal_scheduler(void *arg) if (rc == -ENOMEM || rc == -EAGAIN) { /* Incomplete send: replace tx on HEAD of tx_queue */ spin_lock_bh(&sched->kss_lock); - list_add(&tx->tx_list, - &conn->ksnc_tx_queue); + list_add(&tx->tx_list, &conn->ksnc_tx_queue); } else { /* Complete send; tx -ref */ ksocknal_tx_decref(tx); @@ -1508,13 +1530,15 @@ int ksocknal_scheduler(void *arg) } if (rc == -ENOMEM) { - /* Do nothing; after a short timeout, this - * conn will be reposted on kss_tx_conns. */ + /* + * Do nothing; after a short timeout, this + * conn will be reposted on kss_tx_conns. + */ } else if (conn->ksnc_tx_ready && !list_empty(&conn->ksnc_tx_queue)) { /* reschedule for tx */ list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); + &sched->kss_tx_conns); } else { conn->ksnc_tx_scheduled = 0; /* drop my ref */ @@ -1533,7 +1557,7 @@ int ksocknal_scheduler(void *arg) rc = wait_event_interruptible_exclusive( sched->kss_waitq, !ksocknal_sched_cansleep(sched)); - LASSERT (rc == 0); + LASSERT(!rc); } else { cond_resched(); } @@ -1551,7 +1575,7 @@ int ksocknal_scheduler(void *arg) * Add connection to kss_rx_conns of scheduler * and wakeup the scheduler. 
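When a send returns -EAGAIN or -ENOMEM above, the tx is put back on the head of the queue with list_add() rather than the tail, so its remaining bytes are the next thing the connection transmits and wire order is preserved. A kernel-style sketch of that head-versus-tail choice only; the surrounding refcounting and completion handling are omitted:

    #include <linux/errno.h>
    #include <linux/list.h>

    struct sketch_tx {
            struct list_head tx_list;
    };

    /* A tx that stalled mid-send keeps its place at the head so its remaining
     * bytes go out next; anything else joins the tail in FIFO order. */
    static void requeue_tx(struct list_head *tx_queue, struct sketch_tx *tx, int rc)
    {
            if (rc == -EAGAIN || rc == -ENOMEM)
                    list_add(&tx->tx_list, tx_queue);
            else
                    list_add_tail(&tx->tx_list, tx_queue);
    }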
*/ -void ksocknal_read_callback (ksock_conn_t *conn) +void ksocknal_read_callback(ksock_conn_t *conn) { ksock_sched_t *sched; @@ -1562,13 +1586,12 @@ void ksocknal_read_callback (ksock_conn_t *conn) conn->ksnc_rx_ready = 1; if (!conn->ksnc_rx_scheduled) { /* not being progressed */ - list_add_tail(&conn->ksnc_rx_list, - &sched->kss_rx_conns); + list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); conn->ksnc_rx_scheduled = 1; /* extra ref for scheduler */ ksocknal_conn_addref(conn); - wake_up (&sched->kss_waitq); + wake_up(&sched->kss_waitq); } spin_unlock_bh(&sched->kss_lock); } @@ -1577,7 +1600,7 @@ void ksocknal_read_callback (ksock_conn_t *conn) * Add connection to kss_tx_conns of scheduler * and wakeup the scheduler. */ -void ksocknal_write_callback (ksock_conn_t *conn) +void ksocknal_write_callback(ksock_conn_t *conn) { ksock_sched_t *sched; @@ -1589,20 +1612,19 @@ void ksocknal_write_callback (ksock_conn_t *conn) if (!conn->ksnc_tx_scheduled && /* not being progressed */ !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */ - list_add_tail (&conn->ksnc_tx_list, - &sched->kss_tx_conns); + list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); conn->ksnc_tx_scheduled = 1; /* extra ref for scheduler */ ksocknal_conn_addref(conn); - wake_up (&sched->kss_waitq); + wake_up(&sched->kss_waitq); } spin_unlock_bh(&sched->kss_lock); } static ksock_proto_t * -ksocknal_parse_proto_version (ksock_hello_msg_t *hello) +ksocknal_parse_proto_version(ksock_hello_msg_t *hello) { __u32 version = 0; @@ -1611,7 +1633,7 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC)) version = __swab32(hello->kshm_version); - if (version != 0) { + if (version) { #if SOCKNAL_VERSION_DEBUG if (*ksocknal_tunables.ksnd_protocol == 1) return NULL; @@ -1632,11 +1654,11 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello; - CLASSERT(sizeof (lnet_magicversion_t) == - offsetof (ksock_hello_msg_t, kshm_src_nid)); + CLASSERT(sizeof(lnet_magicversion_t) == + offsetof(ksock_hello_msg_t, kshm_src_nid)); - if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) && - hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR)) + if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) && + hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR)) return &ksocknal_protocol_v1x; } @@ -1644,8 +1666,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) } int -ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn, - lnet_nid_t peer_nid, ksock_hello_msg_t *hello) +ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn, + lnet_nid_t peer_nid, ksock_hello_msg_t *hello) { /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */ ksock_net_t *net = (ksock_net_t *)ni->ni_data; @@ -1653,7 +1675,7 @@ ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn, LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES); /* rely on caller to hold a ref on socket so it wouldn't disappear */ - LASSERT(conn->ksnc_proto != NULL); + LASSERT(conn->ksnc_proto); hello->kshm_src_nid = ni->ni_nid; hello->kshm_dst_nid = peer_nid; @@ -1682,9 +1704,9 @@ ksocknal_invert_type(int type) } int -ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, - ksock_hello_msg_t *hello, lnet_process_id_t *peerid, - __u64 *incarnation) +ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, + ksock_hello_msg_t *hello, lnet_process_id_t *peerid, + __u64 *incarnation) { /* Return < 0 fatal error 
* 0 success @@ -1692,7 +1714,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, * EPROTO protocol version mismatch */ struct socket *sock = conn->ksnc_sock; - int active = (conn->ksnc_proto != NULL); + int active = !!conn->ksnc_proto; int timeout; int proto_match; int rc; @@ -1705,20 +1727,20 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, timeout = active ? *ksocknal_tunables.ksnd_timeout : lnet_acceptor_timeout(); - rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout); - if (rc != 0) { + rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout); + if (rc) { CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT (rc < 0); + rc, &conn->ksnc_ipaddr); + LASSERT(rc < 0); return rc; } if (hello->kshm_magic != LNET_PROTO_MAGIC && hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) && - hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) { + hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { /* Unexpected magic! */ CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n", - __cpu_to_le32 (hello->kshm_magic), + __cpu_to_le32(hello->kshm_magic), LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr); return -EPROTO; @@ -1726,15 +1748,15 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, rc = lnet_sock_read(sock, &hello->kshm_version, sizeof(hello->kshm_version), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0); return rc; } proto = ksocknal_parse_proto_version(hello); - if (proto == NULL) { + if (!proto) { if (!active) { /* unknown protocol from peer, tell peer my protocol */ conn->ksnc_proto = &ksocknal_protocol_v3x; @@ -1760,7 +1782,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, /* receive the rest of hello message anyway */ rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading or checking hello from from %pI4h\n", rc, &conn->ksnc_ipaddr); LASSERT(rc < 0); @@ -1792,8 +1814,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype); if (conn->ksnc_type == SOCKLND_CONN_NONE) { CERROR("Unexpected type %d from %s ip %pI4h\n", - hello->kshm_ctype, libcfs_id2str(*peerid), - &conn->ksnc_ipaddr); + hello->kshm_ctype, libcfs_id2str(*peerid), + &conn->ksnc_ipaddr); return -EPROTO; } @@ -1816,9 +1838,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) { CERROR("Mismatched types: me %d, %s ip %pI4h %d\n", - conn->ksnc_type, libcfs_id2str(*peerid), - &conn->ksnc_ipaddr, - hello->kshm_ctype); + conn->ksnc_type, libcfs_id2str(*peerid), + &conn->ksnc_ipaddr, hello->kshm_ctype); return -EPROTO; } @@ -1826,7 +1847,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, } static int -ksocknal_connect (ksock_route_t *route) +ksocknal_connect(ksock_route_t *route) { LIST_HEAD(zombies); ksock_peer_t *peer = route->ksnr_peer; @@ -1850,10 +1871,12 @@ ksocknal_connect (ksock_route_t *route) for (;;) { wanted = ksocknal_route_mask() & ~route->ksnr_connected; - /* stop connecting if peer/route got closed under me, or - * route got connected while queued */ + /* + * stop connecting if peer/route got closed under me, or + * route got connected while queued + */ if (peer->ksnp_closing || route->ksnr_deleted || - wanted == 0) { + !wanted) { retry_later = 0; break; } @@ -1869,14 +1892,14 @@ ksocknal_connect 
(ksock_route_t *route) if (retry_later) /* needs reschedule */ break; - if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) { + if (wanted & (1 << SOCKLND_CONN_ANY)) { type = SOCKLND_CONN_ANY; - } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) { + } else if (wanted & (1 << SOCKLND_CONN_CONTROL)) { type = SOCKLND_CONN_CONTROL; - } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) { + } else if (wanted & (1 << SOCKLND_CONN_BULK_IN)) { type = SOCKLND_CONN_BULK_IN; } else { - LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0); + LASSERT(wanted & (1 << SOCKLND_CONN_BULK_OUT)); type = SOCKLND_CONN_BULK_OUT; } @@ -1893,7 +1916,7 @@ ksocknal_connect (ksock_route_t *route) rc = lnet_connect(&sock, peer->ksnp_id.nid, route->ksnr_myipaddr, route->ksnr_ipaddr, route->ksnr_port); - if (rc != 0) + if (rc) goto failed; rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type); @@ -1904,9 +1927,11 @@ ksocknal_connect (ksock_route_t *route) goto failed; } - /* A +ve RC means I have to retry because I lost the connection - * race or I have to renegotiate protocol version */ - retry_later = (rc != 0); + /* + * A +ve RC means I have to retry because I lost the connection + * race or I have to renegotiate protocol version + */ + retry_later = (rc); if (retry_later) CDEBUG(D_NET, "peer %s: conn race, retry later.\n", libcfs_nid2str(peer->ksnp_id.nid)); @@ -1918,17 +1943,20 @@ ksocknal_connect (ksock_route_t *route) route->ksnr_connecting = 0; if (retry_later) { - /* re-queue for attention; this frees me up to handle - * the peer's incoming connection request */ - + /* + * re-queue for attention; this frees me up to handle + * the peer's incoming connection request + */ if (rc == EALREADY || - (rc == 0 && peer->ksnp_accepting > 0)) { - /* We want to introduce a delay before next + (!rc && peer->ksnp_accepting > 0)) { + /* + * We want to introduce a delay before next * attempt to connect if we lost conn race, * but the race is resolved quickly usually, - * so min_reconnectms should be good heuristic */ + * so min_reconnectms should be good heuristic + */ route->ksnr_retry_interval = - cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000; + cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000; route->ksnr_timeout = cfs_time_add(cfs_time_current(), route->ksnr_retry_interval); } @@ -1949,30 +1977,34 @@ ksocknal_connect (ksock_route_t *route) route->ksnr_retry_interval *= 2; route->ksnr_retry_interval = max(route->ksnr_retry_interval, - cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000); + cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000); route->ksnr_retry_interval = min(route->ksnr_retry_interval, - cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000); + cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000); - LASSERT (route->ksnr_retry_interval != 0); + LASSERT(route->ksnr_retry_interval); route->ksnr_timeout = cfs_time_add(cfs_time_current(), route->ksnr_retry_interval); if (!list_empty(&peer->ksnp_tx_queue) && - peer->ksnp_accepting == 0 && - ksocknal_find_connecting_route_locked(peer) == NULL) { + !peer->ksnp_accepting && + !ksocknal_find_connecting_route_locked(peer)) { ksock_conn_t *conn; - /* ksnp_tx_queue is queued on a conn on successful - * connection for V1.x and V2.x */ - if (!list_empty (&peer->ksnp_conns)) { + /* + * ksnp_tx_queue is queued on a conn on successful + * connection for V1.x and V2.x + */ + if (!list_empty(&peer->ksnp_conns)) { conn = list_entry(peer->ksnp_conns.next, - ksock_conn_t, ksnc_list); - LASSERT 
(conn->ksnc_proto == &ksocknal_protocol_v3x); + ksock_conn_t, ksnc_list); + LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); } - /* take all the blocked packets while I've got the lock and - * complete below... */ + /* + * take all the blocked packets while I've got the lock and + * complete below... + */ list_splice_init(&peer->ksnp_tx_queue, &zombies); } @@ -2011,8 +2043,10 @@ ksocknal_connd_check_start(time64_t sec, long *timeout) if (total >= *ksocknal_tunables.ksnd_nconnds_max || total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) { - /* can't create more connd, or still have enough - * threads to handle more connecting */ + /* + * can't create more connd, or still have enough + * threads to handle more connecting + */ return 0; } @@ -2041,7 +2075,7 @@ ksocknal_connd_check_start(time64_t sec, long *timeout) rc = ksocknal_thread_start(ksocknal_connd, NULL, name); spin_lock_bh(&ksocknal_data.ksnd_connd_lock); - if (rc == 0) + if (!rc) return 1; /* we tried ... */ @@ -2093,8 +2127,10 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout) ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV; } -/* Go through connd_routes queue looking for a route that we can process - * right now, @timeout_p can be updated if we need to come back later */ +/* + * Go through connd_routes queue looking for a route that we can process + * right now, @timeout_p can be updated if we need to come back later + */ static ksock_route_t * ksocknal_connd_get_route_locked(signed long *timeout_p) { @@ -2104,10 +2140,9 @@ ksocknal_connd_get_route_locked(signed long *timeout_p) now = cfs_time_current(); /* connd_routes can contain both pending and ordinary routes */ - list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes, - ksnr_connd_list) { - - if (route->ksnr_retry_interval == 0 || + list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes, + ksnr_connd_list) { + if (!route->ksnr_retry_interval || cfs_time_aftereq(now, route->ksnr_timeout)) return route; @@ -2120,7 +2155,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p) } int -ksocknal_connd (void *arg) +ksocknal_connd(void *arg) { spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; ksock_connreq_t *cr; @@ -2172,15 +2207,17 @@ ksocknal_connd (void *arg) spin_lock_bh(connd_lock); } - /* Only handle an outgoing connection request if there + /* + * Only handle an outgoing connection request if there * is a thread left to handle incoming connections and - * create new connd */ + * create new connd + */ if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV < ksocknal_data.ksnd_connd_running) { route = ksocknal_connd_get_route_locked(&timeout); } - if (route != NULL) { - list_del (&route->ksnr_connd_list); + if (route) { + list_del(&route->ksnr_connd_list); ksocknal_data.ksnd_connd_connecting++; spin_unlock_bh(connd_lock); dropped_lock = 1; @@ -2231,24 +2268,26 @@ ksocknal_connd (void *arg) } static ksock_conn_t * -ksocknal_find_timed_out_conn (ksock_peer_t *peer) +ksocknal_find_timed_out_conn(ksock_peer_t *peer) { /* We're called with a shared lock on ksnd_global_lock */ ksock_conn_t *conn; struct list_head *ctmp; - list_for_each (ctmp, &peer->ksnp_conns) { + list_for_each(ctmp, &peer->ksnp_conns) { int error; - conn = list_entry (ctmp, ksock_conn_t, ksnc_list); + conn = list_entry(ctmp, ksock_conn_t, ksnc_list); /* Don't need the {get,put}connsock dance to deref ksnc_sock */ LASSERT(!conn->ksnc_closing); - /* SOCK_ERROR will reset error code of socket in - * some platform (like Darwin8.x) */ + /* + * SOCK_ERROR 
will reset error code of socket in + * some platform (like Darwin8.x) + */ error = conn->ksnc_sock->sk->sk_err; - if (error != 0) { + if (error) { ksocknal_conn_addref(conn); switch (error) { @@ -2292,11 +2331,13 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) } if ((!list_empty(&conn->ksnc_tx_queue) || - conn->ksnc_sock->sk->sk_wmem_queued != 0) && + conn->ksnc_sock->sk->sk_wmem_queued) && cfs_time_aftereq(cfs_time_current(), conn->ksnc_tx_deadline)) { - /* Timed out messages queued for sending or - * buffered in the socket's send buffer */ + /* + * Timed out messages queued for sending or + * buffered in the socket's send buffer + */ ksocknal_conn_addref(conn); CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n", libcfs_id2str(peer->ksnp_id), @@ -2313,20 +2354,18 @@ static inline void ksocknal_flush_stale_txs(ksock_peer_t *peer) { ksock_tx_t *tx; + ksock_tx_t *tmp; LIST_HEAD(stale_txs); write_lock_bh(&ksocknal_data.ksnd_global_lock); - while (!list_empty (&peer->ksnp_tx_queue)) { - tx = list_entry (peer->ksnp_tx_queue.next, - ksock_tx_t, tx_list); - + list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) { if (!cfs_time_aftereq(cfs_time_current(), tx->tx_deadline)) break; - list_del (&tx->tx_list); - list_add_tail (&tx->tx_list, &stale_txs); + list_del(&tx->tx_list); + list_add_tail(&tx->tx_list, &stale_txs); } write_unlock_bh(&ksocknal_data.ksnd_global_lock); @@ -2336,6 +2375,7 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer) static int ksocknal_send_keepalive_locked(ksock_peer_t *peer) + __must_hold(&ksocknal_data.ksnd_global_lock) { ksock_sched_t *sched; ksock_conn_t *conn; @@ -2356,12 +2396,14 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer) if (time_before(cfs_time_current(), peer->ksnp_send_keepalive)) return 0; - /* retry 10 secs later, so we wouldn't put pressure - * on this peer if we failed to send keepalive this time */ + /* + * retry 10 secs later, so we wouldn't put pressure + * on this peer if we failed to send keepalive this time + */ peer->ksnp_send_keepalive = cfs_time_shift(10); conn = ksocknal_find_conn_locked(peer, NULL, 1); - if (conn != NULL) { + if (conn) { sched = conn->ksnc_scheduler; spin_lock_bh(&sched->kss_lock); @@ -2378,12 +2420,12 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer) /* cookie = 1 is reserved for keepalive PING */ tx = ksocknal_alloc_tx_noop(1, 1); - if (tx == NULL) { + if (!tx) { read_lock(&ksocknal_data.ksnd_global_lock); return -ENOMEM; } - if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) { + if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) { read_lock(&ksocknal_data.ksnd_global_lock); return 1; } @@ -2395,7 +2437,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer) } static void -ksocknal_check_peer_timeouts (int idx) +ksocknal_check_peer_timeouts(int idx) { struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; ksock_peer_t *peer; @@ -2403,9 +2445,11 @@ ksocknal_check_peer_timeouts (int idx) ksock_tx_t *tx; again: - /* NB. We expect to have a look at all the peers and not find any + /* + * NB. We expect to have a look at all the peers and not find any * connections to time out, so we just use a shared lock while we - * take a look... */ + * take a look... 
+ */ read_lock(&ksocknal_data.ksnd_global_lock); list_for_each_entry(peer, peers, ksnp_list) { @@ -2413,35 +2457,37 @@ ksocknal_check_peer_timeouts (int idx) int resid = 0; int n = 0; - if (ksocknal_send_keepalive_locked(peer) != 0) { + if (ksocknal_send_keepalive_locked(peer)) { read_unlock(&ksocknal_data.ksnd_global_lock); goto again; } - conn = ksocknal_find_timed_out_conn (peer); + conn = ksocknal_find_timed_out_conn(peer); - if (conn != NULL) { + if (conn) { read_unlock(&ksocknal_data.ksnd_global_lock); - ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); + ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); - /* NB we won't find this one again, but we can't + /* + * NB we won't find this one again, but we can't * just proceed with the next peer, since we dropped - * ksnd_global_lock and it might be dead already! */ + * ksnd_global_lock and it might be dead already! + */ ksocknal_conn_decref(conn); goto again; } - /* we can't process stale txs right here because we're - * holding only shared lock */ - if (!list_empty (&peer->ksnp_tx_queue)) { - ksock_tx_t *tx = - list_entry (peer->ksnp_tx_queue.next, - ksock_tx_t, tx_list); + /* + * we can't process stale txs right here because we're + * holding only shared lock + */ + if (!list_empty(&peer->ksnp_tx_queue)) { + ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next, + ksock_tx_t, tx_list); if (cfs_time_aftereq(cfs_time_current(), tx->tx_deadline)) { - ksocknal_peer_addref(peer); read_unlock(&ksocknal_data.ksnd_global_lock); @@ -2466,13 +2512,13 @@ ksocknal_check_peer_timeouts (int idx) n++; } - if (n == 0) { + if (!n) { spin_unlock(&peer->ksnp_lock); continue; } tx = list_entry(peer->ksnp_zc_req_list.next, - ksock_tx_t, tx_zc_list); + ksock_tx_t, tx_zc_list); deadline = tx->tx_deadline; resid = tx->tx_resid; conn = tx->tx_conn; @@ -2486,7 +2532,7 @@ ksocknal_check_peer_timeouts (int idx) cfs_duration_sec(cfs_time_current() - deadline), resid, conn->ksnc_sock->sk->sk_wmem_queued); - ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); + ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); ksocknal_conn_decref(conn); goto again; } @@ -2495,7 +2541,7 @@ ksocknal_check_peer_timeouts (int idx) } int -ksocknal_reaper (void *arg) +ksocknal_reaper(void *arg) { wait_queue_t wait; ksock_conn_t *conn; @@ -2515,12 +2561,10 @@ ksocknal_reaper (void *arg) spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); while (!ksocknal_data.ksnd_shuttingdown) { - - if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) { - conn = list_entry (ksocknal_data. 
\ - ksnd_deathrow_conns.next, - ksock_conn_t, ksnc_list); - list_del (&conn->ksnc_list); + if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) { + conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next, + ksock_conn_t, ksnc_list); + list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2531,10 +2575,10 @@ ksocknal_reaper (void *arg) continue; } - if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) { - conn = list_entry (ksocknal_data.ksnd_zombie_conns.\ - next, ksock_conn_t, ksnc_list); - list_del (&conn->ksnc_list); + if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) { + conn = list_entry(ksocknal_data.ksnd_zombie_conns.next, + ksock_conn_t, ksnc_list); + list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2544,9 +2588,9 @@ ksocknal_reaper (void *arg) continue; } - if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) { + if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) { list_add(&enomem_conns, - &ksocknal_data.ksnd_enomem_conns); + &ksocknal_data.ksnd_enomem_conns); list_del_init(&ksocknal_data.ksnd_enomem_conns); } @@ -2554,10 +2598,10 @@ ksocknal_reaper (void *arg) /* reschedule all the connections that stalled with ENOMEM... */ nenomem_conns = 0; - while (!list_empty (&enomem_conns)) { - conn = list_entry (enomem_conns.next, - ksock_conn_t, ksnc_tx_list); - list_del (&conn->ksnc_tx_list); + while (!list_empty(&enomem_conns)) { + conn = list_entry(enomem_conns.next, ksock_conn_t, + ksnc_tx_list); + list_del(&conn->ksnc_tx_list); sched = conn->ksnc_scheduler; @@ -2566,7 +2610,7 @@ ksocknal_reaper (void *arg) LASSERT(conn->ksnc_tx_scheduled); conn->ksnc_tx_ready = 1; list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); + &sched->kss_tx_conns); wake_up(&sched->kss_waitq); spin_unlock_bh(&sched->kss_lock); @@ -2580,21 +2624,22 @@ ksocknal_reaper (void *arg) const int p = 1; int chunk = ksocknal_data.ksnd_peer_hash_size; - /* Time to check for timeouts on a few more peers: I do + /* + * Time to check for timeouts on a few more peers: I do * checks every 'p' seconds on a proportion of the peer * table and I need to check every connection 'n' times * within a timeout interval, to ensure I detect a * timeout on any connection within (n+1)/n times the - * timeout interval. */ - + * timeout interval. + */ if (*ksocknal_tunables.ksnd_timeout > n * p) chunk = (chunk * n * p) / *ksocknal_tunables.ksnd_timeout; - if (chunk == 0) + if (!chunk) chunk = 1; for (i = 0; i < chunk; i++) { - ksocknal_check_peer_timeouts (peer_index); + ksocknal_check_peer_timeouts(peer_index); peer_index = (peer_index + 1) % ksocknal_data.ksnd_peer_hash_size; } @@ -2602,25 +2647,27 @@ ksocknal_reaper (void *arg) deadline = cfs_time_add(deadline, cfs_time_seconds(p)); } - if (nenomem_conns != 0) { - /* Reduce my timeout if I rescheduled ENOMEM conns. + if (nenomem_conns) { + /* + * Reduce my timeout if I rescheduled ENOMEM conns. * This also prevents me getting woken immediately - * if any go back on my enomem list. */ + * if any go back on my enomem list. 
+ */ timeout = SOCKNAL_ENOMEM_RETRY; } ksocknal_data.ksnd_reaper_waketime = cfs_time_add(cfs_time_current(), timeout); - set_current_state (TASK_INTERRUPTIBLE); - add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait); + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait); if (!ksocknal_data.ksnd_shuttingdown && - list_empty (&ksocknal_data.ksnd_deathrow_conns) && - list_empty (&ksocknal_data.ksnd_zombie_conns)) + list_empty(&ksocknal_data.ksnd_deathrow_conns) && + list_empty(&ksocknal_data.ksnd_zombie_conns)) schedule_timeout(timeout); - set_current_state (TASK_RUNNING); - remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait); + set_current_state(TASK_RUNNING); + remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait); spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); } diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c index cf8e43bd3c03..3e1f24e77f64 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -45,13 +45,13 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */ LASSERT(!conn->ksnc_closing); - if (rc != 0) { + if (rc) { CERROR("Error %d getting sock peer IP\n", rc); return rc; } rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL); - if (rc != 0) { + if (rc) { CERROR("Error %d getting sock local IP\n", rc); return rc; } @@ -67,9 +67,11 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn) if (conn->ksnc_proto == &ksocknal_protocol_v1x) return 0; - /* ZC if the socket supports scatter/gather and doesn't need software - * checksums */ - return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0); + /* + * ZC if the socket supports scatter/gather and doesn't need software + * checksums + */ + return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK)); } int @@ -82,12 +84,13 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */ conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */ tx->tx_nob == tx->tx_resid && /* frist sending */ - tx->tx_msg.ksm_csum == 0) /* not checksummed */ + !tx->tx_msg.ksm_csum) /* not checksummed */ ksocknal_lib_csum_tx(tx); - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ - + /* + * NB we can't trust socket ops to either consume our iovs + * or leave them alone. + */ { #if SOCKNAL_SINGLE_FRAG_TX struct kvec scratch; @@ -123,11 +126,13 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) int nob; /* Not NOOP message */ - LASSERT(tx->tx_lnetmsg != NULL); + LASSERT(tx->tx_lnetmsg); - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ - if (tx->tx_msg.ksm_zc_cookies[0] != 0) { + /* + * NB we can't trust socket ops to either consume our iovs + * or leave them alone. 
+ */ + if (tx->tx_msg.ksm_zc_cookies[0]) { /* Zero copy is enabled */ struct sock *sk = sock->sk; struct page *page = kiov->kiov_page; @@ -136,13 +141,13 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) int msgflg = MSG_DONTWAIT; CDEBUG(D_NET, "page %p + offset %x for %d\n", - page, offset, kiov->kiov_len); + page, offset, kiov->kiov_len); if (!list_empty(&conn->ksnc_tx_queue) || fragsize < tx->tx_resid) msgflg |= MSG_MORE; - if (sk->sk_prot->sendpage != NULL) { + if (sk->sk_prot->sendpage) { rc = sk->sk_prot->sendpage(sk, page, offset, fragsize, msgflg); } else { @@ -187,13 +192,14 @@ ksocknal_lib_eager_ack(ksock_conn_t *conn) int opt = 1; struct socket *sock = conn->ksnc_sock; - /* Remind the socket to ACK eagerly. If I don't, the socket might + /* + * Remind the socket to ACK eagerly. If I don't, the socket might * think I'm about to send something it could piggy-back the ACK * on, introducing delay in completing zero-copy sends in my - * peer. */ - - kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, - (char *)&opt, sizeof(opt)); + * peer. + */ + kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt, + sizeof(opt)); } int @@ -218,8 +224,10 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn) int sum; __u32 saved_csum; - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ + /* + * NB we can't trust socket ops to either consume our iovs + * or leave them alone. + */ LASSERT(niov > 0); for (nob = i = 0; i < niov; i++) { @@ -228,8 +236,8 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn) } LASSERT(nob <= conn->ksnc_rx_nob_wanted); - rc = kernel_recvmsg(conn->ksnc_sock, &msg, - scratchiov, niov, nob, MSG_DONTWAIT); + rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob, + MSG_DONTWAIT); saved_csum = 0; if (conn->ksnc_proto == &ksocknal_protocol_v2x) { @@ -237,7 +245,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn) conn->ksnc_msg.ksm_csum = 0; } - if (saved_csum != 0) { + if (saved_csum) { /* accumulate checksum */ for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { LASSERT(i < niov); @@ -258,7 +266,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn) static void ksocknal_lib_kiov_vunmap(void *addr) { - if (addr == NULL) + if (!addr) return; vunmap(addr); @@ -272,7 +280,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, int nob; int i; - if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL) + if (!*ksocknal_tunables.ksnd_zc_recv || !pages) return NULL; LASSERT(niov <= LNET_MAX_IOV); @@ -282,7 +290,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, return NULL; for (nob = i = 0; i < niov; i++) { - if ((kiov[i].kiov_offset != 0 && i > 0) || + if ((kiov[i].kiov_offset && i > 0) || (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) return NULL; @@ -291,7 +299,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, } addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL); - if (addr == NULL) + if (!addr) return NULL; iov->iov_base = addr + kiov[0].kiov_offset; @@ -329,10 +337,12 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) int fragnob; int n; - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ + /* + * NB we can't trust socket ops to either consume our iovs + * or leave them alone. 
+ */ addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages); - if (addr != NULL) { + if (addr) { nob = scratchiov[0].iov_len; n = 1; @@ -347,17 +357,19 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) LASSERT(nob <= conn->ksnc_rx_nob_wanted); - rc = kernel_recvmsg(conn->ksnc_sock, &msg, - (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT); + rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov, + n, nob, MSG_DONTWAIT); - if (conn->ksnc_msg.ksm_csum != 0) { + if (conn->ksnc_msg.ksm_csum) { for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { LASSERT(i < niov); - /* Dang! have to kmap again because I have nowhere to + /* + * Dang! have to kmap again because I have nowhere to * stash the mapped address. But by doing it while the * page is still mapped, the kernel just bumps the map - * count and returns me the address it stashed. */ + * count and returns me the address it stashed. + */ base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; fragnob = kiov[i].kiov_len; if (fragnob > sum) @@ -370,7 +382,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) } } - if (addr != NULL) { + if (addr) { ksocknal_lib_kiov_vunmap(addr); } else { for (i = 0; i < niov; i++) @@ -388,7 +400,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx) void *base; LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg); - LASSERT(tx->tx_conn != NULL); + LASSERT(tx->tx_conn); LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x); tx->tx_msg.ksm_csum = 0; @@ -396,7 +408,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx) csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base, tx->tx_iov[0].iov_len); - if (tx->tx_kiov != NULL) { + if (tx->tx_kiov) { for (i = 0; i < tx->tx_nkiov; i++) { base = kmap(tx->tx_kiov[i].kiov_page) + tx->tx_kiov[i].kiov_offset; @@ -427,22 +439,22 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int * int rc; rc = ksocknal_connsock_addref(conn); - if (rc != 0) { + if (rc) { LASSERT(conn->ksnc_closing); *txmem = *rxmem = *nagle = 0; return -ESHUTDOWN; } rc = lnet_sock_getbuf(sock, txmem, rxmem); - if (rc == 0) { + if (!rc) { len = sizeof(*nagle); rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)nagle, &len); + (char *)nagle, &len); } ksocknal_connsock_decref(conn); - if (rc == 0) + if (!rc) *nagle = !*nagle; else *txmem = *rxmem = *nagle = 0; @@ -463,23 +475,24 @@ ksocknal_lib_setup_sock(struct socket *sock) sock->sk->sk_allocation = GFP_NOFS; - /* Ensure this socket aborts active sends immediately when we close - * it. */ - + /* + * Ensure this socket aborts active sends immediately when we close + * it. 
+ */ linger.l_onoff = 0; linger.l_linger = 0; - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, - (char *)&linger, sizeof(linger)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger, + sizeof(linger)); + if (rc) { CERROR("Can't set SO_LINGER: %d\n", rc); return rc; } option = -1; - rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, - (char *)&option, sizeof(option)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option, + sizeof(option)); + if (rc) { CERROR("Can't set SO_LINGER2: %d\n", rc); return rc; } @@ -488,8 +501,8 @@ ksocknal_lib_setup_sock(struct socket *sock) option = 1; rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)&option, sizeof(option)); - if (rc != 0) { + (char *)&option, sizeof(option)); + if (rc) { CERROR("Can't disable nagle: %d\n", rc); return rc; } @@ -497,10 +510,10 @@ ksocknal_lib_setup_sock(struct socket *sock) rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size, *ksocknal_tunables.ksnd_rx_buffer_size); - if (rc != 0) { + if (rc) { CERROR("Can't set buffer tx %d, rx %d buffers: %d\n", - *ksocknal_tunables.ksnd_tx_buffer_size, - *ksocknal_tunables.ksnd_rx_buffer_size, rc); + *ksocknal_tunables.ksnd_tx_buffer_size, + *ksocknal_tunables.ksnd_rx_buffer_size, rc); return rc; } @@ -514,9 +527,9 @@ ksocknal_lib_setup_sock(struct socket *sock) do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0); option = (do_keepalive ? 1 : 0); - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - (char *)&option, sizeof(option)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option, + sizeof(option)); + if (rc) { CERROR("Can't set SO_KEEPALIVE: %d\n", rc); return rc; } @@ -524,23 +537,23 @@ ksocknal_lib_setup_sock(struct socket *sock) if (!do_keepalive) return 0; - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, - (char *)&keep_idle, sizeof(keep_idle)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle, + sizeof(keep_idle)); + if (rc) { CERROR("Can't set TCP_KEEPIDLE: %d\n", rc); return rc; } rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, - (char *)&keep_intvl, sizeof(keep_intvl)); - if (rc != 0) { + (char *)&keep_intvl, sizeof(keep_intvl)); + if (rc) { CERROR("Can't set TCP_KEEPINTVL: %d\n", rc); return rc; } - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, - (char *)&keep_count, sizeof(keep_count)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count, + sizeof(keep_count)); + if (rc) { CERROR("Can't set TCP_KEEPCNT: %d\n", rc); return rc; } @@ -558,7 +571,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn) int rc; rc = ksocknal_connsock_addref(conn); - if (rc != 0) /* being shut down */ + if (rc) /* being shut down */ return; sk = conn->ksnc_sock->sk; @@ -570,8 +583,8 @@ ksocknal_lib_push_conn(ksock_conn_t *conn) release_sock(sk); rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - LASSERT(rc == 0); + (char *)&val, sizeof(val)); + LASSERT(!rc); lock_sock(sk); tp->nonagle = nonagle; @@ -593,11 +606,12 @@ ksocknal_data_ready(struct sock *sk) read_lock(&ksocknal_data.ksnd_global_lock); conn = sk->sk_user_data; - if (conn == NULL) { /* raced with ksocknal_terminate_conn */ + if (!conn) { /* raced with ksocknal_terminate_conn */ LASSERT(sk->sk_data_ready != &ksocknal_data_ready); sk->sk_data_ready(sk); - } else + } else { ksocknal_read_callback(conn); + } read_unlock(&ksocknal_data.ksnd_global_lock); } @@ 
-619,14 +633,14 @@ ksocknal_write_space(struct sock *sk) CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n", sk, wspace, min_wpace, conn, - (conn == NULL) ? "" : (conn->ksnc_tx_ready ? + !conn ? "" : (conn->ksnc_tx_ready ? " ready" : " blocked"), - (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ? + !conn ? "" : (conn->ksnc_tx_scheduled ? " scheduled" : " idle"), - (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ? + !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ? " empty" : " queued")); - if (conn == NULL) { /* raced with ksocknal_terminate_conn */ + if (!conn) { /* raced with ksocknal_terminate_conn */ LASSERT(sk->sk_write_space != &ksocknal_write_space); sk->sk_write_space(sk); @@ -637,10 +651,11 @@ ksocknal_write_space(struct sock *sk) if (wspace >= min_wpace) { /* got enough space */ ksocknal_write_callback(conn); - /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the + /* + * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the * ENOMEM check in ksocknal_transmit is race-free (think about - * it). */ - + * it). + */ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); } @@ -666,15 +681,19 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn) void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn) { - /* Remove conn's network callbacks. + /* + * Remove conn's network callbacks. * NB I _have_ to restore the callback, rather than storing a noop, - * since the socket could survive past this module being unloaded!! */ + * since the socket could survive past this module being unloaded!! + */ sock->sk->sk_data_ready = conn->ksnc_saved_data_ready; sock->sk->sk_write_space = conn->ksnc_saved_write_space; - /* A callback could be in progress already; they hold a read lock + /* + * A callback could be in progress already; they hold a read lock * on ksnd_global_lock (to serialise with me) and NOOP if - * sk_user_data is NULL. */ + * sk_user_data is NULL. + */ sock->sk->sk_user_data = NULL; return ; @@ -691,14 +710,16 @@ ksocknal_lib_memory_pressure(ksock_conn_t *conn) if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) && !conn->ksnc_tx_ready) { - /* SOCK_NOSPACE is set when the socket fills + /* + * SOCK_NOSPACE is set when the socket fills * and cleared in the write_space callback * (which also sets ksnc_tx_ready). If * SOCK_NOSPACE and ksnc_tx_ready are BOTH * zero, I didn't fill the socket and * write_space won't reschedule me, so I * return -ENOMEM to get my caller to retry - * after a timeout */ + * after a timeout + */ rc = -ENOMEM; } diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c index fdb2b23e2ef0..6329cbe66573 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c @@ -14,9 +14,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "socklnd.h" @@ -41,8 +38,10 @@ static int peer_timeout = 180; module_param(peer_timeout, int, 0444); MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)"); -/* Number of daemons in each thread pool which is percpt, - * we will estimate reasonable value based on CPUs if it's not set. 
*/ +/* + * Number of daemons in each thread pool which is percpt, + * we will estimate reasonable value based on CPUs if it's not set. + */ static unsigned int nscheds; module_param(nscheds, int, 0444); MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting"); @@ -72,7 +71,7 @@ static int typed_conns = 1; module_param(typed_conns, int, 0444); MODULE_PARM_DESC(typed_conns, "use different sockets for bulk"); -static int min_bulk = 1<<10; +static int min_bulk = 1 << 10; module_param(min_bulk, int, 0644); MODULE_PARM_DESC(min_bulk, "smallest 'large' message"); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c index 986bce4c9f3b..32cc31e4cc29 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c @@ -19,9 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "socklnd.h" @@ -56,15 +53,14 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn) /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ LASSERT(!list_empty(&conn->ksnc_tx_queue)); - LASSERT(tx != NULL); + LASSERT(tx); /* Next TX that can carry ZC-ACK or LNet message */ if (tx->tx_list.next == &conn->ksnc_tx_queue) { /* no more packets queued */ conn->ksnc_tx_carrier = NULL; } else { - conn->ksnc_tx_carrier = list_entry(tx->tx_list.next, - ksock_tx_t, tx_list); + conn->ksnc_tx_carrier = list_next_entry(tx, tx_list); LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type); } } @@ -75,8 +71,8 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, { ksock_tx_t *tx = conn->ksnc_tx_carrier; - LASSERT(tx_ack == NULL || - tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); + LASSERT(!tx_ack || + tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); /* * Enqueue or piggyback tx_ack / cookie @@ -85,10 +81,10 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, * . There is tx can piggyback cookie of tx_ack (or cookie), * piggyback the cookie and return the tx. */ - if (tx == NULL) { - if (tx_ack != NULL) { + if (!tx) { + if (tx_ack) { list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); + &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_ack; } return 0; @@ -96,16 +92,16 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) { /* tx is noop zc-ack, can't piggyback zc-ack cookie */ - if (tx_ack != NULL) + if (tx_ack) list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); + &conn->ksnc_tx_queue); return 0; } LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET); - LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0); + LASSERT(!tx->tx_msg.ksm_zc_cookies[1]); - if (tx_ack != NULL) + if (tx_ack) cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; /* piggyback the zc-ack cookie */ @@ -128,7 +124,7 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) * . If there is NOOP on the connection, piggyback the cookie * and replace the NOOP tx, and return the NOOP tx. 
*/ - if (tx == NULL) { /* nothing on queue */ + if (!tx) { /* nothing on queue */ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_msg; return NULL; @@ -162,22 +158,22 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie); /* non-blocking ZC-ACK (to router) */ - LASSERT(tx_ack == NULL || - tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); + LASSERT(!tx_ack || + tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); tx = conn->ksnc_tx_carrier; - if (tx == NULL) { - if (tx_ack != NULL) { + if (!tx) { + if (tx_ack) { list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); + &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_ack; } return 0; } - /* conn->ksnc_tx_carrier != NULL */ + /* conn->ksnc_tx_carrier */ - if (tx_ack != NULL) + if (tx_ack) cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */ @@ -185,7 +181,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) { /* replace the keepalive PING with a real ACK */ - LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); + LASSERT(!tx->tx_msg.ksm_zc_cookies[0]); tx->tx_msg.ksm_zc_cookies[1] = cookie; return 1; } @@ -197,7 +193,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, return 1; /* XXX return error in the future */ } - if (tx->tx_msg.ksm_zc_cookies[0] == 0) { + if (!tx->tx_msg.ksm_zc_cookies[0]) { /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */ if (tx->tx_msg.ksm_zc_cookies[1] > cookie) { tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1]; @@ -233,7 +229,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, tmp = tx->tx_msg.ksm_zc_cookies[0]; } - if (tmp != 0) { + if (tmp) { /* range of cookies */ tx->tx_msg.ksm_zc_cookies[0] = tmp - 1; tx->tx_msg.ksm_zc_cookies[1] = tmp + 1; @@ -261,7 +257,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, } /* failed to piggyback ZC-ACK */ - if (tx_ack != NULL) { + if (tx_ack) { list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue); /* the next tx can piggyback at least 1 ACK */ ksocknal_next_tx_carrier(conn); @@ -280,7 +276,7 @@ ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) return SOCKNAL_MATCH_YES; #endif - if (tx == NULL || tx->tx_lnetmsg == NULL) { + if (!tx || !tx->tx_lnetmsg) { /* noop packet */ nob = offsetof(ksock_msg_t, ksm_u); } else { @@ -319,7 +315,7 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) { int nob; - if (tx == NULL || tx->tx_lnetmsg == NULL) + if (!tx || !tx->tx_lnetmsg) nob = offsetof(ksock_msg_t, ksm_u); else nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t); @@ -334,7 +330,7 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) case SOCKLND_CONN_ACK: if (nonblk) return SOCKNAL_MATCH_YES; - else if (tx == NULL || tx->tx_lnetmsg == NULL) + else if (!tx || !tx->tx_lnetmsg) return SOCKNAL_MATCH_MAY; else return SOCKNAL_MATCH_NO; @@ -369,10 +365,10 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) read_lock(&ksocknal_data.ksnd_global_lock); conn = ksocknal_find_conn_locked(peer, NULL, !!remote); - if (conn != NULL) { + if (conn) { ksock_sched_t *sched = conn->ksnc_scheduler; - LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL); + LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); spin_lock_bh(&sched->kss_lock); @@ -390,11 +386,11 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) /* ACK connection is not ready, or can't piggyback the ACK */ tx = ksocknal_alloc_tx_noop(cookie, !!remote); 
- if (tx == NULL) + if (!tx) return -ENOMEM; rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id); - if (rc == 0) + if (!rc) return 0; ksocknal_free_tx(tx); @@ -407,11 +403,12 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) { ksock_peer_t *peer = conn->ksnc_peer; ksock_tx_t *tx; + ksock_tx_t *temp; ksock_tx_t *tmp; LIST_HEAD(zlist); int count; - if (cookie1 == 0) + if (!cookie1) cookie1 = cookie2; count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1); @@ -424,8 +421,8 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) spin_lock(&peer->ksnp_lock); - list_for_each_entry_safe(tx, tmp, - &peer->ksnp_zc_req_list, tx_zc_list) { + list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, + tx_zc_list) { __u64 c = tx->tx_msg.ksm_zc_cookies[0]; if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) { @@ -433,20 +430,19 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) list_del(&tx->tx_zc_list); list_add(&tx->tx_zc_list, &zlist); - if (--count == 0) + if (!--count) break; } } spin_unlock(&peer->ksnp_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list); + list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) { list_del(&tx->tx_zc_list); ksocknal_tx_decref(tx); } - return count == 0 ? 0 : -EPROTO; + return !count ? 0 : -EPROTO; } static int @@ -461,58 +457,59 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid)); LIBCFS_ALLOC(hdr, sizeof(*hdr)); - if (hdr == NULL) { + if (!hdr) { CERROR("Can't allocate lnet_hdr_t\n"); return -ENOMEM; } hmv = (lnet_magicversion_t *)&hdr->dest_nid; - /* Re-organize V2.x message header to V1.x (lnet_hdr_t) - * header and send out */ - hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); - hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR); - hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR); + /* + * Re-organize V2.x message header to V1.x (lnet_hdr_t) + * header and send out + */ + hmv->magic = cpu_to_le32(LNET_PROTO_TCP_MAGIC); + hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR); + hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR); - if (the_lnet.ln_testprotocompat != 0) { + if (the_lnet.ln_testprotocompat) { /* single-shot proto check */ LNET_LOCK(); - if ((the_lnet.ln_testprotocompat & 1) != 0) { + if (the_lnet.ln_testprotocompat & 1) { hmv->version_major++; /* just different! 
*/ the_lnet.ln_testprotocompat &= ~1; } - if ((the_lnet.ln_testprotocompat & 2) != 0) { + if (the_lnet.ln_testprotocompat & 2) { hmv->magic = LNET_PROTO_MAGIC; the_lnet.ln_testprotocompat &= ~2; } LNET_UNLOCK(); } - hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); - hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); - hdr->type = cpu_to_le32 (LNET_MSG_HELLO); - hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32)); - hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype); - hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation); + hdr->src_nid = cpu_to_le64(hello->kshm_src_nid); + hdr->src_pid = cpu_to_le32(hello->kshm_src_pid); + hdr->type = cpu_to_le32(LNET_MSG_HELLO); + hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32)); + hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype); + hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation); rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout()); - if (rc != 0) { + if (rc) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); goto out; } - if (hello->kshm_nips == 0) + if (!hello->kshm_nips) goto out; - for (i = 0; i < (int) hello->kshm_nips; i++) { - hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]); - } + for (i = 0; i < (int) hello->kshm_nips; i++) + hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]); rc = lnet_sock_write(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), lnet_acceptor_timeout()); - if (rc != 0) { + if (rc) { CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", rc, hello->kshm_nips, &conn->ksnc_ipaddr, conn->ksnc_port); @@ -532,10 +529,10 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) hello->kshm_magic = LNET_PROTO_MAGIC; hello->kshm_version = conn->ksnc_proto->pro_version; - if (the_lnet.ln_testprotocompat != 0) { + if (the_lnet.ln_testprotocompat) { /* single-shot proto check */ LNET_LOCK(); - if ((the_lnet.ln_testprotocompat & 1) != 0) { + if (the_lnet.ln_testprotocompat & 1) { hello->kshm_version++; /* just different! 
*/ the_lnet.ln_testprotocompat &= ~1; } @@ -544,19 +541,19 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips), lnet_acceptor_timeout()); - if (rc != 0) { + if (rc) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); return rc; } - if (hello->kshm_nips == 0) + if (!hello->kshm_nips) return 0; rc = lnet_sock_write(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), lnet_acceptor_timeout()); - if (rc != 0) { + if (rc) { CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", rc, hello->kshm_nips, &conn->ksnc_ipaddr, conn->ksnc_port); @@ -575,7 +572,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, int i; LIBCFS_ALLOC(hdr, sizeof(*hdr)); - if (hdr == NULL) { + if (!hdr) { CERROR("Can't allocate lnet_hdr_t\n"); return -ENOMEM; } @@ -583,15 +580,15 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, rc = lnet_sock_read(sock, &hdr->src_nid, sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading rest of HELLO hdr from %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0 && rc != -EALREADY); goto out; } /* ...and check we got what we expected */ - if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) { + if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) { CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n", le32_to_cpu(hdr->type), &conn->ksnc_ipaddr); @@ -613,14 +610,14 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, goto out; } - if (hello->kshm_nips == 0) + if (!hello->kshm_nips) goto out; rc = lnet_sock_read(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading IPs from ip %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0 && rc != -EALREADY); goto out; } @@ -628,7 +625,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, for (i = 0; i < (int) hello->kshm_nips; i++) { hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]); - if (hello->kshm_ips[i] == 0) { + if (!hello->kshm_ips[i]) { CERROR("Zero IP[%d] from ip %pI4h\n", i, &conn->ksnc_ipaddr); rc = -EPROTO; @@ -657,9 +654,9 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout offsetof(ksock_hello_msg_t, kshm_ips) - offsetof(ksock_hello_msg_t, kshm_src_nid), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0 && rc != -EALREADY); return rc; } @@ -681,14 +678,14 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout return -EPROTO; } - if (hello->kshm_nips == 0) + if (!hello->kshm_nips) return 0; rc = lnet_sock_read(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading IPs from ip %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0 && rc != -EALREADY); return rc; } @@ -697,7 +694,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout if (conn->ksnc_flip) __swab32s(&hello->kshm_ips[i]); - if (hello->kshm_ips[i] == 0) { + if (!hello->kshm_ips[i]) { CERROR("Zero IP[%d] from ip %pI4h\n", i, &conn->ksnc_ipaddr); return -EPROTO; @@ -712,12 +709,13 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx) { /* V1.x has no KSOCK_MSG_NOOP */ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); 
- LASSERT(tx->tx_lnetmsg != NULL); + LASSERT(tx->tx_lnetmsg); tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr; tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t); - tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); + tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); + tx->tx_resid = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); } static void @@ -725,17 +723,19 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx) { tx->tx_iov[0].iov_base = &tx->tx_msg; - if (tx->tx_lnetmsg != NULL) { + if (tx->tx_lnetmsg) { LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr; tx->tx_iov[0].iov_len = sizeof(ksock_msg_t); - tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len; + tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len; + tx->tx_resid = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len; } else { LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); - tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); + tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); + tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); } /* Don't checksum before start sending, because packet can be piggybacked with ACK */ } @@ -745,7 +745,8 @@ ksocknal_unpack_msg_v1(ksock_msg_t *msg) { msg->ksm_csum = 0; msg->ksm_type = KSOCK_MSG_LNET; - msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; + msg->ksm_zc_cookies[0] = 0; + msg->ksm_zc_cookies[1] = 0; } static void diff --git a/drivers/staging/lustre/lustre/libcfs/Makefile b/drivers/staging/lustre/lnet/libcfs/Makefile index 03d3f3d7b1f8..8c8945545375 100644 --- a/drivers/staging/lustre/lustre/libcfs/Makefile +++ b/drivers/staging/lustre/lnet/libcfs/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_LUSTRE_FS) += libcfs.o +obj-$(CONFIG_LNET) += libcfs.o libcfs-linux-objs := linux-tracefile.o linux-debug.o libcfs-linux-objs += linux-prim.o linux-cpu.o @@ -11,8 +11,7 @@ libcfs-linux-objs += linux-mem.o libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs)) libcfs-all-objs := debug.o fail.o module.o tracefile.o \ - libcfs_string.o hash.o kernel_user_comm.o \ - prng.o workitem.o libcfs_cpu.o \ - libcfs_mem.o libcfs_lock.o + libcfs_string.o hash.o prng.o workitem.o \ + libcfs_cpu.o libcfs_mem.o libcfs_lock.o libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs) diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c index 0b38dad13546..c90e5102fe06 100644 --- a/drivers/staging/lustre/lustre/libcfs/debug.c +++ b/drivers/staging/lustre/lnet/libcfs/debug.c @@ -47,15 +47,15 @@ static char debug_file_name[1024]; unsigned int libcfs_subsystem_debug = ~0; +EXPORT_SYMBOL(libcfs_subsystem_debug); module_param(libcfs_subsystem_debug, int, 0644); MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask"); -EXPORT_SYMBOL(libcfs_subsystem_debug); unsigned int libcfs_debug = (D_CANTMASK | D_NETERROR | D_HA | D_CONFIG | D_IOCTL); +EXPORT_SYMBOL(libcfs_debug); module_param(libcfs_debug, int, 0644); MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask"); -EXPORT_SYMBOL(libcfs_debug); static int libcfs_param_debug_mb_set(const char *val, const struct kernel_param *kp) @@ -82,7 +82,8 @@ static int libcfs_param_debug_mb_set(const char *val, /* While debug_mb setting look like unsigned int, in fact * it needs quite a bunch of extra processing, so we define special - * debugmb parameter type with corresponding methods to handle this case */ + * 
debugmb parameter type with corresponding methods to handle this case + */ static struct kernel_param_ops param_ops_debugmb = { .set = libcfs_param_debug_mb_set, .get = param_get_uint, @@ -227,8 +228,7 @@ MODULE_PARM_DESC(libcfs_debug_file_path, int libcfs_panic_in_progress; -/* libcfs_debug_token2mask() expects the returned - * string in lower-case */ +/* libcfs_debug_token2mask() expects the returned string in lower-case */ static const char * libcfs_debug_subsys2str(int subsys) { @@ -271,6 +271,8 @@ libcfs_debug_subsys2str(int subsys) return "lquota"; case S_OSD: return "osd"; + case S_LFSCK: + return "lfsck"; case S_LMV: return "lmv"; case S_SEC: @@ -288,8 +290,7 @@ libcfs_debug_subsys2str(int subsys) } } -/* libcfs_debug_token2mask() expects the returned - * string in lower-case */ +/* libcfs_debug_token2mask() expects the returned string in lower-case */ static const char * libcfs_debug_dbg2str(int debug) { @@ -376,7 +377,7 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys) continue; token = fn(i); - if (token == NULL) /* unused bit */ + if (!token) /* unused bit */ continue; if (len > 0) { /* separator? */ @@ -416,7 +417,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys) /* Allow a number for backwards compatibility */ for (n = strlen(str); n > 0; n--) - if (!isspace(str[n-1])) + if (!isspace(str[n - 1])) break; matched = n; t = sscanf(str, "%i%n", &m, &matched); @@ -446,8 +447,7 @@ void libcfs_debug_dumplog_internal(void *arg) snprintf(debug_file_name, sizeof(debug_file_name) - 1, "%s.%lld.%ld", libcfs_debug_file_path_arr, (s64)ktime_get_real_seconds(), (long_ptr_t)arg); - pr_alert("LustreError: dumping log to %s\n", - debug_file_name); + pr_alert("LustreError: dumping log to %s\n", debug_file_name); cfs_tracefile_dump_all_pages(debug_file_name); libcfs_run_debug_log_upcall(debug_file_name); } @@ -469,7 +469,8 @@ void libcfs_debug_dumplog(void) /* we're being careful to ensure that the kernel thread is * able to set our state to running as it exits before we - * get to schedule() */ + * get to schedule() + */ init_waitqueue_entry(&wait, current); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&debug_ctlwq, &wait); @@ -503,14 +504,15 @@ int libcfs_debug_init(unsigned long bufsize) libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY; } - if (libcfs_debug_file_path != NULL) { + if (libcfs_debug_file_path) { strlcpy(libcfs_debug_file_path_arr, libcfs_debug_file_path, sizeof(libcfs_debug_file_path_arr)); } /* If libcfs_debug_mb is set to an invalid value or uninitialized - * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */ + * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES + */ if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) { max = TCD_MAX_PAGES; } else { @@ -540,8 +542,7 @@ int libcfs_debug_clear_buffer(void) return 0; } -/* Debug markers, although printed by S_LNET - * should not be be marked as such. */ +/* Debug markers, although printed by S_LNET should not be be marked as such. */ #undef DEBUG_SUBSYSTEM #define DEBUG_SUBSYSTEM S_UNDEFINED int libcfs_debug_mark_buffer(const char *text) diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c index 27831432d69a..dadaf7685cbd 100644 --- a/drivers/staging/lustre/lustre/libcfs/fail.c +++ b/drivers/staging/lustre/lnet/libcfs/fail.c @@ -97,7 +97,8 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set) /* Lost race to set CFS_FAILED_BIT. 
*/ if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) { /* If CFS_FAIL_ONCE is valid, only one process can fail, - * otherwise multi-process can fail at the same time. */ + * otherwise multi-process can fail at the same time. + */ if (cfs_fail_loc & CFS_FAIL_ONCE) return 0; } diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c index 4d50510434be..f60feb3a3dc7 100644 --- a/drivers/staging/lustre/lustre/libcfs/hash.c +++ b/drivers/staging/lustre/lnet/libcfs/hash.c @@ -355,7 +355,7 @@ cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, dh = container_of(cfs_hash_dh_hhead(hs, bd), struct cfs_hash_dhead, dh_head); - if (dh->dh_tail != NULL) /* not empty */ + if (dh->dh_tail) /* not empty */ hlist_add_behind(hnode, dh->dh_tail); else /* empty list */ hlist_add_head(hnode, &dh->dh_head); @@ -371,7 +371,7 @@ cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, dh = container_of(cfs_hash_dh_hhead(hs, bd), struct cfs_hash_dhead, dh_head); - if (hnd->next == NULL) { /* it's the tail */ + if (!hnd->next) { /* it's the tail */ dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL : container_of(hnd->pprev, struct hlist_node, next); } @@ -412,7 +412,7 @@ cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, dh = container_of(cfs_hash_dd_hhead(hs, bd), struct cfs_hash_dhead_dep, dd_head); - if (dh->dd_tail != NULL) /* not empty */ + if (dh->dd_tail) /* not empty */ hlist_add_behind(hnode, dh->dd_tail); else /* empty list */ hlist_add_head(hnode, &dh->dd_head); @@ -428,7 +428,7 @@ cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, dh = container_of(cfs_hash_dd_hhead(hs, bd), struct cfs_hash_dhead_dep, dd_head); - if (hnd->next == NULL) { /* it's the tail */ + if (!hnd->next) { /* it's the tail */ dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL : container_of(hnd->pprev, struct hlist_node, next); } @@ -492,7 +492,7 @@ void cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd) { /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (likely(hs->hs_rehash_buckets == NULL)) { + if (likely(!hs->hs_rehash_buckets)) { cfs_hash_bd_from_key(hs, hs->hs_buckets, hs->hs_cur_bits, key, bd); } else { @@ -579,7 +579,8 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, return; /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops - * in cfs_hash_bd_del/add_locked */ + * in cfs_hash_bd_del/add_locked + */ hs->hs_hops->hop_hnode_del(hs, bd_old, hnode); rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode); cfs_hash_bd_dep_record(hs, bd_new, rc); @@ -635,13 +636,14 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0; /* with this function, we can avoid a lot of useless refcount ops, - * which are expensive atomic operations most time. */ + * which are expensive atomic operations most time. + */ match = intent_add ? NULL : hnode; hlist_for_each(ehnode, hhead) { if (!cfs_hash_keycmp(hs, key, ehnode)) continue; - if (match != NULL && match != ehnode) /* can't match */ + if (match && match != ehnode) /* can't match */ continue; /* match and ... 
*/ @@ -659,7 +661,7 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, if (!intent_add) return NULL; - LASSERT(hnode != NULL); + LASSERT(hnode); cfs_hash_bd_add_locked(hs, bd, hnode); return hnode; } @@ -698,8 +700,7 @@ cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, if (prev == bds[i].bd_bucket) continue; - LASSERT(prev == NULL || - prev->hsb_index < bds[i].bd_bucket->hsb_index); + LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index); cfs_hash_bd_lock(hs, &bds[i], excl); prev = bds[i].bd_bucket; } @@ -730,7 +731,7 @@ cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, cfs_hash_for_each_bd(bds, n, i) { ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, CFS_HS_LOOKUP_IT_FIND); - if (ehnode != NULL) + if (ehnode) return ehnode; } return NULL; @@ -745,13 +746,13 @@ cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, int intent; unsigned i; - LASSERT(hnode != NULL); + LASSERT(hnode); intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK; cfs_hash_for_each_bd(bds, n, i) { ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, intent); - if (ehnode != NULL) + if (ehnode) return ehnode; } @@ -778,7 +779,7 @@ cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, cfs_hash_for_each_bd(bds, n, i) { ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode, CFS_HS_LOOKUP_IT_FINDDEL); - if (ehnode != NULL) + if (ehnode) return ehnode; } return NULL; @@ -789,26 +790,20 @@ cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2) { int rc; - if (bd2->bd_bucket == NULL) + if (!bd2->bd_bucket) return; - if (bd1->bd_bucket == NULL) { + if (!bd1->bd_bucket) { *bd1 = *bd2; bd2->bd_bucket = NULL; return; } rc = cfs_hash_bd_compare(bd1, bd2); - if (rc == 0) { + if (!rc) bd2->bd_bucket = NULL; - - } else if (rc > 0) { /* swab bd1 and bd2 */ - struct cfs_hash_bd tmp; - - tmp = *bd2; - *bd2 = *bd1; - *bd1 = tmp; - } + else if (rc > 0) + swap(*bd1, *bd2); /* swap bd1 and bd2 */ } void @@ -818,7 +813,7 @@ cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, /* NB: caller should hold hs_lock.rw if REHASH is set */ cfs_hash_bd_from_key(hs, hs->hs_buckets, hs->hs_cur_bits, key, &bds[0]); - if (likely(hs->hs_rehash_buckets == NULL)) { + if (likely(!hs->hs_rehash_buckets)) { /* no rehash or not rehashing */ bds[1].bd_bucket = NULL; return; @@ -873,7 +868,7 @@ cfs_hash_buckets_free(struct cfs_hash_bucket **buckets, int i; for (i = prev_size; i < size; i++) { - if (buckets[i] != NULL) + if (buckets[i]) LIBCFS_FREE(buckets[i], bkt_size); } @@ -892,16 +887,16 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, struct cfs_hash_bucket **new_bkts; int i; - LASSERT(old_size == 0 || old_bkts != NULL); + LASSERT(old_size == 0 || old_bkts); - if (old_bkts != NULL && old_size == new_size) + if (old_bkts && old_size == new_size) return old_bkts; LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size); - if (new_bkts == NULL) + if (!new_bkts) return NULL; - if (old_bkts != NULL) { + if (old_bkts) { memcpy(new_bkts, old_bkts, min(old_size, new_size) * sizeof(*old_bkts)); } @@ -911,7 +906,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, struct cfs_hash_bd bd; LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs)); - if (new_bkts[i] == NULL) { + if (!new_bkts[i]) { cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs), old_size, new_size); return NULL; @@ -1011,14 +1006,13 @@ cfs_hash_create(char *name, 
unsigned cur_bits, unsigned max_bits, CLASSERT(CFS_HASH_THETA_BITS < 15); - LASSERT(name != NULL); - LASSERT(ops != NULL); + LASSERT(name); LASSERT(ops->hs_key); LASSERT(ops->hs_hash); LASSERT(ops->hs_object); LASSERT(ops->hs_keycmp); - LASSERT(ops->hs_get != NULL); - LASSERT(ops->hs_put_locked != NULL); + LASSERT(ops->hs_get); + LASSERT(ops->hs_put_locked); if ((flags & CFS_HASH_REHASH) != 0) flags |= CFS_HASH_COUNTER; /* must have counter */ @@ -1029,13 +1023,12 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits)); LASSERT(ergo((flags & CFS_HASH_REHASH) != 0, (flags & CFS_HASH_NO_LOCK) == 0)); - LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, - ops->hs_keycpy != NULL)); + LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy)); len = (flags & CFS_HASH_BIGNAME) == 0 ? CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN; LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len])); - if (hs == NULL) + if (!hs) return NULL; strlcpy(hs->hs_name, name, len); @@ -1063,7 +1056,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0, CFS_HASH_NBKT(hs)); - if (hs->hs_buckets != NULL) + if (hs->hs_buckets) return hs; LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len])); @@ -1082,7 +1075,7 @@ cfs_hash_destroy(struct cfs_hash *hs) struct cfs_hash_bd bd; int i; - LASSERT(hs != NULL); + LASSERT(hs); LASSERT(!cfs_hash_is_exiting(hs) && !cfs_hash_is_iterating(hs)); @@ -1096,13 +1089,12 @@ cfs_hash_destroy(struct cfs_hash *hs) cfs_hash_depth_wi_cancel(hs); /* rehash should be done/canceled */ - LASSERT(hs->hs_buckets != NULL && - hs->hs_rehash_buckets == NULL); + LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets); cfs_hash_for_each_bucket(hs, &bd, i) { struct hlist_head *hhead; - LASSERT(bd.bd_bucket != NULL); + LASSERT(bd.bd_bucket); /* no need to take this lock, just for consistent code */ cfs_hash_bd_lock(hs, &bd, 1); @@ -1113,7 +1105,8 @@ cfs_hash_destroy(struct cfs_hash *hs) hs->hs_name, bd.bd_bucket->hsb_index, bd.bd_offset, bd.bd_bucket->hsb_count); /* can't assert key valicate, because we - * can interrupt rehash */ + * can interrupt rehash + */ cfs_hash_bd_del_locked(hs, &bd, hnode); cfs_hash_exit(hs, hnode); } @@ -1164,7 +1157,8 @@ cfs_hash_rehash_bits(struct cfs_hash *hs) return -EAGAIN; /* XXX: need to handle case with max_theta != 2.0 - * and the case with min_theta != 0.5 */ + * and the case with min_theta != 0.5 + */ if ((hs->hs_cur_bits < hs->hs_max_bits) && (__cfs_hash_theta(hs) > hs->hs_max_theta)) return hs->hs_cur_bits + 1; @@ -1293,8 +1287,8 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); /* NB: do nothing if @hnode is not in hash table */ - if (hnode == NULL || !hlist_unhashed(hnode)) { - if (bds[1].bd_bucket == NULL && hnode != NULL) { + if (!hnode || !hlist_unhashed(hnode)) { + if (!bds[1].bd_bucket && hnode) { cfs_hash_bd_del_locked(hs, &bds[0], hnode); } else { hnode = cfs_hash_dual_bd_finddel_locked(hs, bds, @@ -1302,7 +1296,7 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) } } - if (hnode != NULL) { + if (hnode) { obj = cfs_hash_object(hs, hnode); bits = cfs_hash_rehash_bits(hs); } @@ -1348,7 +1342,7 @@ cfs_hash_lookup(struct cfs_hash *hs, const void *key) cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key); - if (hnode != NULL) + if (hnode) obj = cfs_hash_object(hs, 
hnode); cfs_hash_dual_bd_unlock(hs, bds, 0); @@ -1378,7 +1372,8 @@ cfs_hash_for_each_enter(struct cfs_hash *hs) /* NB: iteration is mostly called by service thread, * we tend to cancel pending rehash-request, instead of * blocking service thread, we will relaunch rehash request - * after iteration */ + * after iteration + */ if (cfs_hash_is_rehashing(hs)) cfs_hash_rehash_cancel_locked(hs); cfs_hash_unlock(hs, 1); @@ -1436,7 +1431,7 @@ cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, struct hlist_head *hhead; cfs_hash_bd_lock(hs, &bd, excl); - if (func == NULL) { /* only glimpse size */ + if (!func) { /* only glimpse size */ count += bd.bd_bucket->hsb_count; cfs_hash_bd_unlock(hs, &bd, excl); continue; @@ -1574,7 +1569,7 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, stop_on_change = cfs_hash_with_rehash_key(hs) || !cfs_hash_with_no_itemref(hs) || - hs->hs_ops->hs_put_locked == NULL; + !hs->hs_ops->hs_put_locked; cfs_hash_lock(hs, 0); LASSERT(!cfs_hash_is_rehashing(hs)); @@ -1585,7 +1580,7 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, version = cfs_hash_bd_version_get(&bd); cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - for (hnode = hhead->first; hnode != NULL;) { + for (hnode = hhead->first; hnode;) { cfs_hash_bucket_validate(hs, &bd, hnode); cfs_hash_get(hs, hnode); cfs_hash_bd_unlock(hs, &bd, 0); @@ -1634,9 +1629,8 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, !cfs_hash_with_no_itemref(hs)) return -EOPNOTSUPP; - if (hs->hs_ops->hs_get == NULL || - (hs->hs_ops->hs_put == NULL && - hs->hs_ops->hs_put_locked == NULL)) + if (!hs->hs_ops->hs_get || + (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked)) return -EOPNOTSUPP; cfs_hash_for_each_enter(hs); @@ -1667,9 +1661,8 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, if (cfs_hash_with_no_lock(hs)) return -EOPNOTSUPP; - if (hs->hs_ops->hs_get == NULL || - (hs->hs_ops->hs_put == NULL && - hs->hs_ops->hs_put_locked == NULL)) + if (!hs->hs_ops->hs_get || + (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked)) return -EOPNOTSUPP; cfs_hash_for_each_enter(hs); @@ -1708,7 +1701,6 @@ out: cfs_hash_unlock(hs, 0); cfs_hash_for_each_exit(hs); } - EXPORT_SYMBOL(cfs_hash_hlist_for_each); /* @@ -1837,7 +1829,7 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old) cfs_hash_bd_for_each_hlist(hs, old, hhead) { hlist_for_each_safe(hnode, pos, hhead) { key = cfs_hash_key(hs, hnode); - LASSERT(key != NULL); + LASSERT(key); /* Validate hnode is in the correct bucket. 
*/ cfs_hash_bucket_validate(hs, old, hnode); /* @@ -1867,7 +1859,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi) int rc = 0; int i; - LASSERT(hs != NULL && cfs_hash_with_rehash(hs)); + LASSERT(hs && cfs_hash_with_rehash(hs)); cfs_hash_lock(hs, 0); LASSERT(cfs_hash_is_rehashing(hs)); @@ -1884,7 +1876,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi) bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets, old_size, new_size); cfs_hash_lock(hs, 1); - if (bkts == NULL) { + if (!bkts) { rc = -ENOMEM; goto out; } @@ -1903,7 +1895,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi) goto out; } - LASSERT(hs->hs_rehash_buckets == NULL); + LASSERT(!hs->hs_rehash_buckets); hs->hs_rehash_buckets = bkts; rc = 0; @@ -1946,7 +1938,7 @@ out: bsize = cfs_hash_bkt_size(hs); cfs_hash_unlock(hs, 1); /* can't refer to @hs anymore because it could be destroyed */ - if (bkts != NULL) + if (bkts) cfs_hash_buckets_free(bkts, bsize, new_size, old_size); if (rc != 0) CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc); @@ -1987,14 +1979,15 @@ void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key, cfs_hash_bd_order(&bds[0], &bds[1]); cfs_hash_multi_bd_lock(hs, bds, 3, 1); - if (likely(old_bds[1].bd_bucket == NULL)) { + if (likely(!old_bds[1].bd_bucket)) { cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode); } else { cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode); cfs_hash_bd_add_locked(hs, &new_bd, hnode); } /* overwrite key inside locks, otherwise may screw up with - * other operations, i.e: rehash */ + * other operations, i.e: rehash + */ cfs_hash_keycpy(hs, hnode, new_key); cfs_hash_multi_bd_unlock(hs, bds, 3, 1); @@ -2013,7 +2006,7 @@ static struct cfs_hash_bucket ** cfs_hash_full_bkts(struct cfs_hash *hs) { /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (hs->hs_rehash_buckets == NULL) + if (!hs->hs_rehash_buckets) return hs->hs_buckets; LASSERT(hs->hs_rehash_bits != 0); @@ -2025,7 +2018,7 @@ static unsigned int cfs_hash_full_nbkt(struct cfs_hash *hs) { /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (hs->hs_rehash_buckets == NULL) + if (!hs->hs_rehash_buckets) return CFS_HASH_NBKT(hs); LASSERT(hs->hs_rehash_bits != 0); @@ -2046,15 +2039,15 @@ void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m) theta = __cfs_hash_theta(hs); seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ", - CFS_HASH_BIGNAME_LEN, hs->hs_name, - 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits, - 1 << hs->hs_max_bits, - __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta), - __cfs_hash_theta_int(hs->hs_min_theta), - __cfs_hash_theta_frac(hs->hs_min_theta), - __cfs_hash_theta_int(hs->hs_max_theta), - __cfs_hash_theta_frac(hs->hs_max_theta), - hs->hs_flags, hs->hs_rehash_count); + CFS_HASH_BIGNAME_LEN, hs->hs_name, + 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits, + 1 << hs->hs_max_bits, + __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta), + __cfs_hash_theta_int(hs->hs_min_theta), + __cfs_hash_theta_frac(hs->hs_min_theta), + __cfs_hash_theta_int(hs->hs_max_theta), + __cfs_hash_theta_frac(hs->hs_max_theta), + hs->hs_flags, hs->hs_rehash_count); /* * The distribution is a summary of the chained hash depth in diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c index 933525c73da1..33352af6c27f 100644 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in 
the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -56,8 +51,9 @@ cfs_cpt_table_alloc(unsigned int ncpt) } LIBCFS_ALLOC(cptab, sizeof(*cptab)); - if (cptab != NULL) { + if (cptab) { cptab->ctb_version = CFS_CPU_VERSION_MAGIC; + node_set(0, cptab->ctb_nodemask); cptab->ctb_nparts = ncpt; } @@ -111,6 +107,13 @@ cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) } EXPORT_SYMBOL(cfs_cpt_online); +nodemask_t * +cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt) +{ + return &cptab->ctb_nodemask; +} +EXPORT_SYMBOL(cfs_cpt_nodemask); + int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) { @@ -207,7 +210,7 @@ EXPORT_SYMBOL(cfs_cpt_bind); void cfs_cpu_fini(void) { - if (cfs_cpt_table != NULL) { + if (cfs_cpt_table) { cfs_cpt_table_free(cfs_cpt_table); cfs_cpt_table = NULL; } @@ -218,7 +221,7 @@ cfs_cpu_init(void) { cfs_cpt_table = cfs_cpt_table_alloc(1); - return cfs_cpt_table != NULL ? 0 : -1; + return cfs_cpt_table ? 0 : -1; } #endif /* HAVE_LIBCFS_CPT */ diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c index 15782d9e6aa9..2de9eeae0232 100644 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. @@ -38,7 +33,7 @@ void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl) { - LASSERT(pcl->pcl_locks != NULL); + LASSERT(pcl->pcl_locks); LASSERT(!pcl->pcl_locked); cfs_percpt_free(pcl->pcl_locks); @@ -90,6 +85,7 @@ EXPORT_SYMBOL(cfs_percpt_lock_alloc); */ void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) + __acquires(pcl->pcl_locks) { int ncpt = cfs_cpt_number(pcl->pcl_cptab); int i; @@ -114,7 +110,8 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) if (i == 0) { LASSERT(!pcl->pcl_locked); /* nobody should take private lock after this - * so I wouldn't starve for too long time */ + * so I wouldn't starve for too long time + */ pcl->pcl_locked = 1; } } @@ -124,6 +121,7 @@ EXPORT_SYMBOL(cfs_percpt_lock); /** unlock a CPU partition */ void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index) + __releases(pcl->pcl_locks) { int ncpt = cfs_cpt_number(pcl->pcl_cptab); int i; diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c index 27cf86106363..c5a6951516ed 100644 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). 
* - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -54,7 +49,7 @@ cfs_percpt_free(void *vars) arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); for (i = 0; i < arr->va_count; i++) { - if (arr->va_ptrs[i] != NULL) + if (arr->va_ptrs[i]) LIBCFS_FREE(arr->va_ptrs[i], arr->va_size); } @@ -87,9 +82,10 @@ cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size) if (!arr) return NULL; - arr->va_size = size = L1_CACHE_ALIGN(size); - arr->va_count = count; - arr->va_cptab = cptab; + size = L1_CACHE_ALIGN(size); + arr->va_size = size; + arr->va_count = count; + arr->va_cptab = cptab; for (i = 0; i < count; i++) { LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size); diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c index 205a3ed435a8..50ac1536db4b 100644 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c @@ -54,7 +54,8 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), * and optionally an operator ('+' or '-'). If an operator * appears first in <str>, '*oldmask' is used as the starting point * (relative), otherwise minmask is used (absolute). An operator - * applies to all following tokens up to the next operator. */ + * applies to all following tokens up to the next operator. + */ while (*str != '\0') { while (isspace(*str)) str++; @@ -81,8 +82,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), found = 0; for (i = 0; i < 32; i++) { debugstr = bit2str(i); - if (debugstr != NULL && - strlen(debugstr) == len && + if (debugstr && strlen(debugstr) == len && strncasecmp(str, debugstr, len) == 0) { if (op == '-') newmask &= ~(1 << i); @@ -175,7 +175,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res) { char *end; - if (next->ls_str == NULL) + if (!next->ls_str) return 0; /* skip leading white spaces */ @@ -196,7 +196,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res) res->ls_str = next->ls_str; end = memchr(next->ls_str, delim, next->ls_len); - if (end == NULL) { + if (!end) { /* there is no the delimeter in the string */ end = next->ls_str + next->ls_len; next->ls_str = NULL; @@ -229,17 +229,37 @@ int cfs_str2num_check(char *str, int nob, unsigned *num, unsigned min, unsigned max) { - char *endp; + bool all_numbers = true; + char *endp, cache; + int rc; str = cfs_trimwhite(str); - *num = simple_strtoul(str, &endp, 0); - if (endp == str) - return 0; - for (; endp < str + nob; endp++) { - if (!isspace(*endp)) - return 0; + /** + * kstrtouint can only handle strings composed + * of only numbers. We need to scan the string + * passed in for the first non-digit character + * and end the string at that location. If we + * don't find any non-digit character we still + * need to place a '\0' at position nob since + * we are not interested in the rest of the + * string which is longer than nob in size. + * After we are done the character at the + * position we placed '\0' must be restored. 
+ */ + for (endp = str; endp < str + nob; endp++) { + if (!isdigit(*endp)) { + all_numbers = false; + break; + } } + cache = *endp; + *endp = '\0'; + + rc = kstrtouint(str, 10, num); + *endp = cache; + if (rc || !all_numbers) + return 0; return (*num >= min && *num <= max); } @@ -266,7 +286,7 @@ cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max, struct cfs_lstr tok; LIBCFS_ALLOC(re, sizeof(*re)); - if (re == NULL) + if (!re) return -ENOMEM; if (src->ls_len == 1 && src->ls_str[0] == '*') { @@ -337,18 +357,19 @@ cfs_range_expr_print(char *buffer, int count, struct cfs_range_expr *expr, char s[] = "["; char e[] = "]"; - if (bracketed) - s[0] = e[0] = '\0'; + if (bracketed) { + s[0] = '\0'; + e[0] = '\0'; + } if (expr->re_lo == expr->re_hi) i = scnprintf(buffer, count, "%u", expr->re_lo); else if (expr->re_stride == 1) i = scnprintf(buffer, count, "%s%u-%u%s", - s, expr->re_lo, expr->re_hi, e); + s, expr->re_lo, expr->re_hi, e); else i = scnprintf(buffer, count, "%s%u-%u/%u%s", - s, expr->re_lo, expr->re_hi, - expr->re_stride, e); + s, expr->re_lo, expr->re_hi, expr->re_stride, e); return i; } @@ -442,7 +463,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp) } LIBCFS_ALLOC(val, sizeof(val[0]) * count); - if (val == NULL) + if (!val) return -ENOMEM; count = 0; @@ -470,7 +491,7 @@ cfs_expr_list_free(struct cfs_expr_list *expr_list) struct cfs_range_expr *expr; expr = list_entry(expr_list->el_exprs.next, - struct cfs_range_expr, re_link); + struct cfs_range_expr, re_link); list_del(&expr->re_link); LIBCFS_FREE(expr, sizeof(*expr)); } @@ -495,7 +516,7 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, int rc; LIBCFS_ALLOC(expr_list, sizeof(*expr_list)); - if (expr_list == NULL) + if (!expr_list) return -ENOMEM; src.ls_str = str; @@ -509,7 +530,7 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, src.ls_len -= 2; rc = -EINVAL; - while (src.ls_str != NULL) { + while (src.ls_str) { struct cfs_lstr tok; if (!cfs_gettok(&src, ',', &tok)) { @@ -521,15 +542,12 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, if (rc != 0) break; - list_add_tail(&expr->re_link, - &expr_list->el_exprs); + list_add_tail(&expr->re_link, &expr_list->el_exprs); } } else { rc = cfs_range_expr_parse(&src, min, max, 0, &expr); - if (rc == 0) { - list_add_tail(&expr->re_link, - &expr_list->el_exprs); - } + if (rc == 0) + list_add_tail(&expr->re_link, &expr_list->el_exprs); } if (rc != 0) @@ -555,8 +573,7 @@ cfs_expr_list_free_list(struct list_head *list) struct cfs_expr_list *el; while (!list_empty(list)) { - el = list_entry(list->next, - struct cfs_expr_list, el_link); + el = list_entry(list->next, struct cfs_expr_list, el_link); list_del(&el->el_link); cfs_expr_list_free(el); } diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c index e52afe35e7ea..389fb9eeea75 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). 
* - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -84,32 +79,32 @@ cfs_cpt_table_free(struct cfs_cpt_table *cptab) { int i; - if (cptab->ctb_cpu2cpt != NULL) { + if (cptab->ctb_cpu2cpt) { LIBCFS_FREE(cptab->ctb_cpu2cpt, num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0])); } - for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) { + for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) { struct cfs_cpu_partition *part = &cptab->ctb_parts[i]; - if (part->cpt_nodemask != NULL) { + if (part->cpt_nodemask) { LIBCFS_FREE(part->cpt_nodemask, sizeof(*part->cpt_nodemask)); } - if (part->cpt_cpumask != NULL) + if (part->cpt_cpumask) LIBCFS_FREE(part->cpt_cpumask, cpumask_size()); } - if (cptab->ctb_parts != NULL) { + if (cptab->ctb_parts) { LIBCFS_FREE(cptab->ctb_parts, cptab->ctb_nparts * sizeof(cptab->ctb_parts[0])); } - if (cptab->ctb_nodemask != NULL) + if (cptab->ctb_nodemask) LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask)); - if (cptab->ctb_cpumask != NULL) + if (cptab->ctb_cpumask) LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size()); LIBCFS_FREE(cptab, sizeof(*cptab)); @@ -123,7 +118,7 @@ cfs_cpt_table_alloc(unsigned int ncpt) int i; LIBCFS_ALLOC(cptab, sizeof(*cptab)); - if (cptab == NULL) + if (!cptab) return NULL; cptab->ctb_nparts = ncpt; @@ -131,19 +126,19 @@ cfs_cpt_table_alloc(unsigned int ncpt) LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size()); LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask)); - if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL) + if (!cptab->ctb_cpumask || !cptab->ctb_nodemask) goto failed; LIBCFS_ALLOC(cptab->ctb_cpu2cpt, num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0])); - if (cptab->ctb_cpu2cpt == NULL) + if (!cptab->ctb_cpu2cpt) goto failed; memset(cptab->ctb_cpu2cpt, -1, num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0])); LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0])); - if (cptab->ctb_parts == NULL) + if (!cptab->ctb_parts) goto failed; for (i = 0; i < ncpt; i++) { @@ -151,7 +146,7 @@ cfs_cpt_table_alloc(unsigned int ncpt) LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size()); LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask)); - if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL) + if (!part->cpt_cpumask || !part->cpt_nodemask) goto failed; } @@ -359,8 +354,6 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) if (i >= nr_cpu_ids) node_clear(node, *cptab->ctb_nodemask); - - return; } EXPORT_SYMBOL(cfs_cpt_unset_cpu); @@ -530,7 +523,8 @@ cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) return cpt; /* don't return negative value for safety of upper layer, - * instead we shadow the unknown cpu to a valid partition ID */ + * instead we shadow the unknown cpu to a valid partition ID + */ cpt = cpu % cptab->ctb_nparts; } @@ -618,7 +612,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, /* allocate scratch buffer */ LIBCFS_ALLOC(socket, cpumask_size()); LIBCFS_ALLOC(core, cpumask_size()); - if (socket == NULL || core == NULL) { + if (!socket || !core) { rc = -ENOMEM; goto out; } @@ -659,9 +653,9 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, } out: - if (socket != NULL) + if (socket) LIBCFS_FREE(socket, cpumask_size()); - if (core != NULL) + if (core) LIBCFS_FREE(core, cpumask_size()); return rc; } @@ -682,7 +676,8 
@@ cfs_cpt_num_estimate(void) /* generate reasonable number of CPU partitions based on total number * of CPUs, Preferred N should be power2 and match this condition: - * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */ + * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 + */ for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1) ; @@ -700,7 +695,8 @@ cfs_cpt_num_estimate(void) out: #if (BITS_PER_LONG == 32) /* config many CPU partitions on 32-bit system could consume - * too much memory */ + * too much memory + */ ncpt = min(2U, ncpt); #endif while (ncpu % ncpt != 0) @@ -735,7 +731,7 @@ cfs_cpt_table_create(int ncpt) } cptab = cfs_cpt_table_alloc(ncpt); - if (cptab == NULL) { + if (!cptab) { CERROR("Failed to allocate CPU map(%d)\n", ncpt); goto failed; } @@ -747,7 +743,7 @@ cfs_cpt_table_create(int ncpt) } LIBCFS_ALLOC(mask, cpumask_size()); - if (mask == NULL) { + if (!mask) { CERROR("Failed to allocate scratch cpumask\n"); goto failed; } @@ -793,10 +789,10 @@ cfs_cpt_table_create(int ncpt) CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n", ncpt, num_online_nodes(), num_online_cpus()); - if (mask != NULL) + if (mask) LIBCFS_FREE(mask, cpumask_size()); - if (cptab != NULL) + if (cptab) cfs_cpt_table_free(cptab); return NULL; @@ -814,7 +810,7 @@ cfs_cpt_table_create_pattern(char *pattern) for (ncpt = 0;; ncpt++) { /* quick scan bracket */ str = strchr(str, '['); - if (str == NULL) + if (!str) break; str++; } @@ -836,7 +832,7 @@ cfs_cpt_table_create_pattern(char *pattern) high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1; cptab = cfs_cpt_table_alloc(ncpt); - if (cptab == NULL) { + if (!cptab) { CERROR("Failed to allocate cpu partition table\n"); return NULL; } @@ -850,11 +846,12 @@ cfs_cpt_table_create_pattern(char *pattern) int i; int n; - if (bracket == NULL) { + if (!bracket) { if (*str != 0) { CERROR("Invalid pattern %s\n", str); goto failed; - } else if (c != ncpt) { + } + if (c != ncpt) { CERROR("expect %d partitions but found %d\n", ncpt, c); goto failed; @@ -885,7 +882,7 @@ cfs_cpt_table_create_pattern(char *pattern) } bracket = strchr(str, ']'); - if (bracket == NULL) { + if (!bracket) { CERROR("missing right bracket for cpt %d, %s\n", cpt, str); goto failed; @@ -943,6 +940,7 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) spin_lock(&cpt_data.cpt_lock); cpt_data.cpt_version++; spin_unlock(&cpt_data.cpt_lock); + /* Fall through */ default: if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) { CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n", @@ -975,25 +973,25 @@ static struct notifier_block cfs_cpu_notifier = { void cfs_cpu_fini(void) { - if (cfs_cpt_table != NULL) + if (cfs_cpt_table) cfs_cpt_table_free(cfs_cpt_table); #ifdef CONFIG_HOTPLUG_CPU unregister_hotcpu_notifier(&cfs_cpu_notifier); #endif - if (cpt_data.cpt_cpumask != NULL) + if (cpt_data.cpt_cpumask) LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size()); } int cfs_cpu_init(void) { - LASSERT(cfs_cpt_table == NULL); + LASSERT(!cfs_cpt_table); memset(&cpt_data, 0, sizeof(cpt_data)); LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size()); - if (cpt_data.cpt_cpumask == NULL) { + if (!cpt_data.cpt_cpumask) { CERROR("Failed to allocate scratch buffer\n"); return -1; } @@ -1007,7 +1005,7 @@ cfs_cpu_init(void) if (*cpu_pattern != 0) { cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern); - if (cfs_cpt_table == NULL) { + if (!cfs_cpt_table) { CERROR("Failed to create cptab from pattern %s\n", cpu_pattern); goto failed; @@ -1015,7 +1013,7 @@ cfs_cpu_init(void) } else { 
cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions); - if (cfs_cpt_table == NULL) { + if (!cfs_cpt_table) { CERROR("Failed to create ptable with npartitions %d\n", cpu_npartitions); goto failed; diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c index db0572733712..db0572733712 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c index 94c01aad844b..8c9377ed850c 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c @@ -47,7 +47,7 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id, *type = cfs_crypto_hash_type(alg_id); - if (*type == NULL) { + if (!*type) { CWARN("Unsupported hash algorithm id = %d, max id is %d\n", alg_id, CFS_HASH_ALG_MAX); return -EINVAL; @@ -76,7 +76,7 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id, * Skip this function for digest, because we use shash logic at * cfs_crypto_hash_alloc. */ - if (key != NULL) + if (key) err = crypto_ahash_setkey(tfm, key, key_len); else if ((*type)->cht_key != 0) err = crypto_ahash_setkey(tfm, @@ -110,14 +110,14 @@ int cfs_crypto_hash_digest(unsigned char alg_id, int err; const struct cfs_crypto_hash_type *type; - if (buf == NULL || buf_len == 0 || hash_len == NULL) + if (!buf || buf_len == 0 || !hash_len) return -EINVAL; err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len); if (err != 0) return err; - if (hash == NULL || *hash_len < type->cht_size) { + if (!hash || *hash_len < type->cht_size) { *hash_len = type->cht_size; crypto_free_ahash(crypto_ahash_reqtfm(req)); ahash_request_free(req); @@ -186,12 +186,12 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, struct ahash_request *req = (void *)hdesc; int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); - if (hash_len == NULL) { + if (!hash_len) { crypto_free_ahash(crypto_ahash_reqtfm(req)); ahash_request_free(req); return 0; } - if (hash == NULL || *hash_len < size) { + if (!hash || *hash_len < size) { *hash_len = size; return -ENOSPC; } @@ -224,7 +224,6 @@ static void cfs_crypto_performance_test(unsigned char alg_id, hash, &hash_len); if (err) break; - } end = jiffies; @@ -247,8 +246,7 @@ int cfs_crypto_hash_speed(unsigned char hash_alg) { if (hash_alg < CFS_HASH_ALG_MAX) return cfs_crypto_hash_speeds[hash_alg]; - else - return -1; + return -1; } EXPORT_SYMBOL(cfs_crypto_hash_speed); @@ -261,14 +259,13 @@ static int cfs_crypto_test_hashes(void) unsigned char *data; unsigned int j; /* Data block size for testing hash. 
Maximum - * kmalloc size for 2.6.18 kernel is 128K */ + * kmalloc size for 2.6.18 kernel is 128K + */ unsigned int data_len = 1 * 128 * 1024; data = kmalloc(data_len, 0); - if (data == NULL) { - CERROR("Failed to allocate mem\n"); + if (!data) return -ENOMEM; - } for (j = 0; j < data_len; j++) data[j] = j & 0xff; @@ -297,6 +294,4 @@ void cfs_crypto_unregister(void) { if (adler32 == 0) cfs_crypto_adler32_unregister(); - - return; } diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h index 18e8cd4d8758..18e8cd4d8758 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c index 68515d9130c1..13d31e8a931d 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c @@ -65,6 +65,7 @@ void cfs_cap_raise(cfs_cap_t cap) commit_creds(cred); } } +EXPORT_SYMBOL(cfs_cap_raise); void cfs_cap_lower(cfs_cap_t cap) { @@ -76,11 +77,13 @@ void cfs_cap_lower(cfs_cap_t cap) commit_creds(cred); } } +EXPORT_SYMBOL(cfs_cap_lower); int cfs_cap_raised(cfs_cap_t cap) { return cap_raised(current_cap(), cap); } +EXPORT_SYMBOL(cfs_cap_raised); static void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap) { @@ -95,10 +98,6 @@ cfs_cap_t cfs_curproc_cap_pack(void) cfs_kernel_cap_pack(current_cap(), &cap); return cap; } - -EXPORT_SYMBOL(cfs_cap_raise); -EXPORT_SYMBOL(cfs_cap_lower); -EXPORT_SYMBOL(cfs_cap_raised); EXPORT_SYMBOL(cfs_curproc_cap_pack); /* diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c index 59c7bf3cbc1f..638e4b33d3a9 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c @@ -80,14 +80,14 @@ void libcfs_run_debug_log_upcall(char *file) argv[0] = lnet_debug_log_upcall; - LASSERTF(file != NULL, "called on a null filename\n"); + LASSERTF(file, "called on a null filename\n"); argv[1] = file; /* only need to pass the path of the file */ argv[2] = NULL; rc = call_usermodehelper(argv[0], argv, envp, 1); if (rc < 0 && rc != -ENOENT) { - CERROR("Error %d invoking LNET debug log upcall %s %s; check /proc/sys/lnet/debug_log_upcall\n", + CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n", rc, argv[0], argv[1]); } else { CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n", @@ -106,14 +106,14 @@ void libcfs_run_upcall(char **argv) argv[0] = lnet_upcall; argc = 1; - while (argv[argc] != NULL) + while (argv[argc]) argc++; LASSERT(argc >= 2); rc = call_usermodehelper(argv[0], argv, envp, 1); if (rc < 0 && rc != -ENOENT) { - CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /proc/sys/lnet/upcall\n", + CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n", rc, argv[0], argv[1], argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], argc < 4 ? "" : ",", argc < 4 ? 
"" : argv[3], @@ -142,8 +142,9 @@ void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata) argv[4] = buf; argv[5] = NULL; - libcfs_run_upcall (argv); + libcfs_run_upcall(argv); } +EXPORT_SYMBOL(libcfs_run_lbug_upcall); /* coverity[+kill] */ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) @@ -166,9 +167,10 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) while (1) schedule(); } +EXPORT_SYMBOL(lbug_with_loc); static int panic_notifier(struct notifier_block *self, unsigned long unused1, - void *unused2) + void *unused2) { if (libcfs_panic_in_progress) return 0; @@ -187,13 +189,12 @@ static struct notifier_block libcfs_panic_notifier = { void libcfs_register_panic_notifier(void) { - atomic_notifier_chain_register(&panic_notifier_list, &libcfs_panic_notifier); + atomic_notifier_chain_register(&panic_notifier_list, + &libcfs_panic_notifier); } void libcfs_unregister_panic_notifier(void) { - atomic_notifier_chain_unregister(&panic_notifier_list, &libcfs_panic_notifier); + atomic_notifier_chain_unregister(&panic_notifier_list, + &libcfs_panic_notifier); } - -EXPORT_SYMBOL(libcfs_run_lbug_upcall); -EXPORT_SYMBOL(lbug_with_loc); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c index 025e2f0028ab..86f32ffc5d04 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c @@ -50,7 +50,7 @@ void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size, ret = kzalloc_node(size, flags | __GFP_NOWARN, cfs_cpt_spread_node(cptab, cpt)); if (!ret) { - WARN_ON(!(flags & (__GFP_FS|__GFP_HIGH))); + WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH))); ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); } diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c index 70a99cf019de..ebc60ac9bb7a 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c @@ -40,41 +40,10 @@ #define LNET_MINOR 240 -int libcfs_ioctl_getdata(char *buf, char *end, void *arg) +int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data) { - struct libcfs_ioctl_hdr *hdr; - struct libcfs_ioctl_data *data; - int orig_len; - - hdr = (struct libcfs_ioctl_hdr *)buf; - data = (struct libcfs_ioctl_data *)buf; - - if (copy_from_user(buf, arg, sizeof(*hdr))) - return -EFAULT; - - if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) { - CERROR("PORTALS: version mismatch kernel vs application\n"); - return -EINVAL; - } - - if (hdr->ioc_len >= end - buf) { - CERROR("PORTALS: user buffer exceeds kernel buffer\n"); - return -EINVAL; - } - - if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) { - CERROR("PORTALS: user buffer too small for ioctl\n"); - return -EINVAL; - } - - orig_len = hdr->ioc_len; - if (copy_from_user(buf, arg, hdr->ioc_len)) - return -EFAULT; - if (orig_len != data->ioc_len) - return -EINVAL; - if (libcfs_ioctl_is_invalid(data)) { - CERROR("PORTALS: ioctl not correctly formatted\n"); + CERROR("LNET: ioctl not correctly formatted\n"); return -EINVAL; } @@ -88,9 +57,29 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg) return 0; } -int libcfs_ioctl_popdata(void *arg, void *data, int size) +int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg, + __u32 *len) +{ + struct libcfs_ioctl_hdr hdr; + + if (copy_from_user(&hdr, arg, sizeof(hdr))) + return -EFAULT; + + if 
(hdr.ioc_version != LIBCFS_IOCTL_VERSION && + hdr.ioc_version != LIBCFS_IOCTL_VERSION2) { + CERROR("LNET: version mismatch expected %#x, got %#x\n", + LIBCFS_IOCTL_VERSION, hdr.ioc_version); + return -EINVAL; + } + + *len = hdr.ioc_len; + + return 0; +} + +int libcfs_ioctl_popdata(void __user *arg, void *data, int size) { - if (copy_to_user((char *)arg, data, size)) + if (copy_to_user(arg, data, size)) return -EFAULT; return 0; } @@ -98,14 +87,12 @@ int libcfs_ioctl_popdata(void *arg, void *data, int size) static int libcfs_psdev_open(struct inode *inode, struct file *file) { - struct libcfs_device_userstate **pdu = NULL; int rc = 0; if (!inode) return -EINVAL; - pdu = (struct libcfs_device_userstate **)&file->private_data; - if (libcfs_psdev_ops.p_open != NULL) - rc = libcfs_psdev_ops.p_open(0, (void *)pdu); + if (libcfs_psdev_ops.p_open) + rc = libcfs_psdev_ops.p_open(0, NULL); else return -EPERM; return rc; @@ -115,14 +102,12 @@ libcfs_psdev_open(struct inode *inode, struct file *file) static int libcfs_psdev_release(struct inode *inode, struct file *file) { - struct libcfs_device_userstate *pdu; int rc = 0; if (!inode) return -EINVAL; - pdu = file->private_data; - if (libcfs_psdev_ops.p_close != NULL) - rc = libcfs_psdev_ops.p_close(0, (void *)pdu); + if (libcfs_psdev_ops.p_close) + rc = libcfs_psdev_ops.p_close(0, NULL); else rc = -EPERM; return rc; @@ -138,8 +123,8 @@ static long libcfs_ioctl(struct file *file, return -EACCES; if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE || - _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR || - _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) { + _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR || + _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) { CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n", _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd)); return -EINVAL; @@ -152,16 +137,10 @@ static long libcfs_ioctl(struct file *file, return -EPERM; panic("debugctl-invoked panic"); return 0; - case IOC_LIBCFS_MEMHOG: - if (!capable(CFS_CAP_SYS_ADMIN)) - return -EPERM; - /* go thought */ } - pfile.off = 0; - pfile.private_data = file->private_data; - if (libcfs_psdev_ops.p_ioctl != NULL) - rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void *)arg); + if (libcfs_psdev_ops.p_ioctl) + rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg); else rc = -EPERM; return rc; diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c index 89084460231a..89084460231a 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c index 64a136cd503d..91c2ae8f9d67 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c @@ -63,9 +63,8 @@ int cfs_tracefile_init_arch(void) cfs_trace_data[i] = kmalloc(sizeof(union cfs_trace_data_union) * num_possible_cpus(), GFP_KERNEL); - if (cfs_trace_data[i] == NULL) + if (!cfs_trace_data[i]) goto out; - } /* arch related info initialized */ @@ -82,7 +81,7 @@ int cfs_tracefile_init_arch(void) kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE, GFP_KERNEL); - if (cfs_trace_console_buffers[i][j] == NULL) + if (!cfs_trace_console_buffers[i][j]) goto out; } @@ -105,7 +104,7 @@ void cfs_tracefile_fini_arch(void) cfs_trace_console_buffers[i][j] = NULL; } - for (i = 0; cfs_trace_data[i] != NULL; i++) { + for (i = 0; cfs_trace_data[i]; i++) { kfree(cfs_trace_data[i]); 
cfs_trace_data[i] = NULL; } @@ -131,14 +130,13 @@ void cfs_tracefile_write_unlock(void) up_write(&cfs_tracefile_sem); } -cfs_trace_buf_type_t cfs_trace_buf_idx_get(void) +enum cfs_trace_buf_type cfs_trace_buf_idx_get(void) { if (in_irq()) return CFS_TCD_TYPE_IRQ; - else if (in_softirq()) + if (in_softirq()) return CFS_TCD_TYPE_SOFTIRQ; - else - return CFS_TCD_TYPE_PROC; + return CFS_TCD_TYPE_PROC; } /* @@ -176,16 +174,6 @@ void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking) spin_unlock(&tcd->tcd_lock); } -int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd, - struct cfs_trace_page *tage) -{ - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - return tcd->tcd_cpu == tage->cpu; -} - void cfs_set_ptldebug_header(struct ptldebug_header *header, struct libcfs_debug_msg_data *msgdata, @@ -200,14 +188,14 @@ cfs_set_ptldebug_header(struct ptldebug_header *header, header->ph_cpu_id = smp_processor_id(); header->ph_type = cfs_trace_buf_idx_get(); /* y2038 safe since all user space treats this as unsigned, but - * will overflow in 2106 */ + * will overflow in 2106 + */ header->ph_sec = (u32)ts.tv_sec; header->ph_usec = ts.tv_nsec / NSEC_PER_USEC; header->ph_stack = stack; header->ph_pid = current->pid; header->ph_line_num = msgdata->msg_line; header->ph_extern_pid = 0; - return; } static char * @@ -261,12 +249,11 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask, hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num, fn, len, buf); } - return; } int cfs_trace_max_debug_mb(void) { int total_mb = (totalram_pages >> (20 - PAGE_SHIFT)); - return max(512, (total_mb * 80)/100); + return max(512, (total_mb * 80) / 100); } diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c index 329d78ce272d..cdc640bfdba8 100644 --- a/drivers/staging/lustre/lustre/libcfs/module.c +++ b/drivers/staging/lustre/lnet/libcfs/module.c @@ -54,156 +54,30 @@ # define DEBUG_SUBSYSTEM S_LNET +#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \ + sizeof(struct lnet_ioctl_config_data)) + #include "../../include/linux/libcfs/libcfs.h" #include <asm/div64.h> #include "../../include/linux/libcfs/libcfs_crypto.h" #include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lib-dlc.h" #include "../../include/linux/lnet/lnet.h" #include "tracefile.h" -MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); -MODULE_DESCRIPTION("Portals v3.1"); -MODULE_LICENSE("GPL"); - static struct dentry *lnet_debugfs_root; -static void kportal_memhog_free(struct libcfs_device_userstate *ldu) -{ - struct page **level0p = &ldu->ldu_memhog_root_page; - struct page **level1p; - struct page **level2p; - int count1; - int count2; - - if (*level0p != NULL) { - - level1p = (struct page **)page_address(*level0p); - count1 = 0; - - while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) && - *level1p != NULL) { - - level2p = (struct page **)page_address(*level1p); - count2 = 0; - - while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) && - *level2p != NULL) { - - __free_page(*level2p); - ldu->ldu_memhog_pages--; - level2p++; - count2++; - } - - __free_page(*level1p); - ldu->ldu_memhog_pages--; - level1p++; - count1++; - } - - __free_page(*level0p); - ldu->ldu_memhog_pages--; - - *level0p = NULL; - } - - LASSERT(ldu->ldu_memhog_pages == 0); -} - -static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages, - gfp_t flags) -{ - struct page **level0p; - struct page **level1p; - struct page **level2p; - int count1; - int count2; - - LASSERT(ldu->ldu_memhog_pages == 0); - LASSERT(ldu->ldu_memhog_root_page == NULL); - - if (npages < 0) - return -EINVAL; - - if (npages == 0) - return 0; - - level0p = &ldu->ldu_memhog_root_page; - *level0p = alloc_page(flags); - if (*level0p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level1p = (struct page **)page_address(*level0p); - count1 = 0; - memset(level1p, 0, PAGE_CACHE_SIZE); - - while (ldu->ldu_memhog_pages < npages && - count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) { - - if (cfs_signal_pending()) - return -EINTR; - - *level1p = alloc_page(flags); - if (*level1p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level2p = (struct page **)page_address(*level1p); - count2 = 0; - memset(level2p, 0, PAGE_CACHE_SIZE); - - while (ldu->ldu_memhog_pages < npages && - count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) { - - if (cfs_signal_pending()) - return -EINTR; - - *level2p = alloc_page(flags); - if (*level2p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level2p++; - count2++; - } - - level1p++; - count1++; - } - - return 0; -} - /* called when opening /dev/device */ static int libcfs_psdev_open(unsigned long flags, void *args) { - struct libcfs_device_userstate *ldu; - try_module_get(THIS_MODULE); - - LIBCFS_ALLOC(ldu, sizeof(*ldu)); - if (ldu != NULL) { - ldu->ldu_memhog_pages = 0; - ldu->ldu_memhog_root_page = NULL; - } - *(struct libcfs_device_userstate **)args = ldu; - return 0; } /* called when closing /dev/device */ static int libcfs_psdev_release(unsigned long flags, void *args) { - struct libcfs_device_userstate *ldu; - - ldu = (struct libcfs_device_userstate *)args; - if (ldu != NULL) { - kportal_memhog_free(ldu); - LIBCFS_FREE(ldu, sizeof(*ldu)); - } - module_put(THIS_MODULE); return 0; } @@ -241,11 +115,25 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand) } EXPORT_SYMBOL(libcfs_deregister_ioctl); -static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd, - void *arg, struct libcfs_ioctl_data *data) +static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd, + void __user *arg, struct libcfs_ioctl_hdr *hdr) { + struct libcfs_ioctl_data *data = NULL; int err = -EINVAL; + /* + * The libcfs_ioctl_data_adjust() function performs adjustment + * operations on the libcfs_ioctl_data structure to make + * it usable by the code. 
This doesn't need to be called + * for new data structures added. + */ + if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) { + data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr); + err = libcfs_ioctl_data_adjust(data); + if (err) + return err; + } + switch (cmd) { case IOC_LIBCFS_CLEAR_DEBUG: libcfs_debug_clear_buffer(); @@ -255,24 +143,11 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd, * Handled in arch/cfs_module.c */ case IOC_LIBCFS_MARK_DEBUG: - if (data->ioc_inlbuf1 == NULL || + if (!data->ioc_inlbuf1 || data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') return -EINVAL; libcfs_debug_mark_buffer(data->ioc_inlbuf1); return 0; - case IOC_LIBCFS_MEMHOG: - if (pfile->private_data == NULL) { - err = -EINVAL; - } else { - kportal_memhog_free(pfile->private_data); - /* XXX The ioc_flags is not GFP flags now, need to be fixed */ - err = kportal_memhog_alloc(pfile->private_data, - data->ioc_count, - data->ioc_flags); - if (err != 0) - kportal_memhog_free(pfile->private_data); - } - break; default: { struct libcfs_ioctl_handler *hand; @@ -280,11 +155,11 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd, err = -EINVAL; down_read(&ioctl_list_sem); list_for_each_entry(hand, &ioctl_list, item) { - err = hand->handle_ioctl(cmd, data); + err = hand->handle_ioctl(cmd, hdr); if (err != -EINVAL) { if (err == 0) err = libcfs_ioctl_popdata(arg, - data, sizeof(*data)); + hdr, hdr->ioc_len); break; } } @@ -296,28 +171,41 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd, return err; } -static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg) +static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, + void __user *arg) { - char *buf; - struct libcfs_ioctl_data *data; + struct libcfs_ioctl_hdr *hdr; int err = 0; + __u32 buf_len; - LIBCFS_ALLOC_GFP(buf, 1024, GFP_KERNEL); - if (buf == NULL) + err = libcfs_ioctl_getdata_len(arg, &buf_len); + if (err) + return err; + + /* + * do a check here to restrict the size of the memory + * to allocate to guard against DoS attacks. 
+ */ + if (buf_len > LNET_MAX_IOCTL_BUF_LEN) { + CERROR("LNET: user buffer exceeds kernel buffer\n"); + return -EINVAL; + } + + LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL); + if (!hdr) return -ENOMEM; /* 'cmd' and permissions get checked in our arch-specific caller */ - if (libcfs_ioctl_getdata(buf, buf + 800, arg)) { - CERROR("PORTALS ioctl: data error\n"); - err = -EINVAL; + if (copy_from_user(hdr, arg, buf_len)) { + CERROR("LNET ioctl: data error\n"); + err = -EFAULT; goto out; } - data = (struct libcfs_ioctl_data *)buf; - err = libcfs_ioctl_int(pfile, cmd, arg, data); + err = libcfs_ioctl_handle(pfile, cmd, arg, hdr); out: - LIBCFS_FREE(buf, 1024); + LIBCFS_FREE(hdr, buf_len); return err; } @@ -329,10 +217,10 @@ struct cfs_psdev_ops libcfs_psdev_ops = { libcfs_ioctl }; -static int proc_call_handler(void *data, int write, loff_t *ppos, - void __user *buffer, size_t *lenp, - int (*handler)(void *data, int write, - loff_t pos, void __user *buffer, int len)) +int lprocfs_call_handler(void *data, int write, loff_t *ppos, + void __user *buffer, size_t *lenp, + int (*handler)(void *data, int write, loff_t pos, + void __user *buffer, int len)) { int rc = handler(data, write, *ppos, buffer, *lenp); @@ -347,6 +235,7 @@ static int proc_call_handler(void *data, int write, loff_t *ppos, } return 0; } +EXPORT_SYMBOL(lprocfs_call_handler); static int __proc_dobitmasks(void *data, int write, loff_t pos, void __user *buffer, int nob) @@ -392,8 +281,8 @@ static int __proc_dobitmasks(void *data, int write, static int proc_dobitmasks(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_dobitmasks); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_dobitmasks); } static int __proc_dump_kernel(void *data, int write, @@ -408,8 +297,8 @@ static int __proc_dump_kernel(void *data, int write, static int proc_dump_kernel(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_dump_kernel); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_dump_kernel); } static int __proc_daemon_file(void *data, int write, @@ -431,8 +320,8 @@ static int __proc_daemon_file(void *data, int write, static int proc_daemon_file(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_daemon_file); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_daemon_file); } static int libcfs_force_lbug(struct ctl_table *table, int write, @@ -467,11 +356,11 @@ static int __proc_cpt_table(void *data, int write, if (write) return -EPERM; - LASSERT(cfs_cpt_table != NULL); + LASSERT(cfs_cpt_table); while (1) { LIBCFS_ALLOC(buf, len); - if (buf == NULL) + if (!buf) return -ENOMEM; rc = cfs_cpt_table_print(cfs_cpt_table, buf, len); @@ -493,23 +382,19 @@ static int __proc_cpt_table(void *data, int write, rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL); out: - if (buf != NULL) + if (buf) LIBCFS_FREE(buf, len); return rc; } static int proc_cpt_table(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) + void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_cpt_table); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + 
__proc_cpt_table); } static struct ctl_table lnet_table[] = { - /* - * NB No .strategy entries have been provided since sysctl(8) prefers - * to go via /proc for portability. - */ { .procname = "debug", .data = &libcfs_debug, @@ -640,47 +525,68 @@ static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf, return error; } -static const struct file_operations lnet_debugfs_file_operations = { +static const struct file_operations lnet_debugfs_file_operations_rw = { .open = simple_open, .read = lnet_debugfs_read, .write = lnet_debugfs_write, .llseek = default_llseek, }; +static const struct file_operations lnet_debugfs_file_operations_ro = { + .open = simple_open, + .read = lnet_debugfs_read, + .llseek = default_llseek, +}; + +static const struct file_operations lnet_debugfs_file_operations_wo = { + .open = simple_open, + .write = lnet_debugfs_write, + .llseek = default_llseek, +}; + +static const struct file_operations *lnet_debugfs_fops_select(umode_t mode) +{ + if (!(mode & S_IWUGO)) + return &lnet_debugfs_file_operations_ro; + + if (!(mode & S_IRUGO)) + return &lnet_debugfs_file_operations_wo; + + return &lnet_debugfs_file_operations_rw; +} + void lustre_insert_debugfs(struct ctl_table *table, const struct lnet_debugfs_symlink_def *symlinks) { - struct dentry *entry; - - if (lnet_debugfs_root == NULL) + if (!lnet_debugfs_root) lnet_debugfs_root = debugfs_create_dir("lnet", NULL); /* Even if we cannot create, just ignore it altogether) */ if (IS_ERR_OR_NULL(lnet_debugfs_root)) return; + /* We don't save the dentry returned in next two calls, because + * we don't call debugfs_remove() but rather remove_recursive() + */ for (; table->procname; table++) - entry = debugfs_create_file(table->procname, table->mode, - lnet_debugfs_root, table, - &lnet_debugfs_file_operations); + debugfs_create_file(table->procname, table->mode, + lnet_debugfs_root, table, + lnet_debugfs_fops_select(table->mode)); for (; symlinks && symlinks->name; symlinks++) - entry = debugfs_create_symlink(symlinks->name, - lnet_debugfs_root, - symlinks->target); - + debugfs_create_symlink(symlinks->name, lnet_debugfs_root, + symlinks->target); } EXPORT_SYMBOL_GPL(lustre_insert_debugfs); static void lustre_remove_debugfs(void) { - if (lnet_debugfs_root != NULL) - debugfs_remove_recursive(lnet_debugfs_root); + debugfs_remove_recursive(lnet_debugfs_root); lnet_debugfs_root = NULL; } -static int init_libcfs_module(void) +static int libcfs_init(void) { int rc; @@ -736,7 +642,7 @@ cleanup_cpu: return rc; } -static void exit_libcfs_module(void) +static void libcfs_exit(void) { int rc; @@ -759,7 +665,10 @@ static void exit_libcfs_module(void) pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc); } -MODULE_VERSION("1.0.0"); +MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); +MODULE_DESCRIPTION("Lustre helper library"); +MODULE_VERSION(LIBCFS_VERSION); +MODULE_LICENSE("GPL"); -module_init(init_libcfs_module); -module_exit(exit_libcfs_module); +module_init(libcfs_init); +module_exit(libcfs_exit); diff --git a/drivers/staging/lustre/lustre/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c index 4147664ff57a..c75ae9a68e76 100644 --- a/drivers/staging/lustre/lustre/libcfs/prng.c +++ b/drivers/staging/lustre/lnet/libcfs/prng.c @@ -42,11 +42,11 @@ #include "../../include/linux/libcfs/libcfs.h" /* -From: George Marsaglia <geo@stat.fsu.edu> -Newsgroups: sci.math -Subject: Re: A RANDOM NUMBER GENERATOR FOR C -Date: Tue, 30 Sep 1997 05:29:35 -0700 - + * From: George Marsaglia <geo@stat.fsu.edu> + * Newsgroups: sci.math + * Subject: Re: A RANDOM NUMBER GENERATOR FOR C + * Date: Tue, 30 Sep 1997 05:29:35 -0700 + * * You may replace the two constants 36969 and 18000 by any * pair of distinct constants from this list: * 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584 @@ -58,7 +58,8 @@ Date: Tue, 30 Sep 1997 05:29:35 -0700 * 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013 * 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083 * (or any other 16-bit constants k for which both k*2^16-1 - * and k*2^15-1 are prime) */ + * and k*2^15-1 are prime) + */ #define RANDOM_CONST_A 18030 #define RANDOM_CONST_B 29013 diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c index 65c4f1ab0de8..ec3bc04bd89f 100644 --- a/drivers/staging/lustre/lustre/libcfs/tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -56,6 +56,51 @@ static int thread_running; static atomic_t cfs_tage_allocated = ATOMIC_INIT(0); +struct page_collection { + struct list_head pc_pages; + /* + * if this flag is set, collect_pages() will spill both + * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, + * only ->tcd_pages are spilled. + */ + int pc_want_daemon_pages; +}; + +struct tracefiled_ctl { + struct completion tctl_start; + struct completion tctl_stop; + wait_queue_head_t tctl_waitq; + pid_t tctl_pid; + atomic_t tctl_shutdown; +}; + +/* + * small data-structure for each page owned by tracefiled. 
+ */ +struct cfs_trace_page { + /* + * page itself + */ + struct page *page; + /* + * linkage into one of the lists in trace_data_union or + * page_collection + */ + struct list_head linkage; + /* + * number of bytes used within this page + */ + unsigned int used; + /* + * cpu that owns this page + */ + unsigned short cpu; + /* + * type(context) of this page + */ + unsigned short type; +}; + static void put_pages_on_tcd_daemon_list(struct page_collection *pc, struct cfs_trace_cpu_data *tcd); @@ -80,11 +125,11 @@ static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) */ gfp |= __GFP_NOWARN; page = alloc_page(gfp); - if (page == NULL) + if (!page) return NULL; tage = kmalloc(sizeof(*tage), gfp); - if (tage == NULL) { + if (!tage) { __free_page(page); return NULL; } @@ -96,9 +141,6 @@ static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) static void cfs_tage_free(struct cfs_trace_page *tage) { - __LASSERT(tage != NULL); - __LASSERT(tage->page != NULL); - __free_page(tage->page); kfree(tage); atomic_dec(&cfs_tage_allocated); @@ -107,9 +149,6 @@ static void cfs_tage_free(struct cfs_trace_page *tage) static void cfs_tage_to_tail(struct cfs_trace_page *tage, struct list_head *queue) { - __LASSERT(tage != NULL); - __LASSERT(queue != NULL); - list_move_tail(&tage->linkage, queue); } @@ -127,7 +166,7 @@ int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, struct cfs_trace_page *tage; tage = cfs_tage_alloc(gfp); - if (tage == NULL) + if (!tage) break; list_add_tail(&tage->linkage, stock); } @@ -154,7 +193,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) list_del_init(&tage->linkage); } else { tage = cfs_tage_alloc(GFP_ATOMIC); - if (unlikely(tage == NULL)) { + if (unlikely(!tage)) { if ((!memory_pressure_get() || in_interrupt()) && printk_ratelimit()) printk(KERN_WARNING @@ -227,7 +266,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, } tage = cfs_trace_get_tage_try(tcd, len); - if (tage != NULL) + if (tage) return tage; if (thread_running) cfs_tcd_shrink(tcd); @@ -278,10 +317,11 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, /* cfs_trace_get_tcd() grabs a lock, which disables preemption and * pins us to a particular CPU. This avoids an smp_processor_id() - * warning on Linux when debugging is enabled. */ + * warning on Linux when debugging is enabled. + */ cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK()); - if (tcd == NULL) /* arch may not log in IRQ context */ + if (!tcd) /* arch may not log in IRQ context */ goto console; if (tcd->tcd_cur_pages == 0) @@ -301,14 +341,14 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, if (libcfs_debug_binary) known_size += sizeof(header); - /*/ + /* * '2' used because vsnprintf return real size required for output * _without_ terminating NULL. * if needed is to small for this format. 
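A quick aside on the comment above: vsnprintf() returns the length the formatted output would need, excluding the terminating NUL, even when the buffer was too small, which is why the surrounding code can size a retry from the first attempt's return value. A minimal generic sketch of that convention (hypothetical helper, not the file's code):

#include <linux/kernel.h>

/* Returns the space the output needs; if the return value is >= size,
 * the caller should retry with a buffer of at least (return value + 1). */
static int example_format(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int needed;

	va_start(ap, fmt);
	needed = vsnprintf(buf, size, fmt, ap);
	va_end(ap);

	return needed;
}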
*/ for (i = 0; i < 2; i++) { tage = cfs_trace_get_tage(tcd, needed + known_size + 1); - if (tage == NULL) { + if (!tage) { if (needed + known_size > PAGE_CACHE_SIZE) mask |= D_ERROR; @@ -352,7 +392,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, break; } - if (*(string_buf+needed-1) != '\n') + if (*(string_buf + needed - 1) != '\n') printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n", file, msgdata->msg_line, msgdata->msg_fn); @@ -384,30 +424,30 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, __LASSERT(debug_buf == string_buf); tage->used += needed; - __LASSERT (tage->used <= PAGE_CACHE_SIZE); + __LASSERT(tage->used <= PAGE_CACHE_SIZE); console: if ((mask & libcfs_printk) == 0) { /* no console output requested */ - if (tcd != NULL) + if (tcd) cfs_trace_put_tcd(tcd); return 1; } - if (cdls != NULL) { + if (cdls) { if (libcfs_console_ratelimit && cdls->cdls_next != 0 && /* not first time ever */ !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { /* skipping a console message */ cdls->cdls_count++; - if (tcd != NULL) + if (tcd) cfs_trace_put_tcd(tcd); return 1; } - if (cfs_time_after(cfs_time_current(), cdls->cdls_next + - libcfs_console_max_delay - + cfs_time_seconds(10))) { + if (cfs_time_after(cfs_time_current(), + cdls->cdls_next + libcfs_console_max_delay + + cfs_time_seconds(10))) { /* last timeout was a long time ago */ cdls->cdls_delay /= libcfs_console_backoff * 4; } else { @@ -423,7 +463,7 @@ console: cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1; } - if (tcd != NULL) { + if (tcd) { cfs_print_to_console(&header, mask, string_buf, needed, file, msgdata->msg_fn); cfs_trace_put_tcd(tcd); @@ -431,18 +471,18 @@ console: string_buf = cfs_trace_get_console_buffer(); needed = 0; - if (format1 != NULL) { + if (format1) { va_copy(ap, args); needed = vsnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, format1, ap); va_end(ap); } - if (format2 != NULL) { + if (format2) { remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed; if (remain > 0) { va_start(ap, format2); - needed += vsnprintf(string_buf+needed, remain, + needed += vsnprintf(string_buf + needed, remain, format2, ap); va_end(ap); } @@ -453,7 +493,7 @@ console: put_cpu(); } - if (cdls != NULL && cdls->cdls_count != 0) { + if (cdls && cdls->cdls_count != 0) { string_buf = cfs_trace_get_console_buffer(); needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, @@ -497,7 +537,8 @@ panic_collect_pages(struct page_collection *pc) { /* Do the collect_pages job on a single CPU: assumes that all other * CPUs have been stopped during a panic. If this isn't true for some - * arch, this will have to be implemented separately in each arch. */ + * arch, this will have to be implemented separately in each arch. 
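On the cdls console rate limiting earlier in this hunk: the delay between repeated console messages adapts, shrinking after a long quiet spell and growing while messages keep bursting, clamped between a minimum and a maximum. A generic sketch of that adaptive back-off idea, with hypothetical names and plain jiffies-based timing (the real code uses the cfs_time_* wrappers and the libcfs_console_* tunables):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_ratelimit {
	unsigned long next;	/* earliest time the next message may print */
	unsigned long delay;	/* current back-off interval, in jiffies */
};

static bool example_ratelimit_ok(struct example_ratelimit *rl,
				 unsigned long min_delay,
				 unsigned long max_delay,
				 unsigned int backoff)
{
	unsigned long now = jiffies;

	if (time_before(now, rl->next))
		return false;			/* still suppressed */

	if (time_after(now, rl->next + max_delay))
		rl->delay /= backoff * 4;	/* long quiet spell: relax */
	else
		rl->delay *= backoff;		/* still bursty: back off */

	rl->delay = clamp(rl->delay, min_delay, max_delay);
	rl->next = now + rl->delay;
	return true;
}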
+ */ int i; int j; struct cfs_trace_cpu_data *tcd; @@ -509,8 +550,7 @@ panic_collect_pages(struct page_collection *pc) tcd->tcd_cur_pages = 0; if (pc->pc_want_daemon_pages) { - list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); + list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages); tcd->tcd_cur_daemon_pages = 0; } } @@ -527,7 +567,7 @@ static void collect_pages_on_all_cpus(struct page_collection *pc) tcd->tcd_cur_pages = 0; if (pc->pc_want_daemon_pages) { list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); + &pc->pc_pages); tcd->tcd_cur_daemon_pages = 0; } } @@ -558,7 +598,6 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc) list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { - __LASSERT_TAGE_INVARIANT(tage); if (tage->cpu != cpu || tage->type != i) @@ -580,7 +619,8 @@ static void put_pages_back(struct page_collection *pc) /* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that * we have a good amount of data at all times for dumping during an LBUG, even * if we have been steadily writing (and otherwise discarding) pages via the - * debug daemon. */ + * debug daemon. + */ static void put_pages_on_tcd_daemon_list(struct page_collection *pc, struct cfs_trace_cpu_data *tcd) { @@ -588,7 +628,6 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc, struct cfs_trace_page *tmp; list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { - __LASSERT_TAGE_INVARIANT(tage); if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) @@ -674,12 +713,13 @@ int cfs_tracefile_dump_all_pages(char *filename) cfs_tracefile_write_lock(); - filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600); + filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, + 0600); if (IS_ERR(filp)) { rc = PTR_ERR(filp); filp = NULL; pr_err("LustreError: can't open %s for dump: rc %d\n", - filename, rc); + filename, rc); goto out; } @@ -691,10 +731,10 @@ int cfs_tracefile_dump_all_pages(char *filename) } /* ok, for now, just write the pages. in the future we'll be building - * iobufs with the pages and calling generic_direct_IO */ + * iobufs with the pages and calling generic_direct_IO + */ MMSPACE_OPEN; list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - __LASSERT_TAGE_INVARIANT(tage); buf = kmap(tage->page); @@ -732,7 +772,6 @@ void cfs_trace_flush_pages(void) pc.pc_want_daemon_pages = 1; collect_pages(&pc); list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - __LASSERT_TAGE_INVARIANT(tage); list_del(&tage->linkage); @@ -771,9 +810,10 @@ EXPORT_SYMBOL(cfs_trace_copyin_string); int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, const char *knl_buffer, char *append) { - /* NB if 'append' != NULL, it's a single character to append to the - * copied out string - usually "\n", for /proc entries and "" (i.e. a - * terminating zero byte) for sysctl entries */ + /* + * NB if 'append' != NULL, it's a single character to append to the + * copied out string - usually "\n" or "" (i.e. 
a terminating zero byte) + */ int nob = strlen(knl_buffer); if (nob > usr_buffer_nob) @@ -782,7 +822,7 @@ int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, if (copy_to_user(usr_buffer, knl_buffer, nob)) return -EFAULT; - if (append != NULL && nob < usr_buffer_nob) { + if (append && nob < usr_buffer_nob) { if (copy_to_user(usr_buffer + nob, append, 1)) return -EFAULT; @@ -799,7 +839,7 @@ int cfs_trace_allocate_string_buffer(char **str, int nob) return -EINVAL; *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); - if (*str == NULL) + if (!*str) return -ENOMEM; return 0; @@ -842,12 +882,15 @@ int cfs_trace_daemon_command(char *str) memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); } else if (strncmp(str, "size=", 5) == 0) { - cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0); - if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480) - cfs_tracefile_size = CFS_TRACEFILE_SIZE; - else - cfs_tracefile_size <<= 20; - + unsigned long tmp; + + rc = kstrtoul(str + 5, 10, &tmp); + if (!rc) { + if (tmp < 10 || tmp > 20480) + cfs_tracefile_size = CFS_TRACEFILE_SIZE; + else + cfs_tracefile_size = tmp << 20; + } } else if (strlen(str) >= sizeof(cfs_tracefile)) { rc = -ENAMETOOLONG; } else if (str[0] != '/') { @@ -877,7 +920,7 @@ int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) return rc; rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); + usr_str, usr_str_nob); if (rc == 0) rc = cfs_trace_daemon_command(str); @@ -977,7 +1020,7 @@ static int tracefiled(void *arg) } } cfs_tracefile_read_unlock(); - if (filp == NULL) { + if (!filp) { put_pages_on_daemon_list(&pc); __LASSERT(list_empty(&pc.pc_pages)); goto end_loop; @@ -985,8 +1028,7 @@ static int tracefiled(void *arg) MMSPACE_OPEN; - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) { + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { static loff_t f_pos; __LASSERT_TAGE_INVARIANT(tage); @@ -1017,8 +1059,7 @@ static int tracefiled(void *arg) int i; printk(KERN_ALERT "Lustre: trace pages aren't empty\n"); - pr_err("total cpus(%d): ", - num_possible_cpus()); + pr_err("total cpus(%d): ", num_possible_cpus()); for (i = 0; i < num_possible_cpus(); i++) if (cpu_online(i)) pr_cont("%d(on) ", i); @@ -1028,9 +1069,9 @@ static int tracefiled(void *arg) i = 0; list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) + linkage) pr_err("page %d belongs to cpu %d\n", - ++i, tage->cpu); + ++i, tage->cpu); pr_err("There are %d pages unwritten\n", i); } __LASSERT(list_empty(&pc.pc_pages)); @@ -1056,6 +1097,7 @@ end_loop: int cfs_trace_start_thread(void) { struct tracefiled_ctl *tctl = &trace_tctl; + struct task_struct *task; int rc = 0; mutex_lock(&cfs_trace_thread_mutex); @@ -1067,8 +1109,9 @@ int cfs_trace_start_thread(void) init_waitqueue_head(&tctl->tctl_waitq); atomic_set(&tctl->tctl_shutdown, 0); - if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) { - rc = -ECHILD; + task = kthread_run(tracefiled, tctl, "ktracefiled"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); goto out; } @@ -1135,7 +1178,7 @@ static void trace_cleanup_on_all_cpus(void) tcd->tcd_shutting_down = 1; list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, - linkage) { + linkage) { __LASSERT_TAGE_INVARIANT(tage); list_del(&tage->linkage); diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h index 7bf1471a54fb..4c77f9044dd3 100644 --- a/drivers/staging/lustre/lustre/libcfs/tracefile.h +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h @@ 
-39,12 +39,12 @@ #include "../../include/linux/libcfs/libcfs.h" -typedef enum { +enum cfs_trace_buf_type { CFS_TCD_TYPE_PROC = 0, CFS_TCD_TYPE_SOFTIRQ, CFS_TCD_TYPE_IRQ, CFS_TCD_TYPE_MAX -} cfs_trace_buf_type_t; +}; /* trace file lock routines */ @@ -101,8 +101,10 @@ int cfs_trace_max_debug_mb(void); #define CFS_TRACEFILE_SIZE (500 << 20) -/* Size of a buffer for sprinting console messages if we can't get a page - * from system */ +/* + * Size of a buffer for sprinting console messages if we can't get a page + * from system + */ #define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024 union cfs_trace_data_union { @@ -185,66 +187,15 @@ union cfs_trace_data_union { extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS]; #define cfs_tcd_for_each(tcd, i, j) \ - for (i = 0; cfs_trace_data[i] != NULL; i++) \ - for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \ - j < num_possible_cpus(); \ - j++, (tcd) = &(*cfs_trace_data[i])[j].tcd) + for (i = 0; cfs_trace_data[i]; i++) \ + for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \ + j < num_possible_cpus(); \ + j++, (tcd) = &(*cfs_trace_data[i])[j].tcd) #define cfs_tcd_for_each_type_lock(tcd, i, cpu) \ - for (i = 0; cfs_trace_data[i] && \ - (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \ - cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++) - -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct page_collection { - struct list_head pc_pages; - /* - * if this flag is set, collect_pages() will spill both - * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, - * only ->tcd_pages are spilled. - */ - int pc_want_daemon_pages; -}; - -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct tracefiled_ctl { - struct completion tctl_start; - struct completion tctl_stop; - wait_queue_head_t tctl_waitq; - pid_t tctl_pid; - atomic_t tctl_shutdown; -}; - -/* - * small data-structure for each page owned by tracefiled. 
- */ -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct cfs_trace_page { - /* - * page itself - */ - struct page *page; - /* - * linkage into one of the lists in trace_data_union or - * page_collection - */ - struct list_head linkage; - /* - * number of bytes used within this page - */ - unsigned int used; - /* - * cpu that owns this page - */ - unsigned short cpu; - /* - * type(context) of this page - */ - unsigned short type; -}; + for (i = 0; cfs_trace_data[i] && \ + (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \ + cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++) void cfs_set_ptldebug_header(struct ptldebug_header *header, struct libcfs_debug_msg_data *m, @@ -257,7 +208,7 @@ int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking); void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking); extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX]; -cfs_trace_buf_type_t cfs_trace_buf_idx_get(void); +enum cfs_trace_buf_type cfs_trace_buf_idx_get(void); static inline char * cfs_trace_get_console_buffer(void) @@ -279,8 +230,7 @@ cfs_trace_get_tcd(void) return tcd; } -static inline void -cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd) +static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd) { cfs_trace_unlock_tcd(tcd, 0); @@ -290,9 +240,6 @@ cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd) int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, struct list_head *stock); -int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd, - struct cfs_trace_page *tage); - void cfs_trace_assertion_failed(const char *str, struct libcfs_debug_msg_data *m); @@ -308,8 +255,8 @@ do { \ #define __LASSERT_TAGE_INVARIANT(tage) \ do { \ - __LASSERT(tage != NULL); \ - __LASSERT(tage->page != NULL); \ + __LASSERT(tage); \ + __LASSERT(tage->page); \ __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ __LASSERT(page_count(tage->page) > 0); \ } while (0) diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c index 60bb88a00b41..c72fe00dce8d 100644 --- a/drivers/staging/lustre/lustre/libcfs/workitem.c +++ b/drivers/staging/lustre/lnet/libcfs/workitem.c @@ -46,18 +46,21 @@ #define CFS_WS_NAME_LEN 16 struct cfs_wi_sched { - struct list_head ws_list; /* chain on global list */ + /* chain on global list */ + struct list_head ws_list; /** serialised workitems */ spinlock_t ws_lock; /** where schedulers sleep */ wait_queue_head_t ws_waitq; /** concurrent workitems */ struct list_head ws_runq; - /** rescheduled running-workitems, a workitem can be rescheduled + /** + * rescheduled running-workitems, a workitem can be rescheduled * while running in wi_action(), but we don't to execute it again * unless it returns from wi_action(), so we put it on ws_rerunq * while rescheduling, and move it to runq after it returns - * from wi_action() */ + * from wi_action() + */ struct list_head ws_rerunq; /** CPT-table for this scheduler */ struct cfs_cpt_table *ws_cptab; @@ -128,8 +131,6 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi) wi->wi_scheduled = 1; /* LBUG future schedule attempts */ spin_unlock(&sched->ws_lock); - - return; } EXPORT_SYMBOL(cfs_wi_exit); @@ -163,7 +164,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) wi->wi_scheduled = 0; } - LASSERT (list_empty(&wi->wi_list)); + LASSERT(list_empty(&wi->wi_list)); spin_unlock(&sched->ws_lock); return rc; @@ -186,7 +187,7 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, 
cfs_workitem_t *wi) spin_lock(&sched->ws_lock); if (!wi->wi_scheduled) { - LASSERT (list_empty(&wi->wi_list)); + LASSERT(list_empty(&wi->wi_list)); wi->wi_scheduled = 1; sched->ws_nscheduled++; @@ -198,21 +199,19 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) } } - LASSERT (!list_empty(&wi->wi_list)); + LASSERT(!list_empty(&wi->wi_list)); spin_unlock(&sched->ws_lock); - return; } EXPORT_SYMBOL(cfs_wi_schedule); -static int -cfs_wi_scheduler (void *arg) +static int cfs_wi_scheduler(void *arg) { struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg; cfs_block_allsigs(); /* CPT affinity scheduler? */ - if (sched->ws_cptab != NULL) + if (sched->ws_cptab) if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0) CWARN("Failed to bind %s on CPT %d\n", sched->ws_name, sched->ws_cpt); @@ -234,8 +233,8 @@ cfs_wi_scheduler (void *arg) while (!list_empty(&sched->ws_runq) && nloops < CFS_WI_RESCHED) { - wi = list_entry(sched->ws_runq.next, - cfs_workitem_t, wi_list); + wi = list_entry(sched->ws_runq.next, cfs_workitem_t, + wi_list); LASSERT(wi->wi_scheduled && !wi->wi_running); list_del_init(&wi->wi_list); @@ -261,14 +260,16 @@ cfs_wi_scheduler (void *arg) LASSERT(wi->wi_scheduled); /* wi is rescheduled, should be on rerunq now, we - * move it to runq so it can run action now */ + * move it to runq so it can run action now + */ list_move_tail(&wi->wi_list, &sched->ws_runq); } if (!list_empty(&sched->ws_runq)) { spin_unlock(&sched->ws_lock); /* don't sleep because some workitems still - * expect me to come back soon */ + * expect me to come back soon + */ cond_resched(); spin_lock(&sched->ws_lock); continue; @@ -343,14 +344,18 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, LASSERT(cfs_wi_data.wi_init); LASSERT(!cfs_wi_data.wi_stopping); - LASSERT(cptab == NULL || cpt == CFS_CPT_ANY || + LASSERT(!cptab || cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cfs_cpt_number(cptab))); LIBCFS_ALLOC(sched, sizeof(*sched)); - if (sched == NULL) + if (!sched) return -ENOMEM; - strlcpy(sched->ws_name, name, CFS_WS_NAME_LEN); + if (strlen(name) > sizeof(sched->ws_name) - 1) { + LIBCFS_FREE(sched, sizeof(*sched)); + return -E2BIG; + } + strncpy(sched->ws_name, name, sizeof(sched->ws_name)); sched->ws_cptab = cptab; sched->ws_cpt = cpt; @@ -376,7 +381,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, sched->ws_starting++; spin_unlock(&cfs_wi_data.wi_glock); - if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) { + if (sched->ws_cptab && sched->ws_cpt >= 0) { snprintf(name, sizeof(name), "%s_%02d_%02u", sched->ws_name, sched->ws_cpt, sched->ws_nthreads); @@ -431,6 +436,7 @@ void cfs_wi_shutdown(void) { struct cfs_wi_sched *sched; + struct cfs_wi_sched *temp; spin_lock(&cfs_wi_data.wi_glock); cfs_wi_data.wi_stopping = 1; @@ -453,9 +459,7 @@ cfs_wi_shutdown(void) } spin_unlock(&cfs_wi_data.wi_glock); } - while (!list_empty(&cfs_wi_data.wi_scheds)) { - sched = list_entry(cfs_wi_data.wi_scheds.next, - struct cfs_wi_sched, ws_list); + list_for_each_entry_safe(sched, temp, &cfs_wi_data.wi_scheds, ws_list) { list_del(&sched->ws_list); LIBCFS_FREE(sched, sizeof(*sched)); } diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile index e276fe2bf8f3..4c81fa19429a 100644 --- a/drivers/staging/lustre/lnet/lnet/Makefile +++ b/drivers/staging/lustre/lnet/lnet/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_LNET) += lnet.o -lnet-y := api-ni.o config.o nidstrings.o \ +lnet-y := api-ni.o config.o nidstrings.o net_fault.o \ lib-me.o lib-msg.o lib-eq.o lib-md.o 
lib-ptl.o \ lib-socket.o lib-move.o module.o lo.o \ router.o router_proc.o acceptor.o peer.o diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c index fed57d90028d..1452bb3ad9eb 100644 --- a/drivers/staging/lustre/lnet/lnet/acceptor.c +++ b/drivers/staging/lustre/lnet/lnet/acceptor.c @@ -36,6 +36,7 @@ #define DEBUG_SUBSYSTEM S_LNET #include <linux/completion.h> +#include <net/sock.h> #include "../../include/linux/lnet/lib-lnet.h" static int accept_port = 988; @@ -46,7 +47,9 @@ static struct { int pta_shutdown; struct socket *pta_sock; struct completion pta_signal; -} lnet_acceptor_state; +} lnet_acceptor_state = { + .pta_shutdown = 1 +}; int lnet_acceptor_port(void) @@ -78,9 +81,11 @@ static char *accept_type; static int lnet_acceptor_get_tunables(void) { - /* Userland acceptor uses 'accept_type' instead of 'accept', due to + /* + * Userland acceptor uses 'accept_type' instead of 'accept', due to * conflict with 'accept(2)', but kernel acceptor still uses 'accept' - * for compatibility. Hence the trick. */ + * for compatibility. Hence the trick. + */ accept_type = accept; return 0; } @@ -140,7 +145,7 @@ EXPORT_SYMBOL(lnet_connect_console_error); int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, - __u32 local_ip, __u32 peer_ip, int peer_port) + __u32 local_ip, __u32 peer_ip, int peer_port) { lnet_acceptor_connreq_t cr; struct socket *sock; @@ -157,7 +162,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip, peer_port); - if (rc != 0) { + if (rc) { if (fatal) goto failed; continue; @@ -169,14 +174,14 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; cr.acr_nid = peer_nid; - if (the_lnet.ln_testprotocompat != 0) { + if (the_lnet.ln_testprotocompat) { /* single-shot proto check */ lnet_net_lock(LNET_LOCK_EX); - if ((the_lnet.ln_testprotocompat & 4) != 0) { + if (the_lnet.ln_testprotocompat & 4) { cr.acr_version++; the_lnet.ln_testprotocompat &= ~4; } - if ((the_lnet.ln_testprotocompat & 8) != 0) { + if (the_lnet.ln_testprotocompat & 8) { cr.acr_magic = LNET_PROTO_MAGIC; the_lnet.ln_testprotocompat &= ~8; } @@ -184,7 +189,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, } rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); - if (rc != 0) + if (rc) goto failed_sock; *sockp = sock; @@ -202,8 +207,6 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, } EXPORT_SYMBOL(lnet_connect); -/* Below is the code common for both kernel and MT user-space */ - static int lnet_accept(struct socket *sock, __u32 magic) { @@ -218,23 +221,23 @@ lnet_accept(struct socket *sock, __u32 magic) LASSERT(sizeof(cr) <= 16); /* not too big for the stack */ rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); - LASSERT(rc == 0); /* we succeeded before */ + LASSERT(!rc); /* we succeeded before */ if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) { - if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) { - /* future version compatibility! + /* + * future version compatibility! * When LNET unifies protocols over all LNDs, the first - * thing sent will be a version query. I send back - * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */ - + * thing sent will be a version query. 
I send back + * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" + */ memset(&cr, 0, sizeof(cr)); cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC; cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); - if (rc != 0) + if (rc) CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n", &peer_ip, rc); return -EPROTO; @@ -254,9 +257,9 @@ lnet_accept(struct socket *sock, __u32 magic) rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version), accept_timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading connection request version from %pI4h\n", - rc, &peer_ip); + rc, &peer_ip); return -EIO; } @@ -264,10 +267,12 @@ lnet_accept(struct socket *sock, __u32 magic) __swab32s(&cr.acr_version); if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) { - /* future version compatibility! + /* + * future version compatibility! * An acceptor-specific protocol rev will first send a version * query. I send back my current version to tell her I'm - * "old". */ + * "old". + */ int peer_version = cr.acr_version; memset(&cr, 0, sizeof(cr)); @@ -275,7 +280,7 @@ lnet_accept(struct socket *sock, __u32 magic) cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); - if (rc != 0) + if (rc) CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n", peer_version, &peer_ip, rc); return -EPROTO; @@ -285,9 +290,9 @@ lnet_accept(struct socket *sock, __u32 magic) sizeof(cr) - offsetof(lnet_acceptor_connreq_t, acr_nid), accept_timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading connection request from %pI4h\n", - rc, &peer_ip); + rc, &peer_ip); return -EIO; } @@ -295,20 +300,20 @@ lnet_accept(struct socket *sock, __u32 magic) __swab64s(&cr.acr_nid); ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid)); - if (ni == NULL || /* no matching net */ + if (!ni || /* no matching net */ ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! 
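As context for the lnet_accept() changes above: the acceptor negotiates by reading the peer's magic and version first and, on any mismatch, writing back its own magic and version so an older or newer peer can tell what this side speaks before the connection is refused with -EPROTO. A condensed, hypothetical sketch of that reply step, reusing only names that appear in this file:

static int example_reject_version(struct socket *sock, __u32 peer_version)
{
	lnet_acceptor_connreq_t cr;

	if (peer_version == LNET_PROTO_ACCEPTOR_VERSION)
		return 0;		/* versions agree, nothing to do */

	memset(&cr, 0, sizeof(cr));
	cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
	cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;

	/* best effort: tell the peer what we speak, then refuse */
	lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
	return -EPROTO;
}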
*/ - if (ni != NULL) + if (ni) lnet_ni_decref(ni); LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n", &peer_ip, libcfs_nid2str(cr.acr_nid)); return -EPERM; } - if (ni->ni_lnd->lnd_accept == NULL) { + if (!ni->ni_lnd->lnd_accept) { /* This catches a request for the loopback LND */ lnet_ni_decref(ni); LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n", - &peer_ip, libcfs_nid2str(cr.acr_nid)); + &peer_ip, libcfs_nid2str(cr.acr_nid)); return -EPERM; } @@ -331,13 +336,13 @@ lnet_acceptor(void *arg) int peer_port; int secure = (int)((long_ptr_t)arg); - LASSERT(lnet_acceptor_state.pta_sock == NULL); + LASSERT(!lnet_acceptor_state.pta_sock); cfs_block_allsigs(); rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port, accept_backlog); - if (rc != 0) { + if (rc) { if (rc == -EADDRINUSE) LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n", accept_port); @@ -354,13 +359,12 @@ lnet_acceptor(void *arg) lnet_acceptor_state.pta_shutdown = rc; complete(&lnet_acceptor_state.pta_signal); - if (rc != 0) + if (rc) return rc; while (!lnet_acceptor_state.pta_shutdown) { - rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock); - if (rc != 0) { + if (rc) { if (rc != -EAGAIN) { CWARN("Accept error %d: pausing...\n", rc); set_current_state(TASK_UNINTERRUPTIBLE); @@ -376,7 +380,7 @@ lnet_acceptor(void *arg) } rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port); - if (rc != 0) { + if (rc) { CERROR("Can't determine new connection's address\n"); goto failed; } @@ -389,14 +393,14 @@ lnet_acceptor(void *arg) rc = lnet_sock_read(newsock, &magic, sizeof(magic), accept_timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading connection request from %pI4h\n", - rc, &peer_ip); + rc, &peer_ip); goto failed; } rc = lnet_accept(newsock, magic); - if (rc != 0) + if (rc) goto failed; continue; @@ -436,14 +440,19 @@ accept2secure(const char *acc, long *sec) int lnet_acceptor_start(void) { + struct task_struct *task; int rc; long rc2; long secure; - LASSERT(lnet_acceptor_state.pta_sock == NULL); + /* if acceptor is already running return immediately */ + if (!lnet_acceptor_state.pta_shutdown) + return 0; + + LASSERT(!lnet_acceptor_state.pta_sock); rc = lnet_acceptor_get_tunables(); - if (rc != 0) + if (rc) return rc; init_completion(&lnet_acceptor_state.pta_signal); @@ -451,13 +460,13 @@ lnet_acceptor_start(void) if (rc <= 0) return rc; - if (lnet_count_acceptor_nis() == 0) /* not required */ + if (!lnet_count_acceptor_nis()) /* not required */ return 0; - rc2 = PTR_ERR(kthread_run(lnet_acceptor, - (void *)(ulong_ptr_t)secure, - "acceptor_%03ld", secure)); - if (IS_ERR_VALUE(rc2)) { + task = kthread_run(lnet_acceptor, (void *)(ulong_ptr_t)secure, + "acceptor_%03ld", secure); + if (IS_ERR(task)) { + rc2 = PTR_ERR(task); CERROR("Can't start acceptor thread: %ld\n", rc2); return -ESRCH; @@ -468,11 +477,11 @@ lnet_acceptor_start(void) if (!lnet_acceptor_state.pta_shutdown) { /* started OK */ - LASSERT(lnet_acceptor_state.pta_sock != NULL); + LASSERT(lnet_acceptor_state.pta_sock); return 0; } - LASSERT(lnet_acceptor_state.pta_sock == NULL); + LASSERT(!lnet_acceptor_state.pta_sock); return -ENETDOWN; } @@ -480,11 +489,17 @@ lnet_acceptor_start(void) void lnet_acceptor_stop(void) { - if (lnet_acceptor_state.pta_sock == NULL) /* not running */ + struct sock *sk; + + if (lnet_acceptor_state.pta_shutdown) /* not running */ return; lnet_acceptor_state.pta_shutdown = 1; - 
wake_up_all(sk_sleep(lnet_acceptor_state.pta_sock->sk)); + + sk = lnet_acceptor_state.pta_sock->sk; + + /* awake any sleepers using safe method */ + sk->sk_state_change(sk); /* block until acceptor signals exit */ wait_for_completion(&lnet_acceptor_state.pta_signal); diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c index 362282fa00bf..8764755544c9 100644 --- a/drivers/staging/lustre/lnet/lnet/api-ni.c +++ b/drivers/staging/lustre/lnet/lnet/api-ni.c @@ -39,6 +39,7 @@ #include <linux/ktime.h> #include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lib-dlc.h" #define D_LNI D_CONSOLE @@ -61,6 +62,9 @@ static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT; module_param(rnet_htable_size, int, 0444); MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table"); +static int lnet_ping(lnet_process_id_t id, int timeout_ms, + lnet_process_id_t __user *ids, int n_ids); + static char * lnet_get_routes(void) { @@ -73,17 +77,17 @@ lnet_get_networks(void) char *nets; int rc; - if (*networks != 0 && *ip2nets != 0) { + if (*networks && *ip2nets) { LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n"); return NULL; } - if (*ip2nets != 0) { + if (*ip2nets) { rc = lnet_parse_ip2nets(&nets, ip2nets); - return (rc == 0) ? nets : NULL; + return !rc ? nets : NULL; } - if (*networks != 0) + if (*networks) return networks; return "tcp"; @@ -94,6 +98,7 @@ lnet_init_locks(void) { spin_lock_init(&the_lnet.ln_eq_wait_lock); init_waitqueue_head(&the_lnet.ln_eq_waitq); + init_waitqueue_head(&the_lnet.ln_rc_waitq); mutex_init(&the_lnet.ln_lnd_mutex); mutex_init(&the_lnet.ln_api_mutex); } @@ -104,10 +109,10 @@ lnet_create_remote_nets_table(void) int i; struct list_head *hash; - LASSERT(the_lnet.ln_remote_nets_hash == NULL); + LASSERT(!the_lnet.ln_remote_nets_hash); LASSERT(the_lnet.ln_remote_nets_hbits > 0); LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash)); - if (hash == NULL) { + if (!hash) { CERROR("Failed to create remote nets hash table\n"); return -ENOMEM; } @@ -123,7 +128,7 @@ lnet_destroy_remote_nets_table(void) { int i; - if (the_lnet.ln_remote_nets_hash == NULL) + if (!the_lnet.ln_remote_nets_hash) return; for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) @@ -138,12 +143,12 @@ lnet_destroy_remote_nets_table(void) static void lnet_destroy_locks(void) { - if (the_lnet.ln_res_lock != NULL) { + if (the_lnet.ln_res_lock) { cfs_percpt_lock_free(the_lnet.ln_res_lock); the_lnet.ln_res_lock = NULL; } - if (the_lnet.ln_net_lock != NULL) { + if (the_lnet.ln_net_lock) { cfs_percpt_lock_free(the_lnet.ln_net_lock); the_lnet.ln_net_lock = NULL; } @@ -155,11 +160,11 @@ lnet_create_locks(void) lnet_init_locks(); the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table()); - if (the_lnet.ln_res_lock == NULL) + if (!the_lnet.ln_res_lock) goto failed; the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table()); - if (the_lnet.ln_net_lock == NULL) + if (!the_lnet.ln_net_lock) goto failed; return 0; @@ -171,10 +176,12 @@ lnet_create_locks(void) static void lnet_assert_wire_constants(void) { - /* Wire protocol assertions generated by 'wirecheck' + /* + * Wire protocol assertions generated by 'wirecheck' * running on Linux robert.bartonsoftware.com 2.6.8-1.521 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux - * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */ + * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) + */ /* Constants... 
*/ CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded); @@ -284,9 +291,8 @@ lnet_register_lnd(lnd_t *lnd) { mutex_lock(&the_lnet.ln_lnd_mutex); - LASSERT(the_lnet.ln_init); LASSERT(libcfs_isknown_lnd(lnd->lnd_type)); - LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL); + LASSERT(!lnet_find_lnd_by_type(lnd->lnd_type)); list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds); lnd->lnd_refcount = 0; @@ -302,9 +308,8 @@ lnet_unregister_lnd(lnd_t *lnd) { mutex_lock(&the_lnet.ln_lnd_mutex); - LASSERT(the_lnet.ln_init); LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd); - LASSERT(lnd->lnd_refcount == 0); + LASSERT(!lnd->lnd_refcount); list_del(&lnd->lnd_list); CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type)); @@ -335,7 +340,6 @@ lnet_counters_get(lnet_counters_t *counters) counters->recv_length += ctr->recv_length; counters->route_length += ctr->route_length; counters->drop_length += ctr->drop_length; - } lnet_net_unlock(LNET_LOCK_EX); } @@ -375,7 +379,7 @@ lnet_res_container_cleanup(struct lnet_res_container *rec) { int count = 0; - if (rec->rec_type == 0) /* not set yet, it's uninitialized */ + if (!rec->rec_type) /* not set yet, it's uninitialized */ return; while (!list_empty(&rec->rec_active)) { @@ -395,14 +399,16 @@ lnet_res_container_cleanup(struct lnet_res_container *rec) } if (count > 0) { - /* Found alive MD/ME/EQ, user really should unlink/free + /* + * Found alive MD/ME/EQ, user really should unlink/free * all of them before finalize LNet, but if someone didn't, - * we have to recycle garbage for him */ + * we have to recycle garbage for him + */ CERROR("%d active elements on exit of %s container\n", count, lnet_res_type2str(rec->rec_type)); } - if (rec->rec_lh_hash != NULL) { + if (rec->rec_lh_hash) { LIBCFS_FREE(rec->rec_lh_hash, LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0])); rec->rec_lh_hash = NULL; @@ -417,7 +423,7 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type) int rc = 0; int i; - LASSERT(rec->rec_type == 0); + LASSERT(!rec->rec_type); rec->rec_type = type; INIT_LIST_HEAD(&rec->rec_active); @@ -426,7 +432,7 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type) /* Arbitrary choice of hash table size */ LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt, LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0])); - if (rec->rec_lh_hash == NULL) { + if (!rec->rec_lh_hash) { rc = -ENOMEM; goto out; } @@ -464,7 +470,7 @@ lnet_res_containers_create(int type) int i; recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec)); - if (recs == NULL) { + if (!recs) { CERROR("Failed to allocate %s resource containers\n", lnet_res_type2str(type)); return NULL; @@ -472,7 +478,7 @@ lnet_res_containers_create(int type) cfs_percpt_for_each(rec, i, recs) { rc = lnet_res_container_setup(rec, i, type); - if (rc != 0) { + if (rc) { lnet_res_containers_destroy(recs); return NULL; } @@ -518,7 +524,7 @@ lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh) list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]); } -int lnet_unprepare(void); +static int lnet_unprepare(void); static int lnet_prepare(lnet_pid_t requested_pid) @@ -527,11 +533,16 @@ lnet_prepare(lnet_pid_t requested_pid) struct lnet_res_container **recs; int rc = 0; - LASSERT(the_lnet.ln_refcount == 0); + if (requested_pid == LNET_PID_ANY) { + /* Don't instantiate LNET just for me */ + return -ENETDOWN; + } + + LASSERT(!the_lnet.ln_refcount); the_lnet.ln_routing = 0; - LASSERT((requested_pid & LNET_PID_USERFLAG) == 0); + LASSERT(!(requested_pid & 
LNET_PID_USERFLAG)); the_lnet.ln_pid = requested_pid; INIT_LIST_HEAD(&the_lnet.ln_test_peers); @@ -539,9 +550,11 @@ lnet_prepare(lnet_pid_t requested_pid) INIT_LIST_HEAD(&the_lnet.ln_nis_cpt); INIT_LIST_HEAD(&the_lnet.ln_nis_zombie); INIT_LIST_HEAD(&the_lnet.ln_routers); + INIT_LIST_HEAD(&the_lnet.ln_drop_rules); + INIT_LIST_HEAD(&the_lnet.ln_delay_rules); rc = lnet_create_remote_nets_table(); - if (rc != 0) + if (rc) goto failed; /* * NB the interface cookie in wire handles guards against delayed @@ -551,27 +564,27 @@ lnet_prepare(lnet_pid_t requested_pid) the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(), sizeof(lnet_counters_t)); - if (the_lnet.ln_counters == NULL) { + if (!the_lnet.ln_counters) { CERROR("Failed to allocate counters for LNet\n"); rc = -ENOMEM; goto failed; } rc = lnet_peer_tables_create(); - if (rc != 0) + if (rc) goto failed; rc = lnet_msg_containers_create(); - if (rc != 0) + if (rc) goto failed; rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0, LNET_COOKIE_TYPE_EQ); - if (rc != 0) + if (rc) goto failed; recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME); - if (recs == NULL) { + if (!recs) { rc = -ENOMEM; goto failed; } @@ -579,7 +592,7 @@ lnet_prepare(lnet_pid_t requested_pid) the_lnet.ln_me_containers = recs; recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD); - if (recs == NULL) { + if (!recs) { rc = -ENOMEM; goto failed; } @@ -587,7 +600,7 @@ lnet_prepare(lnet_pid_t requested_pid) the_lnet.ln_md_containers = recs; rc = lnet_portals_create(); - if (rc != 0) { + if (rc) { CERROR("Failed to create portals for LNet: %d\n", rc); goto failed; } @@ -599,17 +612,18 @@ lnet_prepare(lnet_pid_t requested_pid) return rc; } -int +static int lnet_unprepare(void) { - /* NB no LNET_LOCK since this is the last reference. All LND instances + /* + * NB no LNET_LOCK since this is the last reference. 
All LND instances * have shut down already, so it is safe to unlink and free all * descriptors, even those that appear committed to a network op (eg MD - * with non-zero pending count) */ - + * with non-zero pending count) + */ lnet_fail_nid(LNET_NID_ANY, 0); - LASSERT(the_lnet.ln_refcount == 0); + LASSERT(!the_lnet.ln_refcount); LASSERT(list_empty(&the_lnet.ln_test_peers)); LASSERT(list_empty(&the_lnet.ln_nis)); LASSERT(list_empty(&the_lnet.ln_nis_cpt)); @@ -617,12 +631,12 @@ lnet_unprepare(void) lnet_portals_destroy(); - if (the_lnet.ln_md_containers != NULL) { + if (the_lnet.ln_md_containers) { lnet_res_containers_destroy(the_lnet.ln_md_containers); the_lnet.ln_md_containers = NULL; } - if (the_lnet.ln_me_containers != NULL) { + if (the_lnet.ln_me_containers) { lnet_res_containers_destroy(the_lnet.ln_me_containers); the_lnet.ln_me_containers = NULL; } @@ -631,9 +645,9 @@ lnet_unprepare(void) lnet_msg_containers_destroy(); lnet_peer_tables_destroy(); - lnet_rtrpools_free(); + lnet_rtrpools_free(0); - if (the_lnet.ln_counters != NULL) { + if (the_lnet.ln_counters) { cfs_percpt_free(the_lnet.ln_counters); the_lnet.ln_counters = NULL; } @@ -709,7 +723,7 @@ lnet_cpt_of_nid_locked(lnet_nid_t nid) if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) continue; - LASSERT(ni->ni_cpts != NULL); + LASSERT(ni->ni_cpts); return ni->ni_cpts[lnet_nid_cpt_hash (nid, ni->ni_ncpts)]; } @@ -747,12 +761,12 @@ lnet_islocalnet(__u32 net) cpt = lnet_net_lock_current(); ni = lnet_net2ni_locked(net, cpt); - if (ni != NULL) + if (ni) lnet_ni_decref_locked(ni, cpt); lnet_net_unlock(cpt); - return ni != NULL; + return !!ni; } lnet_ni_t * @@ -783,11 +797,11 @@ lnet_islocalnid(lnet_nid_t nid) cpt = lnet_net_lock_current(); ni = lnet_nid2ni_locked(nid, cpt); - if (ni != NULL) + if (ni) lnet_ni_decref_locked(ni, cpt); lnet_net_unlock(cpt); - return ni != NULL; + return !!ni; } int @@ -803,7 +817,7 @@ lnet_count_acceptor_nis(void) list_for_each(tmp, &the_lnet.ln_nis) { ni = list_entry(tmp, lnet_ni_t, ni_list); - if (ni->ni_lnd->lnd_accept != NULL) + if (ni->ni_lnd->lnd_accept) count++; } @@ -812,90 +826,280 @@ lnet_count_acceptor_nis(void) return count; } -static int -lnet_ni_tq_credits(lnet_ni_t *ni) +static lnet_ping_info_t * +lnet_ping_info_create(int num_ni) { - int credits; + lnet_ping_info_t *ping_info; + unsigned int infosz; - LASSERT(ni->ni_ncpts >= 1); + infosz = offsetof(lnet_ping_info_t, pi_ni[num_ni]); + LIBCFS_ALLOC(ping_info, infosz); + if (!ping_info) { + CERROR("Can't allocate ping info[%d]\n", num_ni); + return NULL; + } - if (ni->ni_ncpts == 1) - return ni->ni_maxtxcredits; + ping_info->pi_nnis = num_ni; + ping_info->pi_pid = the_lnet.ln_pid; + ping_info->pi_magic = LNET_PROTO_PING_MAGIC; + ping_info->pi_features = LNET_PING_FEAT_NI_STATUS; - credits = ni->ni_maxtxcredits / ni->ni_ncpts; - credits = max(credits, 8 * ni->ni_peertxcredits); - credits = min(credits, ni->ni_maxtxcredits); + return ping_info; +} - return credits; +static inline int +lnet_get_ni_count(void) +{ + struct lnet_ni *ni; + int count = 0; + + lnet_net_lock(0); + + list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) + count++; + + lnet_net_unlock(0); + + return count; +} + +static inline void +lnet_ping_info_free(lnet_ping_info_t *pinfo) +{ + LIBCFS_FREE(pinfo, + offsetof(lnet_ping_info_t, + pi_ni[pinfo->pi_nnis])); } static void -lnet_shutdown_lndnis(void) +lnet_ping_info_destroy(void) { - int i; - int islo; - lnet_ni_t *ni; + struct lnet_ni *ni; - /* NB called holding the global mutex */ + lnet_net_lock(LNET_LOCK_EX); - /* All quiet on 
the API front */ - LASSERT(!the_lnet.ln_shutdown); - LASSERT(the_lnet.ln_refcount == 0); - LASSERT(list_empty(&the_lnet.ln_nis_zombie)); + list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { + lnet_ni_lock(ni); + ni->ni_status = NULL; + lnet_ni_unlock(ni); + } - lnet_net_lock(LNET_LOCK_EX); - the_lnet.ln_shutdown = 1; /* flag shutdown */ + lnet_ping_info_free(the_lnet.ln_ping_info); + the_lnet.ln_ping_info = NULL; - /* Unlink NIs from the global table */ - while (!list_empty(&the_lnet.ln_nis)) { - ni = list_entry(the_lnet.ln_nis.next, - lnet_ni_t, ni_list); - /* move it to zombie list and nobody can find it anymore */ - list_move(&ni->ni_list, &the_lnet.ln_nis_zombie); - lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */ - - if (!list_empty(&ni->ni_cptlist)) { - list_del_init(&ni->ni_cptlist); - lnet_ni_decref_locked(ni, 0); + lnet_net_unlock(LNET_LOCK_EX); +} + +static void +lnet_ping_event_handler(lnet_event_t *event) +{ + lnet_ping_info_t *pinfo = event->md.user_ptr; + + if (event->unlinked) + pinfo->pi_features = LNET_PING_FEAT_INVAL; +} + +static int +lnet_ping_info_setup(lnet_ping_info_t **ppinfo, lnet_handle_md_t *md_handle, + int ni_count, bool set_eq) +{ + lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY}; + lnet_handle_me_t me_handle; + lnet_md_t md = { NULL }; + int rc, rc2; + + if (set_eq) { + rc = LNetEQAlloc(0, lnet_ping_event_handler, + &the_lnet.ln_ping_target_eq); + if (rc) { + CERROR("Can't allocate ping EQ: %d\n", rc); + return rc; } } - /* Drop the cached eqwait NI. */ - if (the_lnet.ln_eq_waitni != NULL) { - lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0); - the_lnet.ln_eq_waitni = NULL; + *ppinfo = lnet_ping_info_create(ni_count); + if (!*ppinfo) { + rc = -ENOMEM; + goto failed_0; } - /* Drop the cached loopback NI. */ - if (the_lnet.ln_loni != NULL) { - lnet_ni_decref_locked(the_lnet.ln_loni, 0); - the_lnet.ln_loni = NULL; + rc = LNetMEAttach(LNET_RESERVED_PORTAL, id, + LNET_PROTO_PING_MATCHBITS, 0, + LNET_UNLINK, LNET_INS_AFTER, + &me_handle); + if (rc) { + CERROR("Can't create ping ME: %d\n", rc); + goto failed_1; } - lnet_net_unlock(LNET_LOCK_EX); + /* initialize md content */ + md.start = *ppinfo; + md.length = offsetof(lnet_ping_info_t, + pi_ni[(*ppinfo)->pi_nnis]); + md.threshold = LNET_MD_THRESH_INF; + md.max_size = 0; + md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE | + LNET_MD_MANAGE_REMOTE; + md.user_ptr = NULL; + md.eq_handle = the_lnet.ln_ping_target_eq; + md.user_ptr = *ppinfo; - /* Clear lazy portals and drop delayed messages which hold refs - * on their lnet_msg_t::msg_rxpeer */ - for (i = 0; i < the_lnet.ln_nportals; i++) - LNetClearLazyPortal(i); + rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle); + if (rc) { + CERROR("Can't attach ping MD: %d\n", rc); + goto failed_2; + } + + return 0; + +failed_2: + rc2 = LNetMEUnlink(me_handle); + LASSERT(!rc2); +failed_1: + lnet_ping_info_free(*ppinfo); + *ppinfo = NULL; +failed_0: + if (set_eq) + LNetEQFree(the_lnet.ln_ping_target_eq); + return rc; +} + +static void +lnet_ping_md_unlink(lnet_ping_info_t *pinfo, lnet_handle_md_t *md_handle) +{ + sigset_t blocked = cfs_block_allsigs(); + + LNetMDUnlink(*md_handle); + LNetInvalidateHandle(md_handle); + + /* NB md could be busy; this just starts the unlink */ + while (pinfo->pi_features != LNET_PING_FEAT_INVAL) { + CDEBUG(D_NET, "Still waiting for ping MD to unlink\n"); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(1)); + } + + cfs_restore_sigs(blocked); +} - /* Clear the peer table and wait for all peers to go (they 
hold refs on - * their NIs) */ - lnet_peer_tables_cleanup(); +static void +lnet_ping_info_install_locked(lnet_ping_info_t *ping_info) +{ + lnet_ni_status_t *ns; + lnet_ni_t *ni; + int i = 0; + + list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { + LASSERT(i < ping_info->pi_nnis); + ns = &ping_info->pi_ni[i]; + + ns->ns_nid = ni->ni_nid; + + lnet_ni_lock(ni); + ns->ns_status = (ni->ni_status) ? + ni->ni_status->ns_status : LNET_NI_STATUS_UP; + ni->ni_status = ns; + lnet_ni_unlock(ni); + + i++; + } +} + +static void +lnet_ping_target_update(lnet_ping_info_t *pinfo, lnet_handle_md_t md_handle) +{ + lnet_ping_info_t *old_pinfo = NULL; + lnet_handle_md_t old_md; + + /* switch the NIs to point to the new ping info created */ lnet_net_lock(LNET_LOCK_EX); - /* Now wait for the NI's I just nuked to show up on ln_zombie_nis - * and shut them down in guaranteed thread context */ + + if (!the_lnet.ln_routing) + pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED; + lnet_ping_info_install_locked(pinfo); + + if (the_lnet.ln_ping_info) { + old_pinfo = the_lnet.ln_ping_info; + old_md = the_lnet.ln_ping_target_md; + } + the_lnet.ln_ping_target_md = md_handle; + the_lnet.ln_ping_info = pinfo; + + lnet_net_unlock(LNET_LOCK_EX); + + if (old_pinfo) { + /* unlink the old ping info */ + lnet_ping_md_unlink(old_pinfo, &old_md); + lnet_ping_info_free(old_pinfo); + } +} + +static void +lnet_ping_target_fini(void) +{ + int rc; + + lnet_ping_md_unlink(the_lnet.ln_ping_info, + &the_lnet.ln_ping_target_md); + + rc = LNetEQFree(the_lnet.ln_ping_target_eq); + LASSERT(!rc); + + lnet_ping_info_destroy(); +} + +static int +lnet_ni_tq_credits(lnet_ni_t *ni) +{ + int credits; + + LASSERT(ni->ni_ncpts >= 1); + + if (ni->ni_ncpts == 1) + return ni->ni_maxtxcredits; + + credits = ni->ni_maxtxcredits / ni->ni_ncpts; + credits = max(credits, 8 * ni->ni_peertxcredits); + credits = min(credits, ni->ni_maxtxcredits); + + return credits; +} + +static void +lnet_ni_unlink_locked(lnet_ni_t *ni) +{ + if (!list_empty(&ni->ni_cptlist)) { + list_del_init(&ni->ni_cptlist); + lnet_ni_decref_locked(ni, 0); + } + + /* move it to zombie list and nobody can find it anymore */ + LASSERT(!list_empty(&ni->ni_list)); + list_move(&ni->ni_list, &the_lnet.ln_nis_zombie); + lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */ +} + +static void +lnet_clear_zombies_nis_locked(void) +{ + int i; + int islo; + lnet_ni_t *ni; + lnet_ni_t *temp; + + /* + * Now wait for the NI's I just nuked to show up on ln_zombie_nis + * and shut them down in guaranteed thread context + */ i = 2; - while (!list_empty(&the_lnet.ln_nis_zombie)) { + list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis_zombie, ni_list) { int *ref; int j; - ni = list_entry(the_lnet.ln_nis_zombie.next, - lnet_ni_t, ni_list); list_del_init(&ni->ni_list); cfs_percpt_for_each(ref, j, ni->ni_refs) { - if (*ref == 0) + if (!*ref) continue; /* still busy, add it back to zombie list */ list_add(&ni->ni_list, &the_lnet.ln_nis_zombie); @@ -921,11 +1125,12 @@ lnet_shutdown_lndnis(void) islo = ni->ni_lnd->lnd_type == LOLND; LASSERT(!in_interrupt()); - (ni->ni_lnd->lnd_shutdown)(ni); - - /* can't deref lnd anymore now; it might have unregistered - * itself... */ + ni->ni_lnd->lnd_shutdown(ni); + /* + * can't deref lnd anymore now; it might have unregistered + * itself... 
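A worked example for lnet_ni_tq_credits() just above (editor's numbers, not from the patch): the NI's transmit credits are divided evenly across its CPTs, raised to at least eight peers' worth per partition, and finally capped at the NI total. With ni_maxtxcredits = 128, ni_ncpts = 8 and ni_peertxcredits = 8:

	credits = 128 / 8        = 16;
	credits = max(16, 8 * 8) = 64;	/* floor: keep 8 peers' worth per CPT */
	credits = min(64, 128)   = 64;	/* never exceed the NI total */

so each per-CPT transmit queue ends up with 64 credits.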
+ */ if (!islo) CDEBUG(D_LNI, "Removed LNI %s\n", libcfs_nid2str(ni->ni_nid)); @@ -935,176 +1140,263 @@ lnet_shutdown_lndnis(void) lnet_net_lock(LNET_LOCK_EX); } +} + +static void +lnet_shutdown_lndnis(void) +{ + lnet_ni_t *ni; + lnet_ni_t *temp; + int i; + + /* NB called holding the global mutex */ + + /* All quiet on the API front */ + LASSERT(!the_lnet.ln_shutdown); + LASSERT(!the_lnet.ln_refcount); + LASSERT(list_empty(&the_lnet.ln_nis_zombie)); + + lnet_net_lock(LNET_LOCK_EX); + the_lnet.ln_shutdown = 1; /* flag shutdown */ + + /* Unlink NIs from the global table */ + list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis, ni_list) { + lnet_ni_unlink_locked(ni); + } + /* Drop the cached loopback NI. */ + if (the_lnet.ln_loni) { + lnet_ni_decref_locked(the_lnet.ln_loni, 0); + the_lnet.ln_loni = NULL; + } + + lnet_net_unlock(LNET_LOCK_EX); + + /* + * Clear lazy portals and drop delayed messages which hold refs + * on their lnet_msg_t::msg_rxpeer + */ + for (i = 0; i < the_lnet.ln_nportals; i++) + LNetClearLazyPortal(i); + + /* + * Clear the peer table and wait for all peers to go (they hold refs on + * their NIs) + */ + lnet_peer_tables_cleanup(NULL); + + lnet_net_lock(LNET_LOCK_EX); + + lnet_clear_zombies_nis_locked(); the_lnet.ln_shutdown = 0; lnet_net_unlock(LNET_LOCK_EX); +} - if (the_lnet.ln_network_tokens != NULL) { - LIBCFS_FREE(the_lnet.ln_network_tokens, - the_lnet.ln_network_tokens_nob); - the_lnet.ln_network_tokens = NULL; - } +/* shutdown down the NI and release refcount */ +static void +lnet_shutdown_lndni(struct lnet_ni *ni) +{ + int i; + + lnet_net_lock(LNET_LOCK_EX); + lnet_ni_unlink_locked(ni); + lnet_net_unlock(LNET_LOCK_EX); + + /* clear messages for this NI on the lazy portal */ + for (i = 0; i < the_lnet.ln_nportals; i++) + lnet_clear_lazy_portal(ni, i, "Shutting down NI"); + + /* Do peer table cleanup for this ni */ + lnet_peer_tables_cleanup(ni); + + lnet_net_lock(LNET_LOCK_EX); + lnet_clear_zombies_nis_locked(); + lnet_net_unlock(LNET_LOCK_EX); } static int -lnet_startup_lndnis(void) +lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout, + __s32 peer_cr, __s32 peer_buf_cr, __s32 credits) { + int rc = -EINVAL; + int lnd_type; lnd_t *lnd; - struct lnet_ni *ni; struct lnet_tx_queue *tq; - struct list_head nilist; int i; - int rc = 0; - __u32 lnd_type; - int nicount = 0; - char *nets = lnet_get_networks(); - INIT_LIST_HEAD(&nilist); + lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid)); - if (nets == NULL) - goto failed; + LASSERT(libcfs_isknown_lnd(lnd_type)); - rc = lnet_parse_networks(&nilist, nets); - if (rc != 0) - goto failed; + if (lnd_type == CIBLND || lnd_type == OPENIBLND || + lnd_type == IIBLND || lnd_type == VIBLND) { + CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type)); + goto failed0; + } - while (!list_empty(&nilist)) { - ni = list_entry(nilist.next, lnet_ni_t, ni_list); - lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid)); + /* Make sure this new NI is unique. 
*/ + lnet_net_lock(LNET_LOCK_EX); + rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis); + lnet_net_unlock(LNET_LOCK_EX); + if (!rc) { + if (lnd_type == LOLND) { + lnet_ni_free(ni); + return 0; + } - LASSERT(libcfs_isknown_lnd(lnd_type)); + CERROR("Net %s is not unique\n", + libcfs_net2str(LNET_NIDNET(ni->ni_nid))); + rc = -EEXIST; + goto failed0; + } - if (lnd_type == CIBLND || - lnd_type == OPENIBLND || - lnd_type == IIBLND || - lnd_type == VIBLND) { - CERROR("LND %s obsoleted\n", - libcfs_lnd2str(lnd_type)); - goto failed; - } + mutex_lock(&the_lnet.ln_lnd_mutex); + lnd = lnet_find_lnd_by_type(lnd_type); + if (!lnd) { + mutex_unlock(&the_lnet.ln_lnd_mutex); + rc = request_module("%s", libcfs_lnd2modname(lnd_type)); mutex_lock(&the_lnet.ln_lnd_mutex); - lnd = lnet_find_lnd_by_type(lnd_type); - if (lnd == NULL) { + lnd = lnet_find_lnd_by_type(lnd_type); + if (!lnd) { mutex_unlock(&the_lnet.ln_lnd_mutex); - rc = request_module("%s", - libcfs_lnd2modname(lnd_type)); - mutex_lock(&the_lnet.ln_lnd_mutex); - - lnd = lnet_find_lnd_by_type(lnd_type); - if (lnd == NULL) { - mutex_unlock(&the_lnet.ln_lnd_mutex); - CERROR("Can't load LND %s, module %s, rc=%d\n", - libcfs_lnd2str(lnd_type), - libcfs_lnd2modname(lnd_type), rc); - goto failed; - } + CERROR("Can't load LND %s, module %s, rc=%d\n", + libcfs_lnd2str(lnd_type), + libcfs_lnd2modname(lnd_type), rc); + rc = -EINVAL; + goto failed0; } + } - lnet_net_lock(LNET_LOCK_EX); - lnd->lnd_refcount++; - lnet_net_unlock(LNET_LOCK_EX); - - ni->ni_lnd = lnd; - - rc = (lnd->lnd_startup)(ni); - - mutex_unlock(&the_lnet.ln_lnd_mutex); + lnet_net_lock(LNET_LOCK_EX); + lnd->lnd_refcount++; + lnet_net_unlock(LNET_LOCK_EX); - if (rc != 0) { - LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n", - rc, libcfs_lnd2str(lnd->lnd_type)); - lnet_net_lock(LNET_LOCK_EX); - lnd->lnd_refcount--; - lnet_net_unlock(LNET_LOCK_EX); - goto failed; - } + ni->ni_lnd = lnd; - LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL); + rc = lnd->lnd_startup(ni); - list_del(&ni->ni_list); + mutex_unlock(&the_lnet.ln_lnd_mutex); + if (rc) { + LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n", + rc, libcfs_lnd2str(lnd->lnd_type)); lnet_net_lock(LNET_LOCK_EX); - /* refcount for ln_nis */ - lnet_ni_addref_locked(ni, 0); - list_add_tail(&ni->ni_list, &the_lnet.ln_nis); - if (ni->ni_cpts != NULL) { - list_add_tail(&ni->ni_cptlist, - &the_lnet.ln_nis_cpt); - lnet_ni_addref_locked(ni, 0); - } - + lnd->lnd_refcount--; lnet_net_unlock(LNET_LOCK_EX); + goto failed0; + } - if (lnd->lnd_type == LOLND) { - lnet_ni_addref(ni); - LASSERT(the_lnet.ln_loni == NULL); - the_lnet.ln_loni = ni; - continue; - } + /* + * If given some LND tunable parameters, parse those now to + * override the values in the NI structure. + */ + if (peer_buf_cr >= 0) + ni->ni_peerrtrcredits = peer_buf_cr; + if (peer_timeout >= 0) + ni->ni_peertimeout = peer_timeout; + /* + * TODO + * Note: For now, don't allow the user to change + * peertxcredits as this number is used in the + * IB LND to control queue depth. + * if (peer_cr != -1) + * ni->ni_peertxcredits = peer_cr; + */ + if (credits >= 0) + ni->ni_maxtxcredits = credits; - if (ni->ni_peertxcredits == 0 || - ni->ni_maxtxcredits == 0) { - LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n", - libcfs_lnd2str(lnd->lnd_type), - ni->ni_peertxcredits == 0 ? 
- "" : "per-peer "); - goto failed; - } + LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query); - cfs_percpt_for_each(tq, i, ni->ni_tx_queues) { - tq->tq_credits_min = - tq->tq_credits_max = - tq->tq_credits = lnet_ni_tq_credits(ni); - } + lnet_net_lock(LNET_LOCK_EX); + /* refcount for ln_nis */ + lnet_ni_addref_locked(ni, 0); + list_add_tail(&ni->ni_list, &the_lnet.ln_nis); + if (ni->ni_cpts) { + lnet_ni_addref_locked(ni, 0); + list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt); + } + + lnet_net_unlock(LNET_LOCK_EX); - CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n", - libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits, - lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER, - ni->ni_peerrtrcredits, ni->ni_peertimeout); + if (lnd->lnd_type == LOLND) { + lnet_ni_addref(ni); + LASSERT(!the_lnet.ln_loni); + the_lnet.ln_loni = ni; + return 0; + } - nicount++; + if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) { + LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n", + libcfs_lnd2str(lnd->lnd_type), + !ni->ni_peertxcredits ? + "" : "per-peer "); + /* + * shutdown the NI since if we get here then it must've already + * been started + */ + lnet_shutdown_lndni(ni); + return -EINVAL; } - if (the_lnet.ln_eq_waitni != NULL && nicount > 1) { - lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type; - LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network\n", - libcfs_lnd2str(lnd_type)); - goto failed; + cfs_percpt_for_each(tq, i, ni->ni_tx_queues) { + tq->tq_credits_min = + tq->tq_credits_max = + tq->tq_credits = lnet_ni_tq_credits(ni); } + CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n", + libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits, + lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER, + ni->ni_peerrtrcredits, ni->ni_peertimeout); + return 0; +failed0: + lnet_ni_free(ni); + return rc; +} - failed: - lnet_shutdown_lndnis(); +static int +lnet_startup_lndnis(struct list_head *nilist) +{ + struct lnet_ni *ni; + int rc; + int ni_count = 0; - while (!list_empty(&nilist)) { - ni = list_entry(nilist.next, lnet_ni_t, ni_list); + while (!list_empty(nilist)) { + ni = list_entry(nilist->next, lnet_ni_t, ni_list); list_del(&ni->ni_list); - lnet_ni_free(ni); + rc = lnet_startup_lndni(ni, -1, -1, -1, -1); + + if (rc < 0) + goto failed; + + ni_count++; } - return -ENETDOWN; + return ni_count; +failed: + lnet_shutdown_lndnis(); + + return rc; } /** * Initialize LNet library. * - * Only userspace program needs to call this function - it's automatically - * called in the kernel at module loading time. Caller has to call lnet_fini() - * after a call to lnet_init(), if and only if the latter returned 0. It must - * be called exactly once. + * Automatically called at module loading time. Caller has to call + * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the + * latter returned 0. It must be called exactly once. * - * \return 0 on success, and -ve on failures. + * \retval 0 on success + * \retval -ve on failures. 
*/ -int -lnet_init(void) +int lnet_lib_init(void) { int rc; lnet_assert_wire_constants(); - LASSERT(!the_lnet.ln_init); memset(&the_lnet, 0, sizeof(the_lnet)); @@ -1117,28 +1409,29 @@ lnet_init(void) /* we are under risk of consuming all lh_cookie */ CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n", the_lnet.ln_cpt_number, LNET_CPT_MAX); - return -1; + return -E2BIG; } while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number) the_lnet.ln_cpt_bits++; rc = lnet_create_locks(); - if (rc != 0) { + if (rc) { CERROR("Can't create LNet global locks: %d\n", rc); - return -1; + return rc; } the_lnet.ln_refcount = 0; - the_lnet.ln_init = 1; LNetInvalidateHandle(&the_lnet.ln_rc_eqh); INIT_LIST_HEAD(&the_lnet.ln_lnds); INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie); INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow); - /* The hash table size is the number of bits it takes to express the set + /* + * The hash table size is the number of bits it takes to express the set * ln_num_routes, minus 1 (better to under estimate than over so we - * don't waste memory). */ + * don't waste memory). + */ if (rnet_htable_size <= 0) rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT; else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX) @@ -1146,9 +1439,11 @@ lnet_init(void) the_lnet.ln_remote_nets_hbits = max_t(int, 1, order_base_2(rnet_htable_size) - 1); - /* All LNDs apart from the LOLND are in separate modules. They + /* + * All LNDs apart from the LOLND are in separate modules. They * register themselves when their module loads, and unregister - * themselves when their module is unloaded. */ + * themselves when their module is unloaded. + */ lnet_register_lnd(&the_lolnd); return 0; } @@ -1156,30 +1451,22 @@ lnet_init(void) /** * Finalize LNet library. * - * Only userspace program needs to call this function. It can be called - * at most once. - * - * \pre lnet_init() called with success. + * \pre lnet_lib_init() called with success. * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls. */ -void -lnet_fini(void) +void lnet_lib_exit(void) { - LASSERT(the_lnet.ln_init); - LASSERT(the_lnet.ln_refcount == 0); + LASSERT(!the_lnet.ln_refcount); while (!list_empty(&the_lnet.ln_lnds)) lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next, - lnd_t, lnd_list)); + lnd_t, lnd_list)); lnet_destroy_locks(); - - the_lnet.ln_init = 0; } /** * Set LNet PID and start LNet interfaces, routing, and forwarding. * - * Userspace program should call this after a successful call to lnet_init(). * Users must call this function at least once before any other functions. * For each successful call there must be a corresponding call to * LNetNIFini(). 
For subsequent calls to LNetNIInit(), \a requested_pid is @@ -1197,77 +1484,114 @@ LNetNIInit(lnet_pid_t requested_pid) { int im_a_router = 0; int rc; + int ni_count; + lnet_ping_info_t *pinfo; + lnet_handle_md_t md_handle; + struct list_head net_head; + + INIT_LIST_HEAD(&net_head); mutex_lock(&the_lnet.ln_api_mutex); - LASSERT(the_lnet.ln_init); CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount); if (the_lnet.ln_refcount > 0) { rc = the_lnet.ln_refcount++; - goto out; + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } - if (requested_pid == LNET_PID_ANY) { - /* Don't instantiate LNET just for me */ - rc = -ENETDOWN; - goto failed0; + rc = lnet_prepare(requested_pid); + if (rc) { + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } - rc = lnet_prepare(requested_pid); - if (rc != 0) - goto failed0; + /* Add in the loopback network */ + if (!lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head)) { + rc = -ENOMEM; + goto err_empty_list; + } - rc = lnet_startup_lndnis(); - if (rc != 0) - goto failed1; + /* + * If LNet is being initialized via DLC it is possible + * that the user requests not to load module parameters (ones which + * are supported by DLC) on initialization. Therefore, make sure not + * to load networks, routes and forwarding from module parameters + * in this case. On cleanup in case of failure only clean up + * routes if it has been loaded + */ + if (!the_lnet.ln_nis_from_mod_params) { + rc = lnet_parse_networks(&net_head, lnet_get_networks()); + if (rc < 0) + goto err_empty_list; + } + + ni_count = lnet_startup_lndnis(&net_head); + if (ni_count < 0) { + rc = ni_count; + goto err_empty_list; + } - rc = lnet_parse_routes(lnet_get_routes(), &im_a_router); - if (rc != 0) - goto failed2; + if (!the_lnet.ln_nis_from_mod_params) { + rc = lnet_parse_routes(lnet_get_routes(), &im_a_router); + if (rc) + goto err_shutdown_lndnis; - rc = lnet_check_routes(); - if (rc != 0) - goto failed2; + rc = lnet_check_routes(); + if (rc) + goto err_destory_routes; - rc = lnet_rtrpools_alloc(im_a_router); - if (rc != 0) - goto failed2; + rc = lnet_rtrpools_alloc(im_a_router); + if (rc) + goto err_destory_routes; + } rc = lnet_acceptor_start(); - if (rc != 0) - goto failed2; + if (rc) + goto err_destory_routes; the_lnet.ln_refcount = 1; /* Now I may use my own API functions... 
*/ - /* NB router checker needs the_lnet.ln_ping_info in - * lnet_router_checker -> lnet_update_ni_status_locked */ - rc = lnet_ping_target_init(); - if (rc != 0) - goto failed3; + rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true); + if (rc) + goto err_acceptor_stop; + + lnet_ping_target_update(pinfo, md_handle); rc = lnet_router_checker_start(); - if (rc != 0) - goto failed4; + if (rc) + goto err_stop_ping; + lnet_fault_init(); lnet_router_debugfs_init(); - goto out; - failed4: + mutex_unlock(&the_lnet.ln_api_mutex); + + return 0; + +err_stop_ping: lnet_ping_target_fini(); - failed3: +err_acceptor_stop: the_lnet.ln_refcount = 0; lnet_acceptor_stop(); - failed2: - lnet_destroy_routes(); +err_destory_routes: + if (!the_lnet.ln_nis_from_mod_params) + lnet_destroy_routes(); +err_shutdown_lndnis: lnet_shutdown_lndnis(); - failed1: +err_empty_list: lnet_unprepare(); - failed0: LASSERT(rc < 0); - out: mutex_unlock(&the_lnet.ln_api_mutex); + while (!list_empty(&net_head)) { + struct lnet_ni *ni; + + ni = list_entry(net_head.next, struct lnet_ni, ni_list); + list_del_init(&ni->ni_list); + lnet_ni_free(ni); + } return rc; } EXPORT_SYMBOL(LNetNIInit); @@ -1286,7 +1610,6 @@ LNetNIFini(void) { mutex_lock(&the_lnet.ln_api_mutex); - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (the_lnet.ln_refcount != 1) { @@ -1294,6 +1617,7 @@ LNetNIFini(void) } else { LASSERT(!the_lnet.ln_niinit_self); + lnet_fault_fini(); lnet_router_debugfs_fini(); lnet_router_checker_stop(); lnet_ping_target_fini(); @@ -1313,30 +1637,233 @@ LNetNIFini(void) EXPORT_SYMBOL(LNetNIFini); /** - * This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and - * IOC_LIBCFS_PORTALS_COMPATIBILITY commands to users, by tweaking the LNet - * internal ioctl handler. + * Grabs the ni data from the ni structure and fills the out + * parameters * - * IOC_LIBCFS_PORTALS_COMPATIBILITY is now deprecated, don't use it. - * - * \param cmd IOC_LIBCFS_DEBUG_PEER to print debugging data about a peer. - * The data will be printed to system console. Don't use it excessively. - * \param arg A pointer to lnet_process_id_t, process ID of the peer. 
+ * \param[in] ni network interface structure + * \param[out] cpt_count the number of cpts the ni is on + * \param[out] nid Network Interface ID + * \param[out] peer_timeout NI peer timeout + * \param[out] peer_tx_crdits NI peer transmit credits + * \param[out] peer_rtr_credits NI peer router credits + * \param[out] max_tx_credits NI max transmit credit + * \param[out] net_config Network configuration + */ +static void +lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid, + int *peer_timeout, int *peer_tx_credits, + int *peer_rtr_credits, int *max_tx_credits, + struct lnet_ioctl_net_config *net_config) +{ + int i; + + if (!ni) + return; + + if (!net_config) + return; + + BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) != + ARRAY_SIZE(net_config->ni_interfaces)); + + for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) { + if (!ni->ni_interfaces[i]) + break; + + strncpy(net_config->ni_interfaces[i], + ni->ni_interfaces[i], + sizeof(net_config->ni_interfaces[i])); + } + + *nid = ni->ni_nid; + *peer_timeout = ni->ni_peertimeout; + *peer_tx_credits = ni->ni_peertxcredits; + *peer_rtr_credits = ni->ni_peerrtrcredits; + *max_tx_credits = ni->ni_maxtxcredits; + + net_config->ni_status = ni->ni_status->ns_status; + + if (ni->ni_cpts) { + int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT); + + for (i = 0; i < num_cpts; i++) + net_config->ni_cpts[i] = ni->ni_cpts[i]; + + *cpt_count = num_cpts; + } +} + +int +lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout, + int *peer_tx_credits, int *peer_rtr_credits, + int *max_tx_credits, + struct lnet_ioctl_net_config *net_config) +{ + struct lnet_ni *ni; + struct list_head *tmp; + int cpt, i = 0; + int rc = -ENOENT; + + cpt = lnet_net_lock_current(); + + list_for_each(tmp, &the_lnet.ln_nis) { + if (i++ != idx) + continue; + + ni = list_entry(tmp, lnet_ni_t, ni_list); + lnet_ni_lock(ni); + lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout, + peer_tx_credits, peer_rtr_credits, + max_tx_credits, net_config); + lnet_ni_unlock(ni); + rc = 0; + break; + } + + lnet_net_unlock(cpt); + return rc; +} + +int +lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets, + __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr, + __s32 credits) +{ + lnet_ping_info_t *pinfo; + lnet_handle_md_t md_handle; + struct lnet_ni *ni; + struct list_head net_head; + lnet_remotenet_t *rnet; + int rc; + + INIT_LIST_HEAD(&net_head); + + /* Create a ni structure for the network string */ + rc = lnet_parse_networks(&net_head, nets); + if (rc <= 0) + return !rc ? 
-EINVAL : rc; + + mutex_lock(&the_lnet.ln_api_mutex); + + if (rc > 1) { + rc = -EINVAL; /* only add one interface per call */ + goto failed0; + } + + ni = list_entry(net_head.next, struct lnet_ni, ni_list); + + lnet_net_lock(LNET_LOCK_EX); + rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid)); + lnet_net_unlock(LNET_LOCK_EX); + /* + * make sure that the net added doesn't invalidate the current + * configuration LNet is keeping + */ + if (rnet) { + CERROR("Adding net %s will invalidate routing configuration\n", + nets); + rc = -EUSERS; + goto failed0; + } + + rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(), + false); + if (rc) + goto failed0; + + list_del_init(&ni->ni_list); + + rc = lnet_startup_lndni(ni, peer_timeout, peer_cr, + peer_buf_cr, credits); + if (rc) + goto failed1; + + if (ni->ni_lnd->lnd_accept) { + rc = lnet_acceptor_start(); + if (rc < 0) { + /* shutdown the ni that we just started */ + CERROR("Failed to start up acceptor thread\n"); + lnet_shutdown_lndni(ni); + goto failed1; + } + } + + lnet_ping_target_update(pinfo, md_handle); + mutex_unlock(&the_lnet.ln_api_mutex); + + return 0; + +failed1: + lnet_ping_md_unlink(pinfo, &md_handle); + lnet_ping_info_free(pinfo); +failed0: + mutex_unlock(&the_lnet.ln_api_mutex); + while (!list_empty(&net_head)) { + ni = list_entry(net_head.next, struct lnet_ni, ni_list); + list_del_init(&ni->ni_list); + lnet_ni_free(ni); + } + return rc; +} + +int +lnet_dyn_del_ni(__u32 net) +{ + lnet_ni_t *ni; + lnet_ping_info_t *pinfo; + lnet_handle_md_t md_handle; + int rc; + + /* don't allow userspace to shutdown the LOLND */ + if (LNET_NETTYP(net) == LOLND) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + /* create and link a new ping info, before removing the old one */ + rc = lnet_ping_info_setup(&pinfo, &md_handle, + lnet_get_ni_count() - 1, false); + if (rc) + goto out; + + ni = lnet_net2ni(net); + if (!ni) { + rc = -EINVAL; + goto failed; + } + + /* decrement the reference counter taken by lnet_net2ni() */ + lnet_ni_decref_locked(ni, 0); + + lnet_shutdown_lndni(ni); + + if (!lnet_count_acceptor_nis()) + lnet_acceptor_stop(); + + lnet_ping_target_update(pinfo, md_handle); + goto out; +failed: + lnet_ping_md_unlink(pinfo, &md_handle); + lnet_ping_info_free(pinfo); +out: + mutex_unlock(&the_lnet.ln_api_mutex); + + return rc; +} + +/** + * LNet ioctl handler. * - * \return Always return 0 when called by users directly (i.e., not via ioctl). */ int LNetCtl(unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; + struct lnet_ioctl_config_data *config; lnet_process_id_t id = {0}; lnet_ni_t *ni; int rc; unsigned long secs_passed; - LASSERT(the_lnet.ln_init); - LASSERT(the_lnet.ln_refcount > 0); - switch (cmd) { case IOC_LIBCFS_GET_NI: rc = LNetGetId(data->ioc_count, &id); @@ -1347,26 +1874,149 @@ LNetCtl(unsigned int cmd, void *arg) return lnet_fail_nid(data->ioc_nid, data->ioc_count); case IOC_LIBCFS_ADD_ROUTE: - rc = lnet_add_route(data->ioc_net, data->ioc_count, - data->ioc_nid, data->ioc_priority); - return (rc != 0) ? 
rc : lnet_check_routes(); + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_add_route(config->cfg_net, + config->cfg_config_u.cfg_route.rtr_hop, + config->cfg_nid, + config->cfg_config_u.cfg_route.rtr_priority); + if (!rc) { + rc = lnet_check_routes(); + if (rc) + lnet_del_route(config->cfg_net, + config->cfg_nid); + } + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; case IOC_LIBCFS_DEL_ROUTE: - return lnet_del_route(data->ioc_net, data->ioc_nid); + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_del_route(config->cfg_net, config->cfg_nid); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; case IOC_LIBCFS_GET_ROUTE: - return lnet_get_route(data->ioc_count, - &data->ioc_net, &data->ioc_count, - &data->ioc_nid, &data->ioc_flags, - &data->ioc_priority); + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + return lnet_get_route(config->cfg_count, + &config->cfg_net, + &config->cfg_config_u.cfg_route.rtr_hop, + &config->cfg_nid, + &config->cfg_config_u.cfg_route.rtr_flags, + &config->cfg_config_u.cfg_route.rtr_priority); + + case IOC_LIBCFS_GET_NET: { + struct lnet_ioctl_net_config *net_config; + size_t total = sizeof(*config) + sizeof(*net_config); + + config = arg; + + if (config->cfg_hdr.ioc_len < total) + return -EINVAL; + + net_config = (struct lnet_ioctl_net_config *) + config->cfg_bulk; + if (!net_config) + return -EINVAL; + + return lnet_get_net_config(config->cfg_count, + &config->cfg_ncpts, + &config->cfg_nid, + &config->cfg_config_u.cfg_net.net_peer_timeout, + &config->cfg_config_u.cfg_net.net_peer_tx_credits, + &config->cfg_config_u.cfg_net.net_peer_rtr_credits, + &config->cfg_config_u.cfg_net.net_max_tx_credits, + net_config); + } + + case IOC_LIBCFS_GET_LNET_STATS: { + struct lnet_ioctl_lnet_stats *lnet_stats = arg; + + if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats)) + return -EINVAL; + + lnet_counters_get(&lnet_stats->st_cntrs); + return 0; + } + + case IOC_LIBCFS_CONFIG_RTR: + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + if (config->cfg_config_u.cfg_buffers.buf_enable) { + rc = lnet_rtrpools_enable(); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; + } + lnet_rtrpools_disable(); + mutex_unlock(&the_lnet.ln_api_mutex); + return 0; + + case IOC_LIBCFS_ADD_BUF: + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.buf_tiny, + config->cfg_config_u.cfg_buffers.buf_small, + config->cfg_config_u.cfg_buffers.buf_large); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; + + case IOC_LIBCFS_GET_BUF: { + struct lnet_ioctl_pool_cfg *pool_cfg; + size_t total = sizeof(*config) + sizeof(*pool_cfg); + + config = arg; + + if (config->cfg_hdr.ioc_len < total) + return -EINVAL; + + pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk; + return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg); + } + + case IOC_LIBCFS_GET_PEER_INFO: { + struct lnet_ioctl_peer *peer_info = arg; + + if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info)) + return -EINVAL; + + return lnet_get_peer_info(peer_info->pr_count, + &peer_info->pr_nid, + peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness, + &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt, + 
&peer_info->pr_lnd_u.pr_peer_credits.cr_refcount, + &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits, + &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits, + &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits, + &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits, + &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob); + } + case IOC_LIBCFS_NOTIFY_ROUTER: secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]); - return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, - jiffies - secs_passed * HZ); + secs_passed *= msecs_to_jiffies(MSEC_PER_SEC); - case IOC_LIBCFS_PORTALS_COMPATIBILITY: - /* This can be removed once lustre stops calling it */ - return 0; + return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, + jiffies - secs_passed); case IOC_LIBCFS_LNET_DIST: rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]); @@ -1382,46 +2032,26 @@ LNetCtl(unsigned int cmd, void *arg) lnet_net_unlock(LNET_LOCK_EX); return 0; + case IOC_LIBCFS_LNET_FAULT: + return lnet_fault_ctl(data->ioc_flags, data); + case IOC_LIBCFS_PING: id.nid = data->ioc_nid; id.pid = data->ioc_u32[0]; rc = lnet_ping(id, data->ioc_u32[1], /* timeout */ - (lnet_process_id_t *)data->ioc_pbuf1, - data->ioc_plen1/sizeof(lnet_process_id_t)); + data->ioc_pbuf1, + data->ioc_plen1 / sizeof(lnet_process_id_t)); if (rc < 0) return rc; data->ioc_count = rc; return 0; - case IOC_LIBCFS_DEBUG_PEER: { - /* CAVEAT EMPTOR: this one designed for calling directly; not - * via an ioctl */ - id = *((lnet_process_id_t *) arg); - - lnet_debug_peer(id.nid); - - ni = lnet_net2ni(LNET_NIDNET(id.nid)); - if (ni == NULL) { - CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id)); - } else { - if (ni->ni_lnd->lnd_ctl == NULL) { - CDEBUG(D_WARNING, "No ctl for %s\n", - libcfs_id2str(id)); - } else { - (void)ni->ni_lnd->lnd_ctl(ni, cmd, arg); - } - - lnet_ni_decref(ni); - } - return 0; - } - default: ni = lnet_net2ni(data->ioc_net); - if (ni == NULL) + if (!ni) return -EINVAL; - if (ni->ni_lnd->lnd_ctl == NULL) + if (!ni->ni_lnd->lnd_ctl) rc = -EINVAL; else rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg); @@ -1433,6 +2063,12 @@ LNetCtl(unsigned int cmd, void *arg) } EXPORT_SYMBOL(LNetCtl); +void LNetDebugPeer(lnet_process_id_t id) +{ + lnet_debug_peer(id.nid); +} +EXPORT_SYMBOL(LNetDebugPeer); + /** * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that * all interfaces share a same PID, as requested by LNetNIInit(). @@ -1452,16 +2088,12 @@ LNetGetId(unsigned int index, lnet_process_id_t *id) int cpt; int rc = -ENOENT; - LASSERT(the_lnet.ln_init); - - /* LNetNI initilization failed? 
*/ - if (the_lnet.ln_refcount == 0) - return rc; + LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_net_lock_current(); list_for_each(tmp, &the_lnet.ln_nis) { - if (index-- != 0) + if (index--) continue; ni = list_entry(tmp, lnet_ni_t, ni_list); @@ -1488,192 +2120,8 @@ LNetSnprintHandle(char *str, int len, lnet_handle_any_t h) } EXPORT_SYMBOL(LNetSnprintHandle); -static int -lnet_create_ping_info(void) -{ - int i; - int n; - int rc; - unsigned int infosz; - lnet_ni_t *ni; - lnet_process_id_t id; - lnet_ping_info_t *pinfo; - - for (n = 0; ; n++) { - rc = LNetGetId(n, &id); - if (rc == -ENOENT) - break; - - LASSERT(rc == 0); - } - - infosz = offsetof(lnet_ping_info_t, pi_ni[n]); - LIBCFS_ALLOC(pinfo, infosz); - if (pinfo == NULL) { - CERROR("Can't allocate ping info[%d]\n", n); - return -ENOMEM; - } - - pinfo->pi_nnis = n; - pinfo->pi_pid = the_lnet.ln_pid; - pinfo->pi_magic = LNET_PROTO_PING_MAGIC; - pinfo->pi_features = LNET_PING_FEAT_NI_STATUS; - - for (i = 0; i < n; i++) { - lnet_ni_status_t *ns = &pinfo->pi_ni[i]; - - rc = LNetGetId(i, &id); - LASSERT(rc == 0); - - ns->ns_nid = id.nid; - ns->ns_status = LNET_NI_STATUS_UP; - - lnet_net_lock(0); - - ni = lnet_nid2ni_locked(id.nid, 0); - LASSERT(ni != NULL); - - lnet_ni_lock(ni); - LASSERT(ni->ni_status == NULL); - ni->ni_status = ns; - lnet_ni_unlock(ni); - - lnet_ni_decref_locked(ni, 0); - lnet_net_unlock(0); - } - - the_lnet.ln_ping_info = pinfo; - return 0; -} - -static void -lnet_destroy_ping_info(void) -{ - struct lnet_ni *ni; - - lnet_net_lock(0); - - list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { - lnet_ni_lock(ni); - ni->ni_status = NULL; - lnet_ni_unlock(ni); - } - - lnet_net_unlock(0); - - LIBCFS_FREE(the_lnet.ln_ping_info, - offsetof(lnet_ping_info_t, - pi_ni[the_lnet.ln_ping_info->pi_nnis])); - the_lnet.ln_ping_info = NULL; -} - -int -lnet_ping_target_init(void) -{ - lnet_md_t md = { NULL }; - lnet_handle_me_t meh; - lnet_process_id_t id; - int rc; - int rc2; - int infosz; - - rc = lnet_create_ping_info(); - if (rc != 0) - return rc; - - /* We can have a tiny EQ since we only need to see the unlink event on - * teardown, which by definition is the last one! 
*/ - rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq); - if (rc != 0) { - CERROR("Can't allocate ping EQ: %d\n", rc); - goto failed_0; - } - - memset(&id, 0, sizeof(lnet_process_id_t)); - id.nid = LNET_NID_ANY; - id.pid = LNET_PID_ANY; - - rc = LNetMEAttach(LNET_RESERVED_PORTAL, id, - LNET_PROTO_PING_MATCHBITS, 0, - LNET_UNLINK, LNET_INS_AFTER, - &meh); - if (rc != 0) { - CERROR("Can't create ping ME: %d\n", rc); - goto failed_1; - } - - /* initialize md content */ - infosz = offsetof(lnet_ping_info_t, - pi_ni[the_lnet.ln_ping_info->pi_nnis]); - md.start = the_lnet.ln_ping_info; - md.length = infosz; - md.threshold = LNET_MD_THRESH_INF; - md.max_size = 0; - md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE | - LNET_MD_MANAGE_REMOTE; - md.user_ptr = NULL; - md.eq_handle = the_lnet.ln_ping_target_eq; - - rc = LNetMDAttach(meh, md, - LNET_RETAIN, - &the_lnet.ln_ping_target_md); - if (rc != 0) { - CERROR("Can't attach ping MD: %d\n", rc); - goto failed_2; - } - - return 0; - - failed_2: - rc2 = LNetMEUnlink(meh); - LASSERT(rc2 == 0); - failed_1: - rc2 = LNetEQFree(the_lnet.ln_ping_target_eq); - LASSERT(rc2 == 0); - failed_0: - lnet_destroy_ping_info(); - return rc; -} - -void -lnet_ping_target_fini(void) -{ - lnet_event_t event; - int rc; - int which; - int timeout_ms = 1000; - sigset_t blocked = cfs_block_allsigs(); - - LNetMDUnlink(the_lnet.ln_ping_target_md); - /* NB md could be busy; this just starts the unlink */ - - for (;;) { - rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1, - timeout_ms, &event, &which); - - /* I expect overflow... */ - LASSERT(rc >= 0 || rc == -EOVERFLOW); - - if (rc == 0) { - /* timed out: provide a diagnostic */ - CWARN("Still waiting for ping MD to unlink\n"); - timeout_ms *= 2; - continue; - } - - /* Got a valid event */ - if (event.unlinked) - break; - } - - rc = LNetEQFree(the_lnet.ln_ping_target_eq); - LASSERT(rc == 0); - lnet_destroy_ping_info(); - cfs_restore_sigs(blocked); -} - -int -lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids) +static int lnet_ping(lnet_process_id_t id, int timeout_ms, + lnet_process_id_t __user *ids, int n_ids) { lnet_handle_eq_t eqh; lnet_handle_md_t mdh; @@ -1683,7 +2131,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id int unlinked = 0; int replied = 0; const int a_long_time = 60000; /* mS */ - int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]); + int infosz; lnet_ping_info_t *info; lnet_process_id_t tmpid; int i; @@ -1692,6 +2140,8 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id int rc2; sigset_t blocked; + infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]); + if (n_ids <= 0 || id.nid == LNET_NID_ANY || timeout_ms > 500000 || /* arbitrary limit! 
*/ @@ -1699,15 +2149,15 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id return -EINVAL; if (id.pid == LNET_PID_ANY) - id.pid = LUSTRE_SRV_LNET_PID; + id.pid = LNET_PID_LUSTRE; LIBCFS_ALLOC(info, infosz); - if (info == NULL) + if (!info) return -ENOMEM; /* NB 2 events max (including any unlink event) */ rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh); - if (rc != 0) { + if (rc) { CERROR("Can't allocate EQ: %d\n", rc); goto out_0; } @@ -1722,7 +2172,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id md.eq_handle = eqh; rc = LNetMDBind(md, LNET_UNLINK, &mdh); - if (rc != 0) { + if (rc) { CERROR("Can't bind MD: %d\n", rc); goto out_1; } @@ -1731,11 +2181,11 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id LNET_RESERVED_PORTAL, LNET_PROTO_PING_MATCHBITS, 0); - if (rc != 0) { + if (rc) { /* Don't CERROR; this could be deliberate! */ rc2 = LNetMDUnlink(mdh); - LASSERT(rc2 == 0); + LASSERT(!rc2); /* NB must wait for the UNLINK event below... */ unlinked = 1; @@ -1759,11 +2209,11 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */ - if (rc2 <= 0 || event.status != 0) { + if (rc2 <= 0 || event.status) { /* timeout or error */ - if (!replied && rc == 0) + if (!replied && !rc) rc = (rc2 < 0) ? rc2 : - (rc2 == 0) ? -ETIMEDOUT : + !rc2 ? -ETIMEDOUT : event.status; if (!unlinked) { @@ -1772,7 +2222,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id /* No assertion (racing with network) */ unlinked = 1; timeout_ms = a_long_time; - } else if (rc2 == 0) { + } else if (!rc2) { /* timed out waiting for unlink */ CWARN("ping %s: late network completion\n", libcfs_id2str(id)); @@ -1812,7 +2262,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id goto out_1; } - if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) { + if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) { CERROR("%s: ping w/o NI status: 0x%x\n", libcfs_id2str(id), info->pi_features); goto out_1; @@ -1846,9 +2296,9 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id out_1: rc2 = LNetEQFree(eqh); - if (rc2 != 0) + if (rc2) CERROR("rc2 %d\n", rc2); - LASSERT(rc2 == 0); + LASSERT(!rc2); out_0: LIBCFS_FREE(info, infosz); diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c index 284a3c271bc6..449069c9e649 100644 --- a/drivers/staging/lustre/lnet/lnet/config.c +++ b/drivers/staging/lustre/lnet/lnet/config.c @@ -37,15 +37,15 @@ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/lnet/lib-lnet.h" -struct lnet_text_buf_t { /* tmp struct for parsing routes */ +struct lnet_text_buf { /* tmp struct for parsing routes */ struct list_head ltb_list; /* stash on lists */ int ltb_size; /* allocated size */ char ltb_text[0]; /* text buffer */ }; static int lnet_tbnob; /* track text buf allocation */ -#define LNET_MAX_TEXTBUF_NOB (64<<10) /* bound allocation */ -#define LNET_SINGLE_TEXTBUF_NOB (4<<10) +#define LNET_MAX_TEXTBUF_NOB (64 << 10) /* bound allocation */ +#define LNET_SINGLE_TEXTBUF_NOB (4 << 10) static void lnet_syntax(char *name, char *str, int offset, int width) @@ -54,9 +54,9 @@ lnet_syntax(char *name, char *str, int offset, int width) static char dashes[LNET_SINGLE_TEXTBUF_NOB]; memset(dots, '.', sizeof(dots)); - dots[sizeof(dots)-1] = 0; + dots[sizeof(dots) - 1] = 0; memset(dashes, '-', sizeof(dashes)); 
- dashes[sizeof(dashes)-1] = 0; + dashes[sizeof(dashes) - 1] = 0; LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str); LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n", @@ -77,7 +77,7 @@ lnet_issep(char c) } } -static int +int lnet_net_unique(__u32 net, struct list_head *nilist) { struct list_head *tmp; @@ -96,19 +96,25 @@ lnet_net_unique(__u32 net, struct list_head *nilist) void lnet_ni_free(struct lnet_ni *ni) { - if (ni->ni_refs != NULL) + int i; + + if (ni->ni_refs) cfs_percpt_free(ni->ni_refs); - if (ni->ni_tx_queues != NULL) + if (ni->ni_tx_queues) cfs_percpt_free(ni->ni_tx_queues); - if (ni->ni_cpts != NULL) + if (ni->ni_cpts) cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts); + for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) { + LIBCFS_FREE(ni->ni_interfaces[i], + strlen(ni->ni_interfaces[i]) + 1); + } LIBCFS_FREE(ni, sizeof(*ni)); } -static lnet_ni_t * +lnet_ni_t * lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist) { struct lnet_tx_queue *tq; @@ -123,7 +129,7 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist) } LIBCFS_ALLOC(ni, sizeof(*ni)); - if (ni == NULL) { + if (!ni) { CERROR("Out of memory creating network %s\n", libcfs_net2str(net)); return NULL; @@ -133,18 +139,18 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist) INIT_LIST_HEAD(&ni->ni_cptlist); ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*ni->ni_refs[0])); - if (ni->ni_refs == NULL) + if (!ni->ni_refs) goto failed; ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*ni->ni_tx_queues[0])); - if (ni->ni_tx_queues == NULL) + if (!ni->ni_tx_queues) goto failed; cfs_percpt_for_each(tq, i, ni->ni_tx_queues) INIT_LIST_HEAD(&tq->tq_delayed); - if (el == NULL) { + if (!el) { ni->ni_cpts = NULL; ni->ni_ncpts = LNET_CPT_NUMBER; } else { @@ -178,13 +184,19 @@ int lnet_parse_networks(struct list_head *nilist, char *networks) { struct cfs_expr_list *el = NULL; - int tokensize = strlen(networks) + 1; + int tokensize; char *tokens; char *str; char *tmp; struct lnet_ni *ni; __u32 net; int nnets = 0; + struct list_head *temp_node; + + if (!networks) { + CERROR("networks string is undefined\n"); + return -EINVAL; + } if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) { /* _WAY_ conservative */ @@ -193,23 +205,19 @@ lnet_parse_networks(struct list_head *nilist, char *networks) return -EINVAL; } + tokensize = strlen(networks) + 1; + LIBCFS_ALLOC(tokens, tokensize); - if (tokens == NULL) { + if (!tokens) { CERROR("Can't allocate net tokens\n"); return -ENOMEM; } - the_lnet.ln_network_tokens = tokens; - the_lnet.ln_network_tokens_nob = tokensize; memcpy(tokens, networks, tokensize); - str = tmp = tokens; - - /* Add in the loopback network */ - ni = lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, nilist); - if (ni == NULL) - goto failed; + tmp = tokens; + str = tokens; - while (str != NULL && *str != 0) { + while (str && *str) { char *comma = strchr(str, ','); char *bracket = strchr(str, '('); char *square = strchr(str, '['); @@ -217,26 +225,29 @@ lnet_parse_networks(struct list_head *nilist, char *networks) int niface; int rc; - /* NB we don't check interface conflicts here; it's the LNDs - * responsibility (if it cares at all) */ - - if (square != NULL && (comma == NULL || square < comma)) { - /* i.e: o2ib0(ib0)[1,2], number between square - * brackets are CPTs this NI needs to be bond */ - if (bracket != NULL && bracket > square) { + /* + * NB we don't check interface conflicts here; it's the LNDs 
+ * responsibility (if it cares at all) + */ + if (square && (!comma || square < comma)) { + /* + * i.e: o2ib0(ib0)[1,2], number between square + * brackets are CPTs this NI needs to be bond + */ + if (bracket && bracket > square) { tmp = square; goto failed_syntax; } tmp = strchr(square, ']'); - if (tmp == NULL) { + if (!tmp) { tmp = square; goto failed_syntax; } rc = cfs_expr_list_parse(square, tmp - square + 1, 0, LNET_CPT_NUMBER - 1, &el); - if (rc != 0) { + if (rc) { tmp = square; goto failed_syntax; } @@ -245,12 +256,10 @@ lnet_parse_networks(struct list_head *nilist, char *networks) *square++ = ' '; } - if (bracket == NULL || - (comma != NULL && comma < bracket)) { - + if (!bracket || (comma && comma < bracket)) { /* no interface list specified */ - if (comma != NULL) + if (comma) *comma++ = 0; net = libcfs_str2net(cfs_trimwhite(str)); @@ -262,10 +271,10 @@ lnet_parse_networks(struct list_head *nilist, char *networks) } if (LNET_NETTYP(net) != LOLND && /* LO is implicit */ - lnet_ni_alloc(net, el, nilist) == NULL) + !lnet_ni_alloc(net, el, nilist)) goto failed; - if (el != NULL) { + if (el) { cfs_expr_list_free(el); el = NULL; } @@ -281,12 +290,11 @@ lnet_parse_networks(struct list_head *nilist, char *networks) goto failed_syntax; } - nnets++; ni = lnet_ni_alloc(net, el, nilist); - if (ni == NULL) + if (!ni) goto failed; - if (el != NULL) { + if (el) { cfs_expr_list_free(el); el = NULL; } @@ -295,7 +303,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks) iface = bracket + 1; bracket = strchr(iface, ')'); - if (bracket == NULL) { + if (!bracket) { tmp = iface; goto failed_syntax; } @@ -303,11 +311,11 @@ lnet_parse_networks(struct list_head *nilist, char *networks) *bracket = 0; do { comma = strchr(iface, ','); - if (comma != NULL) + if (comma) *comma++ = 0; iface = cfs_trimwhite(iface); - if (*iface == 0) { + if (!*iface) { tmp = iface; goto failed_syntax; } @@ -319,16 +327,32 @@ lnet_parse_networks(struct list_head *nilist, char *networks) goto failed; } - ni->ni_interfaces[niface++] = iface; + /* + * Allocate a separate piece of memory and copy + * into it the string, so we don't have + * a depencency on the tokens string. This way we + * can free the tokens at the end of the function. 
+ * The newly allocated ni_interfaces[] can be + * freed when freeing the NI + */ + LIBCFS_ALLOC(ni->ni_interfaces[niface], + strlen(iface) + 1); + if (!ni->ni_interfaces[niface]) { + CERROR("Can't allocate net interface name\n"); + goto failed; + } + strncpy(ni->ni_interfaces[niface], iface, + strlen(iface)); + niface++; iface = comma; - } while (iface != NULL); + } while (iface); str = bracket + 1; comma = strchr(bracket + 1, ','); - if (comma != NULL) { + if (comma) { *comma = 0; str = cfs_trimwhite(str); - if (*str != 0) { + if (*str) { tmp = str; goto failed_syntax; } @@ -337,14 +361,17 @@ lnet_parse_networks(struct list_head *nilist, char *networks) } str = cfs_trimwhite(str); - if (*str != 0) { + if (*str) { tmp = str; goto failed_syntax; } } - LASSERT(!list_empty(nilist)); - return 0; + list_for_each(temp_node, nilist) + nnets++; + + LIBCFS_FREE(tokens, tokensize); + return nnets; failed_syntax: lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp)); @@ -356,23 +383,22 @@ lnet_parse_networks(struct list_head *nilist, char *networks) lnet_ni_free(ni); } - if (el != NULL) + if (el) cfs_expr_list_free(el); LIBCFS_FREE(tokens, tokensize); - the_lnet.ln_network_tokens = NULL; return -EINVAL; } -static struct lnet_text_buf_t * +static struct lnet_text_buf * lnet_new_text_buf(int str_len) { - struct lnet_text_buf_t *ltb; + struct lnet_text_buf *ltb; int nob; /* NB allocate space for the terminating 0 */ - nob = offsetof(struct lnet_text_buf_t, ltb_text[str_len + 1]); + nob = offsetof(struct lnet_text_buf, ltb_text[str_len + 1]); if (nob > LNET_SINGLE_TEXTBUF_NOB) { /* _way_ conservative for "route net gateway..." */ CERROR("text buffer too big\n"); @@ -385,7 +411,7 @@ lnet_new_text_buf(int str_len) } LIBCFS_ALLOC(ltb, nob); - if (ltb == NULL) + if (!ltb) return NULL; ltb->ltb_size = nob; @@ -395,7 +421,7 @@ lnet_new_text_buf(int str_len) } static void -lnet_free_text_buf(struct lnet_text_buf_t *ltb) +lnet_free_text_buf(struct lnet_text_buf *ltb) { lnet_tbnob -= ltb->ltb_size; LIBCFS_FREE(ltb, ltb->ltb_size); @@ -404,10 +430,10 @@ lnet_free_text_buf(struct lnet_text_buf_t *ltb) static void lnet_free_text_bufs(struct list_head *tbs) { - struct lnet_text_buf_t *ltb; + struct lnet_text_buf *ltb; while (!list_empty(tbs)) { - ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list); + ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list); list_del(&ltb->ltb_list); lnet_free_text_buf(ltb); @@ -421,7 +447,7 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str) char *sep; int nob; int i; - struct lnet_text_buf_t *ltb; + struct lnet_text_buf *ltb; INIT_LIST_HEAD(&pending); @@ -432,16 +458,16 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str) str++; /* scan for separator or comment */ - for (sep = str; *sep != 0; sep++) + for (sep = str; *sep; sep++) if (lnet_issep(*sep) || *sep == '#') break; nob = (int)(sep - str); if (nob > 0) { ltb = lnet_new_text_buf(nob); - if (ltb == NULL) { + if (!ltb) { lnet_free_text_bufs(&pending); - return -1; + return -ENOMEM; } for (i = 0; i < nob; i++) @@ -459,10 +485,10 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str) /* scan for separator */ do { sep++; - } while (*sep != 0 && !lnet_issep(*sep)); + } while (*sep && !lnet_issep(*sep)); } - if (*sep == 0) + if (!*sep) break; str = sep + 1; @@ -479,18 +505,18 @@ lnet_expand1tb(struct list_head *list, { int len1 = (int)(sep1 - str); int len2 = strlen(sep2 + 1); - struct lnet_text_buf_t *ltb; + struct lnet_text_buf *ltb; LASSERT(*sep1 == '['); LASSERT(*sep2 == ']'); ltb = 
lnet_new_text_buf(len1 + itemlen + len2); - if (ltb == NULL) + if (!ltb) return -ENOMEM; memcpy(ltb->ltb_text, str, len1); memcpy(&ltb->ltb_text[len1], item, itemlen); - memcpy(&ltb->ltb_text[len1+itemlen], sep2 + 1, len2); + memcpy(&ltb->ltb_text[len1 + itemlen], sep2 + 1, len2); ltb->ltb_text[len1 + itemlen + len2] = 0; list_add_tail(&ltb->ltb_list, list); @@ -516,15 +542,14 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str) INIT_LIST_HEAD(&pending); sep = strchr(str, '['); - if (sep == NULL) /* nothing to expand */ + if (!sep) /* nothing to expand */ return 0; sep2 = strchr(sep, ']'); - if (sep2 == NULL) + if (!sep2) goto failed; for (parsed = sep; parsed < sep2; parsed = enditem) { - enditem = ++parsed; while (enditem < sep2 && *enditem != ',') enditem++; @@ -534,17 +559,13 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str) if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi, &stride, &scanned) < 3) { - if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) { - /* simple string enumeration */ - if (lnet_expand1tb( - &pending, str, sep, sep2, - parsed, - (int)(enditem - parsed)) != 0) { + if (lnet_expand1tb(&pending, str, sep, sep2, + parsed, + (int)(enditem - parsed))) { goto failed; } - continue; } @@ -557,18 +578,17 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str) goto failed; if (hi < 0 || lo < 0 || stride < 0 || hi < lo || - (hi - lo) % stride != 0) + (hi - lo) % stride) goto failed; for (i = lo; i <= hi; i += stride) { - snprintf(num, sizeof(num), "%d", i); nob = strlen(num); if (nob + 1 == sizeof(num)) goto failed; if (lnet_expand1tb(&pending, str, sep, sep2, - num, nob) != 0) + num, nob)) goto failed; } } @@ -578,7 +598,7 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str) failed: lnet_free_text_bufs(&pending); - return -1; + return -EINVAL; } static int @@ -602,17 +622,19 @@ lnet_parse_priority(char *str, unsigned int *priority, char **token) int len; sep = strchr(str, LNET_PRIORITY_SEPARATOR); - if (sep == NULL) { + if (!sep) { *priority = 0; return 0; } len = strlen(sep + 1); - if ((sscanf((sep+1), "%u%n", priority, &nob) < 1) || (len != nob)) { - /* Update the caller's token pointer so it treats the found - priority as the token to report in the error message. */ + if ((sscanf((sep + 1), "%u%n", priority, &nob) < 1) || (len != nob)) { + /* + * Update the caller's token pointer so it treats the found + * priority as the token to report in the error message. + */ *token += sep - str + 1; - return -1; + return -EINVAL; } CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob); @@ -636,13 +658,13 @@ lnet_parse_route(char *str, int *im_a_router) struct list_head *tmp2; __u32 net; lnet_nid_t nid; - struct lnet_text_buf_t *ltb; + struct lnet_text_buf *ltb; int rc; char *sep; char *token = str; int ntokens = 0; int myrc = -1; - unsigned int hops; + __u32 hops; int got_hops = 0; unsigned int priority = 0; @@ -658,7 +680,7 @@ lnet_parse_route(char *str, int *im_a_router) /* scan for token start */ while (isspace(*sep)) sep++; - if (*sep == 0) { + if (!*sep) { if (ntokens < (got_hops ? 
3 : 2)) goto token_error; break; @@ -668,9 +690,9 @@ lnet_parse_route(char *str, int *im_a_router) token = sep++; /* scan for token end */ - while (*sep != 0 && !isspace(*sep)) + while (*sep && !isspace(*sep)) sep++; - if (*sep != 0) + if (*sep) *sep++ = 0; if (ntokens == 1) { @@ -684,7 +706,7 @@ lnet_parse_route(char *str, int *im_a_router) } ltb = lnet_new_text_buf(strlen(token)); - if (ltb == NULL) + if (!ltb) goto out; strcpy(ltb->ltb_text, token); @@ -692,8 +714,7 @@ lnet_parse_route(char *str, int *im_a_router) list_add_tail(tmp1, tmp2); while (tmp1 != tmp2) { - ltb = list_entry(tmp1, struct lnet_text_buf_t, - ltb_list); + ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list); rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text); if (rc < 0) @@ -726,20 +747,23 @@ lnet_parse_route(char *str, int *im_a_router) } } + /** + * if there are no hops set then we want to flag this value as + * unset since hops is an optional parameter + */ if (!got_hops) - hops = 1; + hops = LNET_UNDEFINED_HOPS; LASSERT(!list_empty(&nets)); LASSERT(!list_empty(&gateways)); list_for_each(tmp1, &nets) { - ltb = list_entry(tmp1, struct lnet_text_buf_t, ltb_list); + ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list); net = libcfs_str2net(ltb->ltb_text); LASSERT(net != LNET_NIDNET(LNET_NID_ANY)); list_for_each(tmp2, &gateways) { - ltb = list_entry(tmp2, struct lnet_text_buf_t, - ltb_list); + ltb = list_entry(tmp2, struct lnet_text_buf, ltb_list); nid = libcfs_str2nid(ltb->ltb_text); LASSERT(nid != LNET_NID_ANY); @@ -749,7 +773,7 @@ lnet_parse_route(char *str, int *im_a_router) } rc = lnet_add_route(net, hops, nid, priority); - if (rc != 0) { + if (rc && rc != -EEXIST && rc != -EHOSTUNREACH) { CERROR("Can't create route to %s via %s\n", libcfs_net2str(net), libcfs_nid2str(nid)); @@ -772,10 +796,10 @@ lnet_parse_route(char *str, int *im_a_router) static int lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router) { - struct lnet_text_buf_t *ltb; + struct lnet_text_buf *ltb; while (!list_empty(tbs)) { - ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list); + ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list); if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) { lnet_free_text_bufs(tbs); @@ -806,7 +830,7 @@ lnet_parse_routes(char *routes, int *im_a_router) rc = lnet_parse_route_tbs(&tbs, im_a_router); } - LASSERT(lnet_tbnob == 0); + LASSERT(!lnet_tbnob); return rc; } @@ -818,7 +842,7 @@ lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip) int i; rc = cfs_ip_addr_parse(token, len, &list); - if (rc != 0) + if (rc) return rc; for (rc = i = 0; !rc && i < nip; i++) @@ -851,18 +875,18 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip) /* scan for token start */ while (isspace(*sep)) sep++; - if (*sep == 0) + if (!*sep) break; token = sep++; /* scan for token end */ - while (*sep != 0 && !isspace(*sep)) + while (*sep && !isspace(*sep)) sep++; - if (*sep != 0) + if (*sep) *sep++ = 0; - if (ntokens++ == 0) { + if (!ntokens++) { net = token; continue; } @@ -876,7 +900,8 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip) return rc; } - matched |= (rc != 0); + if (rc) + matched |= 1; } if (!matched) @@ -892,12 +917,12 @@ lnet_netspec2net(char *netspec) char *bracket = strchr(netspec, '('); __u32 net; - if (bracket != NULL) + if (bracket) *bracket = 0; net = libcfs_str2net(netspec); - if (bracket != NULL) + if (bracket) *bracket = '('; return net; @@ -909,8 +934,8 @@ lnet_splitnets(char *source, struct list_head *nets) int offset = 0; int 
offset2; int len; - struct lnet_text_buf_t *tb; - struct lnet_text_buf_t *tb2; + struct lnet_text_buf *tb; + struct lnet_text_buf *tb2; struct list_head *t; char *sep; char *bracket; @@ -919,15 +944,13 @@ lnet_splitnets(char *source, struct list_head *nets) LASSERT(!list_empty(nets)); LASSERT(nets->next == nets->prev); /* single entry */ - tb = list_entry(nets->next, struct lnet_text_buf_t, ltb_list); + tb = list_entry(nets->next, struct lnet_text_buf, ltb_list); for (;;) { sep = strchr(tb->ltb_text, ','); bracket = strchr(tb->ltb_text, '('); - if (sep != NULL && - bracket != NULL && - bracket < sep) { + if (sep && bracket && bracket < sep) { /* netspec lists interfaces... */ offset2 = offset + (int)(bracket - tb->ltb_text); @@ -935,16 +958,16 @@ lnet_splitnets(char *source, struct list_head *nets) bracket = strchr(bracket + 1, ')'); - if (bracket == NULL || - !(bracket[1] == ',' || bracket[1] == 0)) { + if (!bracket || + !(bracket[1] == ',' || !bracket[1])) { lnet_syntax("ip2nets", source, offset2, len); return -EINVAL; } - sep = (bracket[1] == 0) ? NULL : bracket + 1; + sep = !bracket[1] ? NULL : bracket + 1; } - if (sep != NULL) + if (sep) *sep++ = 0; net = lnet_netspec2net(tb->ltb_text); @@ -955,7 +978,7 @@ lnet_splitnets(char *source, struct list_head *nets) } list_for_each(t, nets) { - tb2 = list_entry(t, struct lnet_text_buf_t, ltb_list); + tb2 = list_entry(t, struct lnet_text_buf, ltb_list); if (tb2 == tb) continue; @@ -968,13 +991,13 @@ lnet_splitnets(char *source, struct list_head *nets) } } - if (sep == NULL) + if (!sep) return 0; offset += (int)(sep - tb->ltb_text); len = strlen(sep); tb2 = lnet_new_text_buf(len); - if (tb2 == NULL) + if (!tb2) return -ENOMEM; strncpy(tb2->ltb_text, sep, len); @@ -996,8 +1019,9 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) struct list_head current_nets; struct list_head *t; struct list_head *t2; - struct lnet_text_buf_t *tb; - struct lnet_text_buf_t *tb2; + struct lnet_text_buf *tb; + struct lnet_text_buf *temp; + struct lnet_text_buf *tb2; __u32 net1; __u32 net2; int len; @@ -1008,7 +1032,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) INIT_LIST_HEAD(&raw_entries); if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) { CERROR("Error parsing ip2nets\n"); - LASSERT(lnet_tbnob == 0); + LASSERT(!lnet_tbnob); return -EINVAL; } @@ -1019,12 +1043,9 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) len = 0; rc = 0; - while (!list_empty(&raw_entries)) { - tb = list_entry(raw_entries.next, struct lnet_text_buf_t, - ltb_list); - + list_for_each_entry_safe(tb, temp, &raw_entries, ltb_list) { strncpy(source, tb->ltb_text, sizeof(source)); - source[sizeof(source)-1] = '\0'; + source[sizeof(source) - 1] = '\0'; /* replace ltb_text with the network(s) add on match */ rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip); @@ -1033,7 +1054,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) list_del(&tb->ltb_list); - if (rc == 0) { /* no match */ + if (!rc) { /* no match */ lnet_free_text_buf(tb); continue; } @@ -1047,13 +1068,13 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) dup = 0; list_for_each(t, ¤t_nets) { - tb = list_entry(t, struct lnet_text_buf_t, ltb_list); + tb = list_entry(t, struct lnet_text_buf, ltb_list); net1 = lnet_netspec2net(tb->ltb_text); LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY)); list_for_each(t2, &matched_nets) { - tb2 = list_entry(t2, struct lnet_text_buf_t, - ltb_list); + 
tb2 = list_entry(t2, struct lnet_text_buf, + ltb_list); net2 = lnet_netspec2net(tb2->ltb_text); LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY)); @@ -1073,13 +1094,13 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) } list_for_each_safe(t, t2, ¤t_nets) { - tb = list_entry(t, struct lnet_text_buf_t, ltb_list); + tb = list_entry(t, struct lnet_text_buf, ltb_list); list_del(&tb->ltb_list); list_add_tail(&tb->ltb_list, &matched_nets); len += snprintf(networks + len, sizeof(networks) - len, - "%s%s", (len == 0) ? "" : ",", + "%s%s", !len ? "" : ",", tb->ltb_text); if (len >= sizeof(networks)) { @@ -1096,7 +1117,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) lnet_free_text_bufs(&raw_entries); lnet_free_text_bufs(&matched_nets); lnet_free_text_bufs(¤t_nets); - LASSERT(lnet_tbnob == 0); + LASSERT(!lnet_tbnob); if (rc < 0) return rc; @@ -1122,7 +1143,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) return nif; LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs)); - if (ipaddrs == NULL) { + if (!ipaddrs) { CERROR("Can't allocate ipaddrs[%d]\n", nif); lnet_ipif_free_enumeration(ifnames, nif); return -ENOMEM; @@ -1133,7 +1154,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) continue; rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask); - if (rc != 0) { + if (rc) { CWARN("Can't query interface %s: %d\n", ifnames[i], rc); continue; @@ -1155,7 +1176,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) } else { if (nip > 0) { LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2)); - if (ipaddrs2 == NULL) { + if (!ipaddrs2) { CERROR("Can't allocate ipaddrs[%d]\n", nip); nip = -ENOMEM; } else { @@ -1184,7 +1205,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets) return nip; } - if (nip == 0) { + if (!nip) { LCONSOLE_ERROR_MSG(0x118, "No local IP interfaces for ip2nets to match\n"); return -ENOENT; @@ -1198,7 +1219,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets) return rc; } - if (rc == 0) { + if (!rc) { LCONSOLE_ERROR_MSG(0x11a, "ip2nets does not match any local IP interfaces\n"); return -ENOENT; diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c index 64f94a690081..adbcadbab1be 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-eq.c +++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c @@ -72,33 +72,38 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, { lnet_eq_t *eq; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); - /* We need count to be a power of 2 so that when eq_{enq,deq}_seq + /* + * We need count to be a power of 2 so that when eq_{enq,deq}_seq * overflow, they don't skip entries, so the queue has the same - * apparent capacity at all times */ + * apparent capacity at all times + */ + if (count) + count = roundup_pow_of_two(count); - count = roundup_pow_of_two(count); - - if (callback != LNET_EQ_HANDLER_NONE && count != 0) + if (callback != LNET_EQ_HANDLER_NONE && count) CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? 
Please contact with developer to confirm\n", count); - /* count can be 0 if only need callback, we can eliminate - * overhead of enqueue event */ - if (count == 0 && callback == LNET_EQ_HANDLER_NONE) + /* + * count can be 0 if only need callback, we can eliminate + * overhead of enqueue event + */ + if (!count && callback == LNET_EQ_HANDLER_NONE) return -EINVAL; eq = lnet_eq_alloc(); - if (eq == NULL) + if (!eq) return -ENOMEM; - if (count != 0) { + if (count) { LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t)); - if (eq->eq_events == NULL) + if (!eq->eq_events) goto failed; - /* NB allocator has set all event sequence numbers to 0, - * so all them should be earlier than eq_deq_seq */ + /* + * NB allocator has set all event sequence numbers to 0, + * so all them should be earlier than eq_deq_seq + */ } eq->eq_deq_seq = 1; @@ -108,13 +113,15 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*eq->eq_refs[0])); - if (eq->eq_refs == NULL) + if (!eq->eq_refs) goto failed; /* MUST hold both exclusive lnet_res_lock */ lnet_res_lock(LNET_LOCK_EX); - /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do - * both EQ lookup and poll event with only lnet_eq_wait_lock */ + /* + * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do + * both EQ lookup and poll event with only lnet_eq_wait_lock + */ lnet_eq_wait_lock(); lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh); @@ -127,10 +134,10 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, return 0; failed: - if (eq->eq_events != NULL) + if (eq->eq_events) LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t)); - if (eq->eq_refs != NULL) + if (eq->eq_refs) cfs_percpt_free(eq->eq_refs); lnet_eq_free(eq); @@ -159,23 +166,24 @@ LNetEQFree(lnet_handle_eq_t eqh) int size = 0; int i; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); lnet_res_lock(LNET_LOCK_EX); - /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do - * both EQ lookup and poll event with only lnet_eq_wait_lock */ + /* + * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do + * both EQ lookup and poll event with only lnet_eq_wait_lock + */ lnet_eq_wait_lock(); eq = lnet_handle2eq(&eqh); - if (eq == NULL) { + if (!eq) { rc = -ENOENT; goto out; } cfs_percpt_for_each(ref, i, eq->eq_refs) { LASSERT(*ref >= 0); - if (*ref == 0) + if (!*ref) continue; CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n", @@ -196,9 +204,9 @@ LNetEQFree(lnet_handle_eq_t eqh) lnet_eq_wait_unlock(); lnet_res_unlock(LNET_LOCK_EX); - if (events != NULL) + if (events) LIBCFS_FREE(events, size * sizeof(lnet_event_t)); - if (refs != NULL) + if (refs) cfs_percpt_free(refs); return rc; @@ -211,7 +219,7 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev) /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */ int index; - if (eq->eq_size == 0) { + if (!eq->eq_size) { LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE); eq->eq_callback(ev); return; @@ -255,8 +263,10 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev) if (eq->eq_deq_seq == new_event->sequence) { rc = 1; } else { - /* don't complain with CERROR: some EQs are sized small - * anyway; if it's important, the caller should complain */ + /* + * don't complain with CERROR: some EQs are sized small + * anyway; if it's important, the caller should complain + */ CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n", eq->eq_deq_seq, new_event->sequence); rc = -EOVERFLOW; @@ -309,8 +319,8 @@ 
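As an illustrative aside (not part of the patch): the roundup_pow_of_two() call above is what lets the eq_enq_seq/eq_deq_seq counters overflow harmlessly. With a power-of-two queue size, a free-running sequence number can be masked straight into a slot index, so wraparound never skips or repeats an entry. A minimal sketch of that idea, using hypothetical names (toy_eq, toy_eq_slot):

struct toy_eq {
	unsigned long	seq;	/* free-running enqueue sequence, may overflow */
	unsigned int	size;	/* power of two, cf. roundup_pow_of_two() above */
	void		**slots;
};

/*
 * Because size is 2^n, "seq & (size - 1)" equals "seq % size" and the
 * mapping stays consistent across counter overflow, which is the
 * property the comment in LNetEQAlloc() relies on.
 */
static inline unsigned int toy_eq_slot(const struct toy_eq *eq, unsigned long seq)
{
	return seq & (eq->size - 1);
}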
__must_hold(&the_lnet.ln_eq_wait_lock) wait_queue_t wl; unsigned long now; - if (tms == 0) - return -1; /* don't want to wait and no new event */ + if (!tms) + return -ENXIO; /* don't want to wait and no new event */ init_waitqueue_entry(&wl, current); set_current_state(TASK_INTERRUPTIBLE); @@ -320,7 +330,6 @@ __must_hold(&the_lnet.ln_eq_wait_lock) if (tms < 0) { schedule(); - } else { now = jiffies; schedule_timeout(msecs_to_jiffies(tms)); @@ -329,7 +338,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock) tms = 0; } - wait = tms != 0; /* might need to call here again */ + wait = tms; /* might need to call here again */ *timeout_ms = tms; lnet_eq_wait_lock(); @@ -372,7 +381,6 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms, int rc; int i; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (neq < 1) @@ -384,20 +392,20 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms, for (i = 0; i < neq; i++) { lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]); - if (eq == NULL) { + if (!eq) { lnet_eq_wait_unlock(); return -ENOENT; } rc = lnet_eq_dequeue_event(eq, event); - if (rc != 0) { + if (rc) { lnet_eq_wait_unlock(); *which = i; return rc; } } - if (wait == 0) + if (!wait) break; /* diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c index 758f5bedef7e..c74514f99f90 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-md.c +++ b/drivers/staging/lustre/lnet/lnet/lib-md.c @@ -46,16 +46,18 @@ void lnet_md_unlink(lnet_libmd_t *md) { - if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) { + if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) { /* first unlink attempt... */ lnet_me_t *me = md->md_me; md->md_flags |= LNET_MD_FLAG_ZOMBIE; - /* Disassociate from ME (if any), + /* + * Disassociate from ME (if any), * and unlink it if it was created - * with LNET_UNLINK */ - if (me != NULL) { + * with LNET_UNLINK + */ + if (me) { /* detach MD from portal */ lnet_ptl_detach_md(me, md); if (me->me_unlink == LNET_UNLINK) @@ -66,14 +68,14 @@ lnet_md_unlink(lnet_libmd_t *md) lnet_res_lh_invalidate(&md->md_lh); } - if (md->md_refcount != 0) { + if (md->md_refcount) { CDEBUG(D_NET, "Queueing unlink of md %p\n", md); return; } CDEBUG(D_NET, "Unlinking md %p\n", md); - if (md->md_eq != NULL) { + if (md->md_eq) { int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie); LASSERT(*md->md_eq->eq_refs[cpt] > 0); @@ -103,12 +105,12 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) lmd->md_refcount = 0; lmd->md_flags = (unlink == LNET_UNLINK) ? 
LNET_MD_FLAG_AUTO_UNLINK : 0; - if ((umd->options & LNET_MD_IOVEC) != 0) { - - if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */ + if (umd->options & LNET_MD_IOVEC) { + if (umd->options & LNET_MD_KIOV) /* Can't specify both */ return -EINVAL; - lmd->md_niov = niov = umd->length; + niov = umd->length; + lmd->md_niov = umd->length; memcpy(lmd->md_iov.iov, umd->start, niov * sizeof(lmd->md_iov.iov[0])); @@ -123,13 +125,14 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) lmd->md_length = total_length; - if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* use max size */ + if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */ (umd->max_size < 0 || umd->max_size > total_length)) /* illegal max_size */ return -EINVAL; - } else if ((umd->options & LNET_MD_KIOV) != 0) { - lmd->md_niov = niov = umd->length; + } else if (umd->options & LNET_MD_KIOV) { + niov = umd->length; + lmd->md_niov = umd->length; memcpy(lmd->md_iov.kiov, umd->start, niov * sizeof(lmd->md_iov.kiov[0])); @@ -144,17 +147,18 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) lmd->md_length = total_length; - if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */ + if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */ (umd->max_size < 0 || umd->max_size > total_length)) /* illegal max_size */ return -EINVAL; } else { /* contiguous */ lmd->md_length = umd->length; - lmd->md_niov = niov = 1; + niov = 1; + lmd->md_niov = 1; lmd->md_iov.iov[0].iov_base = umd->start; lmd->md_iov.iov[0].iov_len = umd->length; - if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */ + if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */ (umd->max_size < 0 || umd->max_size > (int)umd->length)) /* illegal max_size */ return -EINVAL; @@ -169,22 +173,26 @@ lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt) { struct lnet_res_container *container = the_lnet.ln_md_containers[cpt]; - /* NB we are passed an allocated, but inactive md. + /* + * NB we are passed an allocated, but inactive md. * if we return success, caller may lnet_md_unlink() it. * otherwise caller may only lnet_md_free() it. */ - /* This implementation doesn't know how to create START events or + /* + * This implementation doesn't know how to create START events or * disable END events. Best to LASSERT our caller is compliant so - * we find out quickly... */ - /* TODO - reevaluate what should be here in light of + * we find out quickly... + */ + /* + * TODO - reevaluate what should be here in light of * the removal of the start and end events * maybe there we shouldn't even allow LNET_EQ_NONE!) - * LASSERT (eq == NULL); + * LASSERT(!eq); */ if (!LNetHandleIsInvalid(eq_handle)) { md->md_eq = lnet_handle2eq(&eq_handle); - if (md->md_eq == NULL) + if (!md->md_eq) return -ENOENT; (*md->md_eq->eq_refs[cpt])++; @@ -208,8 +216,8 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd) * and that's all. */ umd->start = lmd->md_start; - umd->length = ((lmd->md_options & - (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ? + umd->length = !(lmd->md_options & + (LNET_MD_IOVEC | LNET_MD_KIOV)) ? 
lmd->md_length : lmd->md_niov; umd->threshold = lmd->md_threshold; umd->max_size = lmd->md_max_size; @@ -221,13 +229,13 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd) static int lnet_md_validate(lnet_md_t *umd) { - if (umd->start == NULL && umd->length != 0) { + if (!umd->start && umd->length) { CERROR("MD start pointer can not be NULL with length %u\n", umd->length); return -EINVAL; } - if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 && + if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) && umd->length > LNET_MAX_IOV) { CERROR("Invalid option: too many fragments %u, %d max\n", umd->length, LNET_MAX_IOV); @@ -273,41 +281,42 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd, int cpt; int rc; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); - if (lnet_md_validate(&umd) != 0) + if (lnet_md_validate(&umd)) return -EINVAL; - if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) { + if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) { CERROR("Invalid option: no MD_OP set\n"); return -EINVAL; } md = lnet_md_alloc(&umd); - if (md == NULL) + if (!md) return -ENOMEM; rc = lnet_md_build(md, &umd, unlink); cpt = lnet_cpt_of_cookie(meh.cookie); lnet_res_lock(cpt); - if (rc != 0) + if (rc) goto failed; me = lnet_handle2me(&meh); - if (me == NULL) + if (!me) rc = -ENOENT; - else if (me->me_md != NULL) + else if (me->me_md) rc = -EBUSY; else rc = lnet_md_link(md, umd.eq_handle, cpt); - if (rc != 0) + if (rc) goto failed; - /* attach this MD to portal of ME and check if it matches any - * blocked msgs on this portal */ + /* + * attach this MD to portal of ME and check if it matches any + * blocked msgs on this portal + */ lnet_ptl_attach_md(me, md, &matches, &drops); lnet_md2handle(handle, md); @@ -350,29 +359,28 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle) int cpt; int rc; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); - if (lnet_md_validate(&umd) != 0) + if (lnet_md_validate(&umd)) return -EINVAL; - if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) { + if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) { CERROR("Invalid option: GET|PUT illegal on active MDs\n"); return -EINVAL; } md = lnet_md_alloc(&umd); - if (md == NULL) + if (!md) return -ENOMEM; rc = lnet_md_build(md, &umd, unlink); cpt = lnet_res_lock_current(); - if (rc != 0) + if (rc) goto failed; rc = lnet_md_link(md, umd.eq_handle, cpt); - if (rc != 0) + if (rc) goto failed; lnet_md2handle(handle, md); @@ -425,23 +433,24 @@ LNetMDUnlink(lnet_handle_md_t mdh) lnet_libmd_t *md; int cpt; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_cpt_of_cookie(mdh.cookie); lnet_res_lock(cpt); md = lnet_handle2md(&mdh); - if (md == NULL) { + if (!md) { lnet_res_unlock(cpt); return -ENOENT; } md->md_flags |= LNET_MD_FLAG_ABORTED; - /* If the MD is busy, lnet_md_unlink just marks it for deletion, and + /* + * If the MD is busy, lnet_md_unlink just marks it for deletion, and * when the LND is done, the completion event flags that the MD was - * unlinked. Otherwise, we enqueue an event now... */ - if (md->md_eq != NULL && md->md_refcount == 0) { + * unlinked. Otherwise, we enqueue an event now... 
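As a hedged usage sketch (not taken from the patch): the checks above split MDs into passive ones created with LNetMDAttach(), which must carry LNET_MD_OP_PUT and/or LNET_MD_OP_GET, and active ones created with LNetMDBind(), which must not. The helper below only illustrates that rule; the exact LNetMDAttach() prototype and the caller-side ME setup are assumed rather than shown in this diff.

static int toy_attach_recv_buffer(lnet_handle_me_t meh, void *buf,
				  unsigned int len, lnet_handle_eq_t eqh,
				  lnet_handle_md_t *mdh)
{
	lnet_md_t md;

	memset(&md, 0, sizeof(md));
	md.start = buf;			/* NULL start with len != 0 is rejected */
	md.length = len;
	md.threshold = 1;		/* allow a single operation on this MD */
	md.options = LNET_MD_OP_PUT;	/* mandatory for LNetMDAttach() */
	md.eq_handle = eqh;

	/*
	 * The same descriptor handed to LNetMDBind() would have to leave
	 * the OP bits clear, or it comes back with -EINVAL as shown above.
	 */
	return LNetMDAttach(meh, md, LNET_UNLINK, mdh);
}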
+ */ + if (md->md_eq && !md->md_refcount) { lnet_build_unlink_event(md, &ev); lnet_eq_enqueue_event(md->md_eq, &ev); } diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c index 42fc99ef9f80..e671aed373df 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-me.c +++ b/drivers/staging/lustre/lnet/lnet/lib-me.c @@ -83,7 +83,6 @@ LNetMEAttach(unsigned int portal, struct lnet_me *me; struct list_head *head; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if ((int)portal >= the_lnet.ln_nportals) @@ -91,11 +90,11 @@ LNetMEAttach(unsigned int portal, mtable = lnet_mt_of_attach(portal, match_id, match_bits, ignore_bits, pos); - if (mtable == NULL) /* can't match portal type */ + if (!mtable) /* can't match portal type */ return -EPERM; me = lnet_me_alloc(); - if (me == NULL) + if (!me) return -ENOMEM; lnet_res_lock(mtable->mt_cpt); @@ -109,7 +108,7 @@ LNetMEAttach(unsigned int portal, lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt], &me->me_lh); - if (ignore_bits != 0) + if (ignore_bits) head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE]; else head = lnet_mt_match_head(mtable, match_id, match_bits); @@ -156,14 +155,13 @@ LNetMEInsert(lnet_handle_me_t current_meh, struct lnet_portal *ptl; int cpt; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (pos == LNET_INS_LOCAL) return -EPERM; new_me = lnet_me_alloc(); - if (new_me == NULL) + if (!new_me) return -ENOMEM; cpt = lnet_cpt_of_cookie(current_meh.cookie); @@ -171,7 +169,7 @@ LNetMEInsert(lnet_handle_me_t current_meh, lnet_res_lock(cpt); current_me = lnet_handle2me(¤t_meh); - if (current_me == NULL) { + if (!current_me) { lnet_me_free(new_me); lnet_res_unlock(cpt); @@ -233,22 +231,21 @@ LNetMEUnlink(lnet_handle_me_t meh) lnet_event_t ev; int cpt; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_cpt_of_cookie(meh.cookie); lnet_res_lock(cpt); me = lnet_handle2me(&meh); - if (me == NULL) { + if (!me) { lnet_res_unlock(cpt); return -ENOENT; } md = me->me_md; - if (md != NULL) { + if (md) { md->md_flags |= LNET_MD_FLAG_ABORTED; - if (md->md_eq != NULL && md->md_refcount == 0) { + if (md->md_eq && !md->md_refcount) { lnet_build_unlink_event(md, &ev); lnet_eq_enqueue_event(md->md_eq, &ev); } @@ -267,7 +264,7 @@ lnet_me_unlink(lnet_me_t *me) { list_del(&me->me_list); - if (me->me_md != NULL) { + if (me->me_md) { lnet_libmd_t *md = me->me_md; /* detach MD from portal of this ME */ diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c index fb8f7be043ec..0009a8de77d5 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-move.c +++ b/drivers/staging/lustre/lnet/lnet/lib-move.c @@ -50,17 +50,16 @@ int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) { lnet_test_peer_t *tp; + lnet_test_peer_t *temp; struct list_head *el; struct list_head *next; struct list_head cull; - LASSERT(the_lnet.ln_init); - /* NB: use lnet_net_lock(0) to serialize operations on test peers */ - if (threshold != 0) { + if (threshold) { /* Adding a new entry */ LIBCFS_ALLOC(tp, sizeof(*tp)); - if (tp == NULL) + if (!tp) return -ENOMEM; tp->tp_nid = nid; @@ -80,7 +79,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) list_for_each_safe(el, next, &the_lnet.ln_test_peers) { tp = list_entry(el, lnet_test_peer_t, tp_list); - if (tp->tp_threshold == 0 || /* needs culling anyway */ + if (!tp->tp_threshold || /* needs culling anyway */ nid == LNET_NID_ANY || /* removing all entries */ tp->tp_nid == nid) { /* matched this one */ 
list_del(&tp->tp_list); @@ -90,9 +89,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) lnet_net_unlock(0); - while (!list_empty(&cull)) { - tp = list_entry(cull.next, lnet_test_peer_t, tp_list); - + list_for_each_entry_safe(tp, temp, &cull, tp_list) { list_del(&tp->tp_list); LIBCFS_FREE(tp, sizeof(*tp)); } @@ -103,6 +100,7 @@ static int fail_peer(lnet_nid_t nid, int outgoing) { lnet_test_peer_t *tp; + lnet_test_peer_t *temp; struct list_head *el; struct list_head *next; struct list_head cull; @@ -116,12 +114,14 @@ fail_peer(lnet_nid_t nid, int outgoing) list_for_each_safe(el, next, &the_lnet.ln_test_peers) { tp = list_entry(el, lnet_test_peer_t, tp_list); - if (tp->tp_threshold == 0) { + if (!tp->tp_threshold) { /* zombie entry */ if (outgoing) { - /* only cull zombies on outgoing tests, + /* + * only cull zombies on outgoing tests, * since we may be at interrupt priority on - * incoming messages. */ + * incoming messages. + */ list_del(&tp->tp_list); list_add(&tp->tp_list, &cull); } @@ -135,7 +135,7 @@ fail_peer(lnet_nid_t nid, int outgoing) if (tp->tp_threshold != LNET_MD_THRESH_INF) { tp->tp_threshold--; if (outgoing && - tp->tp_threshold == 0) { + !tp->tp_threshold) { /* see above */ list_del(&tp->tp_list); list_add(&tp->tp_list, &cull); @@ -147,8 +147,7 @@ fail_peer(lnet_nid_t nid, int outgoing) lnet_net_unlock(0); - while (!list_empty(&cull)) { - tp = list_entry(cull.next, lnet_test_peer_t, tp_list); + list_for_each_entry_safe(tp, temp, &cull, tp_list) { list_del(&tp->tp_list); LIBCFS_FREE(tp, sizeof(*tp)); @@ -162,6 +161,7 @@ lnet_iov_nob(unsigned int niov, struct kvec *iov) { unsigned int nob = 0; + LASSERT(!niov || iov); while (niov-- > 0) nob += (iov++)->iov_len; @@ -171,13 +171,13 @@ EXPORT_SYMBOL(lnet_iov_nob); void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, - unsigned int nsiov, struct kvec *siov, unsigned int soffset, - unsigned int nob) + unsigned int nsiov, struct kvec *siov, unsigned int soffset, + unsigned int nob) { /* NB diov, siov are READ-ONLY */ unsigned int this_nob; - if (nob == 0) + if (!nob) return; /* skip complete frags before 'doffset' */ @@ -206,7 +206,7 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, this_nob = min(this_nob, nob); memcpy((char *)diov->iov_base + doffset, - (char *)siov->iov_base + soffset, this_nob); + (char *)siov->iov_base + soffset, this_nob); nob -= this_nob; if (diov->iov_len > doffset + this_nob) { @@ -230,16 +230,18 @@ EXPORT_SYMBOL(lnet_copy_iov2iov); int lnet_extract_iov(int dst_niov, struct kvec *dst, - int src_niov, struct kvec *src, - unsigned int offset, unsigned int len) + int src_niov, struct kvec *src, + unsigned int offset, unsigned int len) { - /* Initialise 'dst' to the subset of 'src' starting at 'offset', + /* + * Initialise 'dst' to the subset of 'src' starting at 'offset', * for exactly 'len' bytes, and return the number of entries. 
- * NB not destructive to 'src' */ + * NB not destructive to 'src' + */ unsigned int frag_len; unsigned int niov; - if (len == 0) /* no data => */ + if (!len) /* no data => */ return 0; /* no frags */ LASSERT(src_niov > 0); @@ -280,6 +282,7 @@ lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov) { unsigned int nob = 0; + LASSERT(!niov || kiov); while (niov-- > 0) nob += (kiov++)->kiov_len; @@ -297,7 +300,7 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, char *daddr = NULL; char *saddr = NULL; - if (nob == 0) + if (!nob) return; LASSERT(!in_interrupt()); @@ -325,17 +328,18 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, siov->kiov_len - soffset); this_nob = min(this_nob, nob); - if (daddr == NULL) + if (!daddr) daddr = ((char *)kmap(diov->kiov_page)) + diov->kiov_offset + doffset; - if (saddr == NULL) + if (!saddr) saddr = ((char *)kmap(siov->kiov_page)) + siov->kiov_offset + soffset; - /* Vanishing risk of kmap deadlock when mapping 2 pages. + /* + * Vanishing risk of kmap deadlock when mapping 2 pages. * However in practice at least one of the kiovs will be mapped - * kernel pages and the map/unmap will be NOOPs */ - + * kernel pages and the map/unmap will be NOOPs + */ memcpy(daddr, saddr, this_nob); nob -= this_nob; @@ -362,9 +366,9 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, } } while (nob > 0); - if (daddr != NULL) + if (daddr) kunmap(diov->kiov_page); - if (saddr != NULL) + if (saddr) kunmap(siov->kiov_page); } EXPORT_SYMBOL(lnet_copy_kiov2kiov); @@ -378,7 +382,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, unsigned int this_nob; char *addr = NULL; - if (nob == 0) + if (!nob) return; LASSERT(!in_interrupt()); @@ -406,7 +410,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, (__kernel_size_t) kiov->kiov_len - kiovoffset); this_nob = min(this_nob, nob); - if (addr == NULL) + if (!addr) addr = ((char *)kmap(kiov->kiov_page)) + kiov->kiov_offset + kiovoffset; @@ -434,7 +438,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, } while (nob > 0); - if (addr != NULL) + if (addr) kunmap(kiov->kiov_page); } EXPORT_SYMBOL(lnet_copy_kiov2iov); @@ -449,7 +453,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int this_nob; char *addr = NULL; - if (nob == 0) + if (!nob) return; LASSERT(!in_interrupt()); @@ -477,7 +481,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, iov->iov_len - iovoffset); this_nob = min(this_nob, nob); - if (addr == NULL) + if (!addr) addr = ((char *)kmap(kiov->kiov_page)) + kiov->kiov_offset + kiovoffset; @@ -504,23 +508,25 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, } } while (nob > 0); - if (addr != NULL) + if (addr) kunmap(kiov->kiov_page); } EXPORT_SYMBOL(lnet_copy_iov2kiov); int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, - int src_niov, lnet_kiov_t *src, - unsigned int offset, unsigned int len) + int src_niov, lnet_kiov_t *src, + unsigned int offset, unsigned int len) { - /* Initialise 'dst' to the subset of 'src' starting at 'offset', + /* + * Initialise 'dst' to the subset of 'src' starting at 'offset', * for exactly 'len' bytes, and return the number of entries. 
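As a worked aside (values are hypothetical): lnet_extract_iov() and lnet_extract_kiov() both describe the byte range [offset, offset + len) of a source fragment list in a destination fragment array, leaving the source untouched. With the kvec variant, whose signature appears above, the arithmetic plays out like this:

static int toy_extract_example(void)
{
	static char buf_a[100], buf_b[200];	/* dummy payload buffers */
	struct kvec src[2] = {
		{ .iov_base = buf_a, .iov_len = 100 },
		{ .iov_base = buf_b, .iov_len = 200 },
	};
	struct kvec dst[2];

	/*
	 * Byte range [150, 270): fragment 0 (100 bytes) is skipped
	 * entirely, the range starts 50 bytes into fragment 1 and fits
	 * there, so a single destination entry of 120 bytes is produced
	 * and the return value is 1.
	 */
	return lnet_extract_iov(2, dst, 2, src, 150, 120);
}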
- * NB not destructive to 'src' */ + * NB not destructive to 'src' + */ unsigned int frag_len; unsigned int niov; - if (len == 0) /* no data => */ + if (!len) /* no data => */ return 0; /* no frags */ LASSERT(src_niov > 0); @@ -543,7 +549,7 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, if (len <= frag_len) { dst->kiov_len = len; LASSERT(dst->kiov_offset + dst->kiov_len - <= PAGE_CACHE_SIZE); + <= PAGE_CACHE_SIZE); return niov; } @@ -560,7 +566,7 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, } EXPORT_SYMBOL(lnet_extract_kiov); -static void +void lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int offset, unsigned int mlen, unsigned int rlen) { @@ -570,9 +576,9 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, int rc; LASSERT(!in_interrupt()); - LASSERT(mlen == 0 || msg != NULL); + LASSERT(!mlen || msg); - if (msg != NULL) { + if (msg) { LASSERT(msg->msg_receiving); LASSERT(!msg->msg_sending); LASSERT(rlen == msg->msg_len); @@ -582,18 +588,18 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, msg->msg_receiving = 0; - if (mlen != 0) { + if (mlen) { niov = msg->msg_niov; iov = msg->msg_iov; kiov = msg->msg_kiov; LASSERT(niov > 0); - LASSERT((iov == NULL) != (kiov == NULL)); + LASSERT(!iov != !kiov); } } - rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed, - niov, iov, kiov, offset, mlen, rlen); + rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed, + niov, iov, kiov, offset, mlen, rlen); if (rc < 0) lnet_finalize(ni, msg, rc); } @@ -605,13 +611,13 @@ lnet_setpayloadbuffer(lnet_msg_t *msg) LASSERT(msg->msg_len > 0); LASSERT(!msg->msg_routing); - LASSERT(md != NULL); - LASSERT(msg->msg_niov == 0); - LASSERT(msg->msg_iov == NULL); - LASSERT(msg->msg_kiov == NULL); + LASSERT(md); + LASSERT(!msg->msg_niov); + LASSERT(!msg->msg_iov); + LASSERT(!msg->msg_kiov); msg->msg_niov = md->md_niov; - if ((md->md_options & LNET_MD_KIOV) != 0) + if (md->md_options & LNET_MD_KIOV) msg->msg_kiov = md->md_iov.kiov; else msg->msg_iov = md->md_iov.iov; @@ -626,7 +632,7 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target, msg->msg_len = len; msg->msg_offset = offset; - if (len != 0) + if (len) lnet_setpayloadbuffer(msg); memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr)); @@ -646,9 +652,9 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg) LASSERT(!in_interrupt()); LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND || - (msg->msg_txcredit && msg->msg_peertxcredit)); + (msg->msg_txcredit && msg->msg_peertxcredit)); - rc = (ni->ni_lnd->lnd_send)(ni, priv, msg); + rc = ni->ni_lnd->lnd_send(ni, priv, msg); if (rc < 0) lnet_finalize(ni, msg, rc); } @@ -661,12 +667,12 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg) LASSERT(!msg->msg_sending); LASSERT(msg->msg_receiving); LASSERT(!msg->msg_rx_ready_delay); - LASSERT(ni->ni_lnd->lnd_eager_recv != NULL); + LASSERT(ni->ni_lnd->lnd_eager_recv); msg->msg_rx_ready_delay = 1; - rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg, - &msg->msg_private); - if (rc != 0) { + rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg, + &msg->msg_private); + if (rc) { CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n", libcfs_nid2str(msg->msg_rxpeer->lp_nid), libcfs_id2str(msg->msg_target), rc); @@ -683,15 +689,15 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp) unsigned long last_alive = 0; LASSERT(lnet_peer_aliveness_enabled(lp)); - LASSERT(ni->ni_lnd->lnd_query != NULL); + LASSERT(ni->ni_lnd->lnd_query); lnet_net_unlock(lp->lp_cpt); 
- (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive); + ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive); lnet_net_lock(lp->lp_cpt); lp->lp_last_query = cfs_time_current(); - if (last_alive != 0) /* NI has updated timestamp */ + if (last_alive) /* NI has updated timestamp */ lp->lp_last_alive = last_alive; } @@ -720,14 +726,16 @@ lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now) * case, and moreover lp_last_alive at peer creation is assumed. */ if (alive && !lp->lp_alive && - !(lnet_isrouter(lp) && lp->lp_alive_count == 0)) + !(lnet_isrouter(lp) && !lp->lp_alive_count)) lnet_notify_locked(lp, 0, 1, lp->lp_last_alive); return alive; } -/* NB: returns 1 when alive, 0 when dead, negative when error; - * may drop the lnet_net_lock */ +/* + * NB: returns 1 when alive, 0 when dead, negative when error; + * may drop the lnet_net_lock + */ static int lnet_peer_alive_locked(lnet_peer_t *lp) { @@ -739,9 +747,11 @@ lnet_peer_alive_locked(lnet_peer_t *lp) if (lnet_peer_is_alive(lp, now)) return 1; - /* Peer appears dead, but we should avoid frequent NI queries (at - * most once per lnet_queryinterval seconds). */ - if (lp->lp_last_query != 0) { + /* + * Peer appears dead, but we should avoid frequent NI queries (at + * most once per lnet_queryinterval seconds). + */ + if (lp->lp_last_query) { static const int lnet_queryinterval = 1; unsigned long next_query = @@ -775,10 +785,10 @@ lnet_peer_alive_locked(lnet_peer_t *lp) * lnet_send() is going to lnet_net_unlock immediately after this, so * it sets do_send FALSE and I don't do the unlock/send/lock bit. * - * \retval 0 If \a msg sent or OK to send. - * \retval EAGAIN If \a msg blocked for credit. - * \retval EHOSTUNREACH If the next hop of the message appears dead. - * \retval ECANCELED If the MD of the message has been unlinked. + * \retval LNET_CREDIT_OK If \a msg sent or OK to send. + * \retval LNET_CREDIT_WAIT If \a msg blocked for credit. + * \retval -EHOSTUNREACH If the next hop of the message appears dead. + * \retval -ECANCELED If the MD of the message has been unlinked. 
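As a simplified aside on the return convention documented above: lnet_post_send_locked() now reports back-pressure through LNET_CREDIT_OK/LNET_CREDIT_WAIT and real failures as negative errnos, instead of the earlier 0/EAGAIN/positive-errno mix. A caller therefore takes roughly the shape below (condensed from what lnet_send() does later in this file; local names are simplified):

	rc = lnet_post_send_locked(msg, 0);
	lnet_net_unlock(cpt);

	if (rc < 0)			/* -EHOSTUNREACH, -ECANCELED, ... */
		return rc;

	if (rc == LNET_CREDIT_OK)	/* credit granted: send right away */
		lnet_ni_send(src_ni, msg);

	/*
	 * LNET_CREDIT_WAIT: the message was queued on a credit list and
	 * will go out when lnet_return_tx_credits_locked() frees a
	 * credit, so the caller still reports success.
	 */
	return 0;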
*/ static int lnet_post_send_locked(lnet_msg_t *msg, int do_send) @@ -794,8 +804,8 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) LASSERT(msg->msg_tx_committed); /* NB 'lp' is always the next hop */ - if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 && - lnet_peer_alive_locked(lp) == 0) { + if (!(msg->msg_target.pid & LNET_PID_USERFLAG) && + !lnet_peer_alive_locked(lp)) { the_lnet.ln_counters[cpt]->drop_count++; the_lnet.ln_counters[cpt]->drop_length += msg->msg_len; lnet_net_unlock(cpt); @@ -806,11 +816,11 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) lnet_finalize(ni, msg, -EHOSTUNREACH); lnet_net_lock(cpt); - return EHOSTUNREACH; + return -EHOSTUNREACH; } - if (msg->msg_md != NULL && - (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) { + if (msg->msg_md && + (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) { lnet_net_unlock(cpt); CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n", @@ -819,12 +829,12 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) lnet_finalize(ni, msg, -ECANCELED); lnet_net_lock(cpt); - return ECANCELED; + return -ECANCELED; } if (!msg->msg_peertxcredit) { LASSERT((lp->lp_txcredits < 0) == - !list_empty(&lp->lp_txq)); + !list_empty(&lp->lp_txq)); msg->msg_peertxcredit = 1; lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t); @@ -836,7 +846,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) if (lp->lp_txcredits < 0) { msg->msg_tx_delayed = 1; list_add_tail(&msg->msg_list, &lp->lp_txq); - return EAGAIN; + return LNET_CREDIT_WAIT; } } @@ -853,7 +863,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) if (tq->tq_credits < 0) { msg->msg_tx_delayed = 1; list_add_tail(&msg->msg_list, &tq->tq_delayed); - return EAGAIN; + return LNET_CREDIT_WAIT; } } @@ -862,7 +872,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) lnet_ni_send(ni, msg); lnet_net_lock(cpt); } - return 0; + return LNET_CREDIT_OK; } static lnet_rtrbufpool_t * @@ -888,16 +898,19 @@ lnet_msg2bufpool(lnet_msg_t *msg) static int lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) { - /* lnet_parse is going to lnet_net_unlock immediately after this, so it - * sets do_recv FALSE and I don't do the unlock/send/lock bit. I - * return EAGAIN if msg blocked and 0 if received or OK to receive */ + /* + * lnet_parse is going to lnet_net_unlock immediately after this, so it + * sets do_recv FALSE and I don't do the unlock/send/lock bit. 
+ * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if + * received or OK to receive + */ lnet_peer_t *lp = msg->msg_rxpeer; lnet_rtrbufpool_t *rbp; lnet_rtrbuf_t *rb; - LASSERT(msg->msg_iov == NULL); - LASSERT(msg->msg_kiov == NULL); - LASSERT(msg->msg_niov == 0); + LASSERT(!msg->msg_iov); + LASSERT(!msg->msg_kiov); + LASSERT(!msg->msg_niov); LASSERT(msg->msg_routing); LASSERT(msg->msg_receiving); LASSERT(!msg->msg_sending); @@ -907,7 +920,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) if (!msg->msg_peerrtrcredit) { LASSERT((lp->lp_rtrcredits < 0) == - !list_empty(&lp->lp_rtrq)); + !list_empty(&lp->lp_rtrq)); msg->msg_peerrtrcredit = 1; lp->lp_rtrcredits--; @@ -919,16 +932,13 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) LASSERT(msg->msg_rx_ready_delay); msg->msg_rx_delayed = 1; list_add_tail(&msg->msg_list, &lp->lp_rtrq); - return EAGAIN; + return LNET_CREDIT_WAIT; } } rbp = lnet_msg2bufpool(msg); if (!msg->msg_rtrcredit) { - LASSERT((rbp->rbp_credits < 0) == - !list_empty(&rbp->rbp_msgs)); - msg->msg_rtrcredit = 1; rbp->rbp_credits--; if (rbp->rbp_credits < rbp->rbp_mincredits) @@ -939,7 +949,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) LASSERT(msg->msg_rx_ready_delay); msg->msg_rx_delayed = 1; list_add_tail(&msg->msg_list, &rbp->rbp_msgs); - return EAGAIN; + return LNET_CREDIT_WAIT; } } @@ -958,7 +968,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) 0, msg->msg_len, msg->msg_len); lnet_net_lock(cpt); } - return 0; + return LNET_CREDIT_OK; } void @@ -980,7 +990,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) tq->tq_credits++; if (tq->tq_credits <= 0) { msg2 = list_entry(tq->tq_delayed.next, - lnet_msg_t, msg_list); + lnet_msg_t, msg_list); list_del(&msg2->msg_list); LASSERT(msg2->msg_txpeer->lp_ni == ni); @@ -1003,7 +1013,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) txpeer->lp_txcredits++; if (txpeer->lp_txcredits <= 0) { msg2 = list_entry(txpeer->lp_txq.next, - lnet_msg_t, msg_list); + lnet_msg_t, msg_list); list_del(&msg2->msg_list); LASSERT(msg2->msg_txpeer == txpeer); @@ -1013,13 +1023,50 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) } } - if (txpeer != NULL) { + if (txpeer) { msg->msg_txpeer = NULL; lnet_peer_decref_locked(txpeer); } } void +lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp) +{ + lnet_msg_t *msg; + + if (list_empty(&rbp->rbp_msgs)) + return; + msg = list_entry(rbp->rbp_msgs.next, + lnet_msg_t, msg_list); + list_del(&msg->msg_list); + + (void)lnet_post_routed_recv_locked(msg, 1); +} + +void +lnet_drop_routed_msgs_locked(struct list_head *list, int cpt) +{ + struct list_head drop; + lnet_msg_t *msg; + lnet_msg_t *tmp; + + INIT_LIST_HEAD(&drop); + + list_splice_init(list, &drop); + + lnet_net_unlock(cpt); + + list_for_each_entry_safe(msg, tmp, &drop, msg_list) { + lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL, + 0, 0, 0, msg->msg_hdr.payload_length); + list_del_init(&msg->msg_list); + lnet_finalize(NULL, msg, -ECANCELED); + } + + lnet_net_lock(cpt); +} + +void lnet_return_rx_credits_locked(lnet_msg_t *msg) { lnet_peer_t *rxpeer = msg->msg_rxpeer; @@ -1030,34 +1077,51 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg) lnet_rtrbuf_t *rb; lnet_rtrbufpool_t *rbp; - /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays + /* + * NB If a msg ever blocks for a buffer in rbp_msgs, it stays * there until it gets one allocated, or aborts the wait - * itself */ - LASSERT(msg->msg_kiov != NULL); + * itself + */ + LASSERT(msg->msg_kiov); rb = 
list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]); rbp = rb->rb_pool; - LASSERT(rbp == lnet_msg2bufpool(msg)); msg->msg_kiov = NULL; msg->msg_rtrcredit = 0; - LASSERT((rbp->rbp_credits < 0) == - !list_empty(&rbp->rbp_msgs)); + LASSERT(rbp == lnet_msg2bufpool(msg)); + LASSERT((rbp->rbp_credits > 0) == !list_empty(&rbp->rbp_bufs)); - list_add(&rb->rb_list, &rbp->rbp_bufs); - rbp->rbp_credits++; - if (rbp->rbp_credits <= 0) { - msg2 = list_entry(rbp->rbp_msgs.next, - lnet_msg_t, msg_list); - list_del(&msg2->msg_list); + /* + * If routing is now turned off, we just drop this buffer and + * don't bother trying to return credits. + */ + if (!the_lnet.ln_routing) { + lnet_destroy_rtrbuf(rb, rbp->rbp_npages); + goto routing_off; + } - (void) lnet_post_routed_recv_locked(msg2, 1); + /* + * It is possible that a user has lowered the desired number of + * buffers in this pool. Make sure we never put back + * more buffers than the stated number. + */ + if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) { + /* Discard this buffer so we don't have too many. */ + lnet_destroy_rtrbuf(rb, rbp->rbp_npages); + rbp->rbp_nbuffers--; + } else { + list_add(&rb->rb_list, &rbp->rbp_bufs); + rbp->rbp_credits++; + if (rbp->rbp_credits <= 0) + lnet_schedule_blocked_locked(rbp); } } +routing_off: if (msg->msg_peerrtrcredit) { /* give back peer router credits */ msg->msg_peerrtrcredit = 0; @@ -1066,15 +1130,22 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg) !list_empty(&rxpeer->lp_rtrq)); rxpeer->lp_rtrcredits++; - if (rxpeer->lp_rtrcredits <= 0) { + /* + * drop all messages which are queued to be routed on that + * peer. + */ + if (!the_lnet.ln_routing) { + lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq, + msg->msg_rx_cpt); + } else if (rxpeer->lp_rtrcredits <= 0) { msg2 = list_entry(rxpeer->lp_rtrq.next, - lnet_msg_t, msg_list); + lnet_msg_t, msg_list); list_del(&msg2->msg_list); (void) lnet_post_routed_recv_locked(msg2, 1); } } - if (rxpeer != NULL) { + if (rxpeer) { msg->msg_rxpeer = NULL; lnet_peer_decref_locked(rxpeer); } @@ -1085,94 +1156,99 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2) { lnet_peer_t *p1 = r1->lr_gateway; lnet_peer_t *p2 = r2->lr_gateway; + int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops; + int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 
1 : r2->lr_hops; if (r1->lr_priority < r2->lr_priority) return 1; if (r1->lr_priority > r2->lr_priority) - return -1; + return -ERANGE; - if (r1->lr_hops < r2->lr_hops) + if (r1_hops < r2_hops) return 1; - if (r1->lr_hops > r2->lr_hops) - return -1; + if (r1_hops > r2_hops) + return -ERANGE; if (p1->lp_txqnob < p2->lp_txqnob) return 1; if (p1->lp_txqnob > p2->lp_txqnob) - return -1; + return -ERANGE; if (p1->lp_txcredits > p2->lp_txcredits) return 1; if (p1->lp_txcredits < p2->lp_txcredits) - return -1; + return -ERANGE; if (r1->lr_seq - r2->lr_seq <= 0) return 1; - return -1; + return -ERANGE; } static lnet_peer_t * lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid) { lnet_remotenet_t *rnet; - lnet_route_t *rtr; - lnet_route_t *rtr_best; - lnet_route_t *rtr_last; + lnet_route_t *route; + lnet_route_t *best_route; + lnet_route_t *last_route; struct lnet_peer *lp_best; struct lnet_peer *lp; int rc; - /* If @rtr_nid is not LNET_NID_ANY, return the gateway with - * rtr_nid nid, otherwise find the best gateway I can use */ - + /* + * If @rtr_nid is not LNET_NID_ANY, return the gateway with + * rtr_nid nid, otherwise find the best gateway I can use + */ rnet = lnet_find_net_locked(LNET_NIDNET(target)); - if (rnet == NULL) + if (!rnet) return NULL; lp_best = NULL; - rtr_best = rtr_last = NULL; - list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) { - lp = rtr->lr_gateway; + best_route = NULL; + last_route = NULL; + list_for_each_entry(route, &rnet->lrn_routes, lr_list) { + lp = route->lr_gateway; - if (!lp->lp_alive || /* gateway is down */ - ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 && - rtr->lr_downis != 0)) /* NI to target is down */ + if (!lnet_is_route_alive(route)) continue; - if (ni != NULL && lp->lp_ni != ni) + if (ni && lp->lp_ni != ni) continue; if (lp->lp_nid == rtr_nid) /* it's pre-determined router */ return lp; - if (lp_best == NULL) { - rtr_best = rtr_last = rtr; + if (!lp_best) { + best_route = route; + last_route = route; lp_best = lp; continue; } /* no protection on below fields, but it's harmless */ - if (rtr_last->lr_seq - rtr->lr_seq < 0) - rtr_last = rtr; + if (last_route->lr_seq - route->lr_seq < 0) + last_route = route; - rc = lnet_compare_routes(rtr, rtr_best); + rc = lnet_compare_routes(route, best_route); if (rc < 0) continue; - rtr_best = rtr; + best_route = route; lp_best = lp; } - /* set sequence number on the best router to the latest sequence + 1 + /* + * set sequence number on the best router to the latest sequence + 1 * so we can round-robin all routers, it's race and inaccurate but - * harmless and functional */ - if (rtr_best != NULL) - rtr_best->lr_seq = rtr_last->lr_seq + 1; + * harmless and functional + */ + if (best_route) + best_route->lr_seq = last_route->lr_seq + 1; return lp_best; } @@ -1187,11 +1263,13 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) int cpt2; int rc; - /* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases, + /* + * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases, * but we might want to use pre-determined router for ACK/REPLY - * in the future */ - /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */ - LASSERT(msg->msg_txpeer == NULL); + * in the future + */ + /* NB: ni == interface pre-determined (ACK/REPLY) */ + LASSERT(!msg->msg_txpeer); LASSERT(!msg->msg_sending); LASSERT(!msg->msg_target_is_router); LASSERT(!msg->msg_receiving); @@ -1212,7 +1290,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) src_ni = NULL; } else { 
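A brief worked aside on lnet_compare_routes() above (values hypothetical): a route configured without an explicit hop count is now treated as one hop for comparison purposes, and "r2 is better" is reported as -ERANGE instead of -1.

/*
 * Two routes to the same remote net, equal lr_priority:
 *   candidate A:    lr_hops == LNET_UNDEFINED_HOPS  (compared as 1 hop)
 *   current best B: lr_hops == 3
 * lnet_compare_routes(A, B) returns 1 (> 0), so lnet_find_route_locked()
 * promotes A to best_route; had it returned -ERANGE, A would simply
 * have been skipped and B kept.
 */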
src_ni = lnet_nid2ni_locked(src_nid, cpt); - if (src_ni == NULL) { + if (!src_ni) { lnet_net_unlock(cpt); LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n", libcfs_nid2str(dst_nid), @@ -1225,8 +1303,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) /* Is this for someone on a local network? */ local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt); - if (local_ni != NULL) { - if (src_ni == NULL) { + if (local_ni) { + if (!src_ni) { src_ni = local_ni; src_nid = src_ni->ni_nid; } else if (src_ni == local_ni) { @@ -1261,7 +1339,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) rc = lnet_nid2peer_locked(&lp, dst_nid, cpt); /* lp has ref on src_ni; lose mine */ lnet_ni_decref_locked(src_ni, cpt); - if (rc != 0) { + if (rc) { lnet_net_unlock(cpt); LCONSOLE_WARN("Error %d finding peer %s\n", rc, libcfs_nid2str(dst_nid)); @@ -1272,8 +1350,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) } else { /* sending to a remote network */ lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid); - if (lp == NULL) { - if (src_ni != NULL) + if (!lp) { + if (src_ni) lnet_ni_decref_locked(src_ni, cpt); lnet_net_unlock(cpt); @@ -1283,14 +1361,16 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) return -EHOSTUNREACH; } - /* rtr_nid is LNET_NID_ANY or NID of pre-determined router, + /* + * rtr_nid is LNET_NID_ANY or NID of pre-determined router, * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't * pre-determined router, this can happen if router table - * was changed when we release the lock */ + * was changed when we release the lock + */ if (rtr_nid != lp->lp_nid) { cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid); if (cpt2 != cpt) { - if (src_ni != NULL) + if (src_ni) lnet_ni_decref_locked(src_ni, cpt); lnet_net_unlock(cpt); @@ -1304,7 +1384,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid), lnet_msgtyp2str(msg->msg_type), msg->msg_len); - if (src_ni == NULL) { + if (!src_ni) { src_ni = lp->lp_ni; src_nid = src_ni->ni_nid; } else { @@ -1324,30 +1404,30 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) msg->msg_target_is_router = 1; msg->msg_target.nid = lp->lp_nid; - msg->msg_target.pid = LUSTRE_SRV_LNET_PID; + msg->msg_target.pid = LNET_PID_LUSTRE; } /* 'lp' is our best choice of peer */ LASSERT(!msg->msg_peertxcredit); LASSERT(!msg->msg_txcredit); - LASSERT(msg->msg_txpeer == NULL); + LASSERT(!msg->msg_txpeer); msg->msg_txpeer = lp; /* msg takes my ref on lp */ rc = lnet_post_send_locked(msg, 0); lnet_net_unlock(cpt); - if (rc == EHOSTUNREACH || rc == ECANCELED) - return -rc; + if (rc < 0) + return rc; - if (rc == 0) + if (rc == LNET_CREDIT_OK) lnet_ni_send(src_ni, msg); - return 0; /* rc == 0 or EAGAIN */ + return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */ } -static void +void lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob) { lnet_net_lock(cpt); @@ -1363,15 +1443,17 @@ lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg) { lnet_hdr_t *hdr = &msg->msg_hdr; - if (msg->msg_wanted != 0) + if (msg->msg_wanted) lnet_setpayloadbuffer(msg); lnet_build_msg_event(msg, LNET_EVENT_PUT); - /* Must I ACK? If so I'll grab the ack_wmd out of the header and put - * it back into the ACK during lnet_finalize() */ - msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) && - (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0); + /* + * Must I ACK? 
If so I'll grab the ack_wmd out of the header and put + * it back into the ACK during lnet_finalize() + */ + msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) && + !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE); lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed, msg->msg_offset, msg->msg_wanted, hdr->payload_length); @@ -1382,6 +1464,7 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) { lnet_hdr_t *hdr = &msg->msg_hdr; struct lnet_match_info info; + bool ready_delay; int rc; /* Convert put fields to host byte order */ @@ -1397,7 +1480,8 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) info.mi_roffset = hdr->msg.put.offset; info.mi_mbits = hdr->msg.put.match_bits; - msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL; + msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv; + ready_delay = msg->msg_rx_ready_delay; again: rc = lnet_ptl_match_md(&info, msg); @@ -1410,12 +1494,18 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) return 0; case LNET_MATCHMD_NONE: - if (msg->msg_rx_delayed) /* attached on delayed list */ + /** + * no eager_recv or has already called it, should + * have been attached on delayed list + */ + if (ready_delay) return 0; rc = lnet_ni_eager_recv(ni, msg); - if (rc == 0) + if (!rc) { + ready_delay = true; goto again; + } /* fall through */ case LNET_MATCHMD_DROP: @@ -1423,7 +1513,7 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) libcfs_id2str(info.mi_id), info.mi_portal, info.mi_mbits, info.mi_roffset, info.mi_rlength, rc); - return ENOENT; /* +ve: OK but no match */ + return -ENOENT; /* -ve: OK but no match */ } } @@ -1454,7 +1544,7 @@ lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get) CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n", libcfs_id2str(info.mi_id), info.mi_portal, info.mi_mbits, info.mi_roffset, info.mi_rlength); - return ENOENT; /* +ve: OK but no match */ + return -ENOENT; /* -ve: OK but no match */ } LASSERT(rc == LNET_MATCHMD_OK); @@ -1510,33 +1600,33 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg) /* NB handles only looked up by creator (no flips) */ md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd); - if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { + if (!md || !md->md_threshold || md->md_me) { CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n", libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - (md == NULL) ? "invalid" : "inactive", + !md ? 
"invalid" : "inactive", hdr->msg.reply.dst_wmd.wh_interface_cookie, hdr->msg.reply.dst_wmd.wh_object_cookie); - if (md != NULL && md->md_me != NULL) + if (md && md->md_me) CERROR("REPLY MD also attached to portal %d\n", md->md_me->me_portal); lnet_res_unlock(cpt); - return ENOENT; /* +ve: OK but no match */ + return -ENOENT; /* -ve: OK but no match */ } - LASSERT(md->md_offset == 0); + LASSERT(!md->md_offset); rlength = hdr->payload_length; mlength = min_t(uint, rlength, md->md_length); if (mlength < rlength && - (md->md_options & LNET_MD_TRUNCATE) == 0) { + !(md->md_options & LNET_MD_TRUNCATE)) { CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n", libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), rlength, hdr->msg.reply.dst_wmd.wh_object_cookie, mlength); lnet_res_unlock(cpt); - return ENOENT; /* +ve: OK but no match */ + return -ENOENT; /* -ve: OK but no match */ } CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n", @@ -1545,7 +1635,7 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg) lnet_msg_attach_md(msg, md, 0, mlength); - if (mlength != 0) + if (mlength) lnet_setpayloadbuffer(msg); lnet_res_unlock(cpt); @@ -1576,20 +1666,20 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg) /* NB handles only looked up by creator (no flips) */ md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd); - if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { + if (!md || !md->md_threshold || md->md_me) { /* Don't moan; this is expected */ CDEBUG(D_NET, "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n", libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - (md == NULL) ? "invalid" : "inactive", + !md ? "invalid" : "inactive", hdr->msg.ack.dst_wmd.wh_interface_cookie, hdr->msg.ack.dst_wmd.wh_object_cookie); - if (md != NULL && md->md_me != NULL) + if (md && md->md_me) CERROR("Source MD also attached to portal %d\n", md->md_me->me_portal); lnet_res_unlock(cpt); - return ENOENT; /* +ve! */ + return -ENOENT; /* -ve! 
*/ } CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n", @@ -1606,14 +1696,22 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg) return 0; } -static int +/** + * \retval LNET_CREDIT_OK If \a msg is forwarded + * \retval LNET_CREDIT_WAIT If \a msg is blocked because w/o buffer + * \retval -ve error code + */ +int lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg) { int rc = 0; + if (!the_lnet.ln_routing) + return -ECANCELED; + if (msg->msg_rxpeer->lp_rtrcredits <= 0 || lnet_msg2bufpool(msg)->rbp_credits <= 0) { - if (ni->ni_lnd->lnd_eager_recv == NULL) { + if (!ni->ni_lnd->lnd_eager_recv) { msg->msg_rx_ready_delay = 1; } else { lnet_net_unlock(msg->msg_rx_cpt); @@ -1622,11 +1720,38 @@ lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg) } } - if (rc == 0) + if (!rc) rc = lnet_post_routed_recv_locked(msg, 0); return rc; } +int +lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg) +{ + int rc; + + switch (msg->msg_type) { + case LNET_MSG_ACK: + rc = lnet_parse_ack(ni, msg); + break; + case LNET_MSG_PUT: + rc = lnet_parse_put(ni, msg); + break; + case LNET_MSG_GET: + rc = lnet_parse_get(ni, msg, msg->msg_rdma_get); + break; + case LNET_MSG_REPLY: + rc = lnet_parse_reply(ni, msg); + break; + default: /* prevent an unused label if !kernel */ + LASSERT(0); + return -EPROTO; + } + + LASSERT(!rc || rc == -ENOENT); + return rc; +} + char * lnet_msgtyp2str(int type) { @@ -1702,7 +1827,6 @@ lnet_print_hdr(lnet_hdr_t *hdr) hdr->msg.reply.dst_wmd.wh_object_cookie, hdr->payload_length); } - } int @@ -1765,20 +1889,20 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, if (the_lnet.ln_routing && ni->ni_last_alive != ktime_get_real_seconds()) { - lnet_ni_lock(ni); - /* NB: so far here is the only place to set NI status to "up */ + lnet_ni_lock(ni); ni->ni_last_alive = ktime_get_real_seconds(); - if (ni->ni_status != NULL && + if (ni->ni_status && ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) ni->ni_status->ns_status = LNET_NI_STATUS_UP; lnet_ni_unlock(ni); } - /* Regard a bad destination NID as a protocol error. Senders should + /* + * Regard a bad destination NID as a protocol error. Senders should * know what they're doing; if they don't they're misconfigured, buggy - * or malicious so we chop them off at the knees :) */ - + * or malicious so we chop them off at the knees :) + */ if (!for_me) { if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) { /* should have gone direct */ @@ -1790,8 +1914,10 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, } if (lnet_islocalnid(dest_nid)) { - /* dest is another local NI; sender should have used - * this node's NID on its own network */ + /* + * dest is another local NI; sender should have used + * this node's NID on its own network + */ CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n", libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), @@ -1816,9 +1942,10 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, } } - /* Message looks OK; we're not going to return an error, so we MUST - * call back lnd_recv() come what may... */ - + /* + * Message looks OK; we're not going to return an error, so we MUST + * call back lnd_recv() come what may... + */ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ fail_peer(src_nid, 0)) { /* shall we now? 
*/ CERROR("%s, src %s: Dropping %s to simulate failure\n", @@ -1827,8 +1954,16 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, goto drop; } + if (!list_empty(&the_lnet.ln_drop_rules) && + lnet_drop_rule_match(hdr)) { + CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n", + libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), + libcfs_nid2str(dest_nid), lnet_msgtyp2str(type)); + goto drop; + } + msg = lnet_msg_alloc(); - if (msg == NULL) { + if (!msg) { CERROR("%s, src %s: Dropping %s (out of memory)\n", libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), lnet_msgtyp2str(type)); @@ -1838,11 +1973,12 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, /* msg zeroed in lnet_msg_alloc; * i.e. flags all clear, pointers NULL etc */ - msg->msg_type = type; msg->msg_private = private; msg->msg_receiving = 1; - msg->msg_len = msg->msg_wanted = payload_length; + msg->msg_rdma_get = rdma_req; + msg->msg_wanted = payload_length; + msg->msg_len = payload_length; msg->msg_offset = 0; msg->msg_hdr = *hdr; /* for building message event */ @@ -1864,7 +2000,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, lnet_net_lock(cpt); rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt); - if (rc != 0) { + if (rc) { lnet_net_unlock(cpt); CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n", libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), @@ -1888,13 +2024,21 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, lnet_msg_commit(msg, cpt); + /* message delay simulation */ + if (unlikely(!list_empty(&the_lnet.ln_delay_rules) && + lnet_delay_rule_match_locked(hdr, msg))) { + lnet_net_unlock(cpt); + return 0; + } + if (!for_me) { rc = lnet_parse_forward_locked(ni, msg); lnet_net_unlock(cpt); if (rc < 0) goto free_drop; - if (rc == 0) { + + if (rc == LNET_CREDIT_OK) { lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, payload_length, payload_length); } @@ -1903,32 +2047,13 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, lnet_net_unlock(cpt); - switch (type) { - case LNET_MSG_ACK: - rc = lnet_parse_ack(ni, msg); - break; - case LNET_MSG_PUT: - rc = lnet_parse_put(ni, msg); - break; - case LNET_MSG_GET: - rc = lnet_parse_get(ni, msg, rdma_req); - break; - case LNET_MSG_REPLY: - rc = lnet_parse_reply(ni, msg); - break; - default: - LASSERT(0); - rc = -EPROTO; - goto free_drop; /* prevent an unused label if !kernel */ - } - - if (rc == 0) - return 0; - - LASSERT(rc == ENOENT); + rc = lnet_parse_local(ni, msg); + if (rc) + goto free_drop; + return 0; free_drop: - LASSERT(msg->msg_md == NULL); + LASSERT(!msg->msg_md); lnet_finalize(ni, msg, rc); drop: @@ -1950,9 +2075,9 @@ lnet_drop_delayed_msg_list(struct list_head *head, char *reason) id.nid = msg->msg_hdr.src_nid; id.pid = msg->msg_hdr.src_pid; - LASSERT(msg->msg_md == NULL); + LASSERT(!msg->msg_md); LASSERT(msg->msg_rx_delayed); - LASSERT(msg->msg_rxpeer != NULL); + LASSERT(msg->msg_rxpeer); LASSERT(msg->msg_hdr.type == LNET_MSG_PUT); CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n", @@ -1962,10 +2087,11 @@ lnet_drop_delayed_msg_list(struct list_head *head, char *reason) msg->msg_hdr.msg.put.offset, msg->msg_hdr.payload_length, reason); - /* NB I can't drop msg's ref on msg_rxpeer until after I've + /* + * NB I can't drop msg's ref on msg_rxpeer until after I've * called lnet_drop_message(), so I just hang onto msg as well - * until that's done */ - + * until that's done + */ 
lnet_drop_message(msg->msg_rxpeer->lp_ni, msg->msg_rxpeer->lp_cpt, msg->msg_private, msg->msg_len); @@ -1988,15 +2114,16 @@ lnet_recv_delayed_msg_list(struct list_head *head) msg = list_entry(head->next, lnet_msg_t, msg_list); list_del(&msg->msg_list); - /* md won't disappear under me, since each msg - * holds a ref on it */ - + /* + * md won't disappear under me, since each msg + * holds a ref on it + */ id.nid = msg->msg_hdr.src_nid; id.pid = msg->msg_hdr.src_pid; LASSERT(msg->msg_rx_delayed); - LASSERT(msg->msg_md != NULL); - LASSERT(msg->msg_rxpeer != NULL); + LASSERT(msg->msg_md); + LASSERT(msg->msg_rxpeer); LASSERT(msg->msg_hdr.type == LNET_MSG_PUT); CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n", @@ -2064,7 +2191,6 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, int cpt; int rc; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ @@ -2075,7 +2201,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, } msg = lnet_msg_alloc(); - if (msg == NULL) { + if (!msg) { CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n", libcfs_id2str(target)); return -ENOMEM; @@ -2086,11 +2212,11 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, lnet_res_lock(cpt); md = lnet_handle2md(&mdh); - if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { + if (!md || !md->md_threshold || md->md_me) { CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n", match_bits, portal, libcfs_id2str(target), - md == NULL ? -1 : md->md_threshold); - if (md != NULL && md->md_me != NULL) + !md ? -1 : md->md_threshold); + if (md && md->md_me) CERROR("Source MD also attached to portal %d\n", md->md_me->me_portal); lnet_res_unlock(cpt); @@ -2128,9 +2254,9 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, lnet_build_msg_event(msg, LNET_EVENT_SEND); rc = lnet_send(self, msg, LNET_NID_ANY); - if (rc != 0) { + if (rc) { CNETERR("Error sending PUT to %s: %d\n", - libcfs_id2str(target), rc); + libcfs_id2str(target), rc); lnet_finalize(NULL, msg, rc); } @@ -2142,13 +2268,14 @@ EXPORT_SYMBOL(LNetPut); lnet_msg_t * lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg) { - /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This + /* + * The LND can DMA direct to the GET md (i.e. no REPLY msg). This * returns a msg for the LND to pass to lnet_finalize() when the sink * data has been received. 
* * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when - * lnet_finalize() is called on it, so the LND must call this first */ - + * lnet_finalize() is called on it, so the LND must call this first + */ struct lnet_msg *msg = lnet_msg_alloc(); struct lnet_libmd *getmd = getmsg->msg_md; lnet_process_id_t peer_id = getmsg->msg_target; @@ -2157,26 +2284,26 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg) LASSERT(!getmsg->msg_target_is_router); LASSERT(!getmsg->msg_routing); + if (!msg) { + CERROR("%s: Dropping REPLY from %s: can't allocate msg\n", + libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id)); + goto drop; + } + cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie); lnet_res_lock(cpt); LASSERT(getmd->md_refcount > 0); - if (msg == NULL) { - CERROR("%s: Dropping REPLY from %s: can't allocate msg\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id)); - goto drop; - } - - if (getmd->md_threshold == 0) { + if (!getmd->md_threshold) { CERROR("%s: Dropping REPLY from %s for inactive MD %p\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), - getmd); + libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), + getmd); lnet_res_unlock(cpt); goto drop; } - LASSERT(getmd->md_offset == 0); + LASSERT(!getmd->md_offset); CDEBUG(D_NET, "%s: Reply from %s md %p\n", libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd); @@ -2209,7 +2336,7 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg) the_lnet.ln_counters[cpt]->drop_length += getmd->md_length; lnet_net_unlock(cpt); - if (msg != NULL) + if (msg) lnet_msg_free(msg); return NULL; @@ -2219,14 +2346,18 @@ EXPORT_SYMBOL(lnet_create_reply_msg); void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len) { - /* Set the REPLY length, now the RDMA that elides the REPLY message has - * completed and I know it. */ - LASSERT(reply != NULL); + /* + * Set the REPLY length, now the RDMA that elides the REPLY message has + * completed and I know it. + */ + LASSERT(reply); LASSERT(reply->msg_type == LNET_MSG_GET); LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY); - /* NB I trusted my peer to RDMA. If she tells me she's written beyond - * the end of my buffer, I might as well be dead. */ + /* + * NB I trusted my peer to RDMA. If she tells me she's written beyond + * the end of my buffer, I might as well be dead. + */ LASSERT(len <= reply->msg_ev.mlength); reply->msg_ev.mlength = len; @@ -2264,7 +2395,6 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, int cpt; int rc; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ @@ -2275,7 +2405,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, } msg = lnet_msg_alloc(); - if (msg == NULL) { + if (!msg) { CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n", libcfs_id2str(target)); return -ENOMEM; @@ -2285,11 +2415,11 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, lnet_res_lock(cpt); md = lnet_handle2md(&mdh); - if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { + if (!md || !md->md_threshold || md->md_me) { CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n", match_bits, portal, libcfs_id2str(target), - md == NULL ? -1 : md->md_threshold); - if (md != NULL && md->md_me != NULL) + !md ? 
-1 : md->md_threshold); + if (md && md->md_me) CERROR("REPLY MD also attached to portal %d\n", md->md_me->me_portal); @@ -2323,7 +2453,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, rc = lnet_send(self, msg, LNET_NID_ANY); if (rc < 0) { CNETERR("Error sending GET to %s: %d\n", - libcfs_id2str(target), rc); + libcfs_id2str(target), rc); lnet_finalize(NULL, msg, rc); } @@ -2358,12 +2488,12 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) __u32 order = 2; struct list_head *rn_list; - /* if !local_nid_dist_zero, I don't return a distance of 0 ever + /* + * if !local_nid_dist_zero, I don't return a distance of 0 ever * (when lustre sees a distance of 0, it substitutes 0@lo), so I * keep order 0 free for 0@lo and order 1 free for a local NID - * match */ - - LASSERT(the_lnet.ln_init); + * match + */ LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_net_lock_current(); @@ -2372,9 +2502,9 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) ni = list_entry(e, lnet_ni_t, ni_list); if (ni->ni_nid == dstnid) { - if (srcnidp != NULL) + if (srcnidp) *srcnidp = dstnid; - if (orderp != NULL) { + if (orderp) { if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND) *orderp = 0; else @@ -2386,9 +2516,9 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) } if (LNET_NIDNET(ni->ni_nid) == dstnet) { - if (srcnidp != NULL) + if (srcnidp) *srcnidp = ni->ni_nid; - if (orderp != NULL) + if (orderp) *orderp = order; lnet_net_unlock(cpt); return 1; @@ -2404,21 +2534,28 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) if (rnet->lrn_net == dstnet) { lnet_route_t *route; lnet_route_t *shortest = NULL; + __u32 shortest_hops = LNET_UNDEFINED_HOPS; + __u32 route_hops; LASSERT(!list_empty(&rnet->lrn_routes)); list_for_each_entry(route, &rnet->lrn_routes, - lr_list) { - if (shortest == NULL || - route->lr_hops < shortest->lr_hops) + lr_list) { + route_hops = route->lr_hops; + if (route_hops == LNET_UNDEFINED_HOPS) + route_hops = 1; + if (!shortest || + route_hops < shortest_hops) { shortest = route; + shortest_hops = route_hops; + } } - LASSERT(shortest != NULL); - hops = shortest->lr_hops; - if (srcnidp != NULL) + LASSERT(shortest); + hops = shortest_hops; + if (srcnidp) *srcnidp = shortest->lr_gateway->lp_ni->ni_nid; - if (orderp != NULL) + if (orderp) *orderp = order; lnet_net_unlock(cpt); return hops + 1; diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c index 43977e8dffbb..f879d7f28708 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-msg.c +++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c @@ -74,7 +74,6 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type) ev->initiator.nid = LNET_NID_ANY; ev->initiator.pid = the_lnet.ln_pid; ev->sender = LNET_NID_ANY; - } else { /* event for passive message */ ev->target.pid = hdr->dest_pid; @@ -173,7 +172,7 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status) lnet_event_t *ev = &msg->msg_ev; LASSERT(msg->msg_tx_committed); - if (status != 0) + if (status) goto out; counters = the_lnet.ln_counters[msg->msg_tx_cpt]; @@ -181,7 +180,7 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status) default: /* routed message */ LASSERT(msg->msg_routing); LASSERT(msg->msg_rx_committed); - LASSERT(ev->type == 0); + LASSERT(!ev->type); counters->route_length += msg->msg_len; counters->route_count++; @@ -203,8 +202,10 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status) case LNET_EVENT_GET: LASSERT(msg->msg_rx_committed); - /* overwritten while sending reply, we should never be - * 
here for optimized GET */ + /* + * overwritten while sending reply, we should never be + * here for optimized GET + */ LASSERT(msg->msg_type == LNET_MSG_REPLY); msg->msg_type = LNET_MSG_GET; /* fix type */ break; @@ -225,13 +226,13 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status) LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */ LASSERT(msg->msg_rx_committed); - if (status != 0) + if (status) goto out; counters = the_lnet.ln_counters[msg->msg_rx_cpt]; switch (ev->type) { default: - LASSERT(ev->type == 0); + LASSERT(!ev->type); LASSERT(msg->msg_routing); goto out; @@ -240,10 +241,12 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status) break; case LNET_EVENT_GET: - /* type is "REPLY" if it's an optimized GET on passive side, + /* + * type is "REPLY" if it's an optimized GET on passive side, * because optimized GET will never be committed for sending, * so message type wouldn't be changed back to "GET" by - * lnet_msg_decommit_tx(), see details in lnet_parse_get() */ + * lnet_msg_decommit_tx(), see details in lnet_parse_get() + */ LASSERT(msg->msg_type == LNET_MSG_REPLY || msg->msg_type == LNET_MSG_GET); counters->send_length += msg->msg_wanted; @@ -254,8 +257,10 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status) break; case LNET_EVENT_REPLY: - /* type is "GET" if it's an optimized GET on active side, - * see details in lnet_create_reply_msg() */ + /* + * type is "GET" if it's an optimized GET on active side, + * see details in lnet_create_reply_msg() + */ LASSERT(msg->msg_type == LNET_MSG_GET || msg->msg_type == LNET_MSG_REPLY); break; @@ -309,10 +314,12 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md, unsigned int offset, unsigned int mlen) { /* NB: @offset and @len are only useful for receiving */ - /* Here, we attach the MD on lnet_msg and mark it busy and + /* + * Here, we attach the MD on lnet_msg and mark it busy and * decrementing its threshold. Come what may, the lnet_msg "owns" * the MD until a call to lnet_msg_detach_md or lnet_finalize() - * signals completion. */ + * signals completion. 
+ */ LASSERT(!msg->msg_routing); msg->msg_md = md; @@ -343,7 +350,7 @@ lnet_msg_detach_md(lnet_msg_t *msg, int status) LASSERT(md->md_refcount >= 0); unlink = lnet_md_unlinkable(md); - if (md->md_eq != NULL) { + if (md->md_eq) { msg->msg_ev.status = status; msg->msg_ev.unlinked = unlink; lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev); @@ -364,7 +371,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) LASSERT(msg->msg_onactivelist); - if (status == 0 && msg->msg_ack) { + if (!status && msg->msg_ack) { /* Only send an ACK if the PUT completed successfully */ lnet_msg_decommit(msg, cpt, 0); @@ -383,8 +390,10 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits; msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength); - /* NB: we probably want to use NID of msg::msg_from as 3rd - * parameter (router NID) if it's routed message */ + /* + * NB: we probably want to use NID of msg::msg_from as 3rd + * parameter (router NID) if it's routed message + */ rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY); lnet_net_lock(cpt); @@ -401,7 +410,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) */ return rc; - } else if (status == 0 && /* OK so far */ + } else if (!status && /* OK so far */ (msg->msg_routing && !msg->msg_sending)) { /* not forwarded */ LASSERT(!msg->msg_receiving); /* called back recv already */ @@ -442,7 +451,7 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) LASSERT(!in_interrupt()); - if (msg == NULL) + if (!msg) return; #if 0 CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n", @@ -458,12 +467,12 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) msg->msg_rtrcredit ? "F" : "", msg->msg_peerrtrcredit ? "f" : "", msg->msg_onactivelist ? "!" : "", - msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid), - msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid)); + !msg->msg_txpeer ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid), + !msg->msg_rxpeer ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid)); #endif msg->msg_ev.status = status; - if (msg->msg_md != NULL) { + if (msg->msg_md) { cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie); lnet_res_lock(cpt); @@ -491,15 +500,16 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) container = the_lnet.ln_msg_containers[cpt]; list_add_tail(&msg->msg_list, &container->msc_finalizing); - /* Recursion breaker. Don't complete the message here if I am (or - * enough other threads are) already completing messages */ - + /* + * Recursion breaker. 
Don't complete the message here if I am (or + * enough other threads are) already completing messages + */ my_slot = -1; for (i = 0; i < container->msc_nfinalizers; i++) { if (container->msc_finalizers[i] == current) break; - if (my_slot < 0 && container->msc_finalizers[i] == NULL) + if (my_slot < 0 && !container->msc_finalizers[i]) my_slot = i; } @@ -512,21 +522,29 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) while (!list_empty(&container->msc_finalizing)) { msg = list_entry(container->msc_finalizing.next, - lnet_msg_t, msg_list); + lnet_msg_t, msg_list); list_del(&msg->msg_list); - /* NB drops and regains the lnet lock if it actually does - * anything, so my finalizing friends can chomp along too */ + /* + * NB drops and regains the lnet lock if it actually does + * anything, so my finalizing friends can chomp along too + */ rc = lnet_complete_msg_locked(msg, cpt); - if (rc != 0) + if (rc) break; } + if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) { + lnet_net_unlock(cpt); + lnet_delay_rule_check(); + lnet_net_lock(cpt); + } + container->msc_finalizers[my_slot] = NULL; lnet_net_unlock(cpt); - if (rc != 0) + if (rc) goto again; } EXPORT_SYMBOL(lnet_finalize); @@ -536,12 +554,12 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container) { int count = 0; - if (container->msc_init == 0) + if (!container->msc_init) return; while (!list_empty(&container->msc_active)) { lnet_msg_t *msg = list_entry(container->msc_active.next, - lnet_msg_t, msg_activelist); + lnet_msg_t, msg_activelist); LASSERT(msg->msg_onactivelist); msg->msg_onactivelist = 0; @@ -553,41 +571,23 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container) if (count > 0) CERROR("%d active msg on exit\n", count); - if (container->msc_finalizers != NULL) { + if (container->msc_finalizers) { LIBCFS_FREE(container->msc_finalizers, container->msc_nfinalizers * sizeof(*container->msc_finalizers)); container->msc_finalizers = NULL; } -#ifdef LNET_USE_LIB_FREELIST - lnet_freelist_fini(&container->msc_freelist); -#endif container->msc_init = 0; } int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt) { - int rc; - container->msc_init = 1; INIT_LIST_HEAD(&container->msc_active); INIT_LIST_HEAD(&container->msc_finalizing); -#ifdef LNET_USE_LIB_FREELIST - memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t)); - - rc = lnet_freelist_init(&container->msc_freelist, - LNET_FL_MAX_MSGS, sizeof(lnet_msg_t)); - if (rc != 0) { - CERROR("Failed to init freelist for message container\n"); - lnet_msg_container_cleanup(container); - return rc; - } -#else - rc = 0; -#endif /* number of CPUs */ container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt); @@ -595,13 +595,13 @@ lnet_msg_container_setup(struct lnet_msg_container *container, int cpt) container->msc_nfinalizers * sizeof(*container->msc_finalizers)); - if (container->msc_finalizers == NULL) { + if (!container->msc_finalizers) { CERROR("Failed to allocate message finalizers\n"); lnet_msg_container_cleanup(container); return -ENOMEM; } - return rc; + return 0; } void @@ -610,7 +610,7 @@ lnet_msg_containers_destroy(void) struct lnet_msg_container *container; int i; - if (the_lnet.ln_msg_containers == NULL) + if (!the_lnet.ln_msg_containers) return; cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) @@ -630,14 +630,14 @@ lnet_msg_containers_create(void) the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*container)); - if (the_lnet.ln_msg_containers == NULL) { + if (!the_lnet.ln_msg_containers) 
{ CERROR("Failed to allocate cpu-partition data for network\n"); return -ENOMEM; } cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) { rc = lnet_msg_container_setup(container, i); - if (rc != 0) { + if (rc) { lnet_msg_containers_destroy(); return rc; } diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c index bd7b071b2873..3947e8b711c0 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c +++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -50,7 +45,7 @@ lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id, struct lnet_portal *ptl = the_lnet.ln_portals[index]; int unique; - unique = ignore_bits == 0 && + unique = !ignore_bits && match_id.nid != LNET_NID_ANY && match_id.pid != LNET_PID_ANY; @@ -139,8 +134,10 @@ static int lnet_try_match_md(lnet_libmd_t *md, struct lnet_match_info *info, struct lnet_msg *msg) { - /* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock; - * lnet_match_blocked_msg() relies on this to avoid races */ + /* + * ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock; + * lnet_match_blocked_msg() relies on this to avoid races + */ unsigned int offset; unsigned int mlength; lnet_me_t *me = md->md_me; @@ -150,7 +147,7 @@ lnet_try_match_md(lnet_libmd_t *md, return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED; /* mismatched MD op */ - if ((md->md_options & info->mi_opc) == 0) + if (!(md->md_options & info->mi_opc)) return LNET_MATCHMD_NONE; /* mismatched ME nid/pid? */ @@ -163,17 +160,17 @@ lnet_try_match_md(lnet_libmd_t *md, return LNET_MATCHMD_NONE; /* mismatched ME matchbits? */ - if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0) + if ((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) return LNET_MATCHMD_NONE; /* Hurrah! This _is_ a match; check it out... */ - if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0) + if (!(md->md_options & LNET_MD_MANAGE_REMOTE)) offset = md->md_offset; else offset = info->mi_roffset; - if ((md->md_options & LNET_MD_MAX_SIZE) != 0) { + if (md->md_options & LNET_MD_MAX_SIZE) { mlength = md->md_max_size; LASSERT(md->md_offset + mlength <= md->md_length); } else { @@ -182,7 +179,7 @@ lnet_try_match_md(lnet_libmd_t *md, if (info->mi_rlength <= mlength) { /* fits in allowed space */ mlength = info->mi_rlength; - } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) { + } else if (!(md->md_options & LNET_MD_TRUNCATE)) { /* this packet _really_ is too big */ CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n", libcfs_id2str(info->mi_id), info->mi_mbits, @@ -203,10 +200,12 @@ lnet_try_match_md(lnet_libmd_t *md, if (!lnet_md_exhausted(md)) return LNET_MATCHMD_OK; - /* Auto-unlink NOW, so the ME gets unlinked if required. + /* + * Auto-unlink NOW, so the ME gets unlinked if required. * We bumped md->md_refcount above so the MD just gets flagged - * for unlink when it is finalized. */ - if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0) + * for unlink when it is finalized. 
+ */ + if (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) lnet_md_unlink(md); return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED; @@ -239,7 +238,7 @@ lnet_mt_of_attach(unsigned int index, lnet_process_id_t id, ptl = the_lnet.ln_portals[index]; mtable = lnet_match2mt(ptl, id, mbits); - if (mtable != NULL) /* unique portal or only one match-table */ + if (mtable) /* unique portal or only one match-table */ return mtable; /* it's a wildcard portal */ @@ -248,8 +247,10 @@ lnet_mt_of_attach(unsigned int index, lnet_process_id_t id, return NULL; case LNET_INS_BEFORE: case LNET_INS_AFTER: - /* posted by no affinity thread, always hash to specific - * match-table to avoid buffer stealing which is heavy */ + /* + * posted by no affinity thread, always hash to specific + * match-table to avoid buffer stealing which is heavy + */ return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER]; case LNET_INS_LOCAL: /* posted by cpu-affinity thread */ @@ -274,7 +275,7 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg) LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)); mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits); - if (mtable != NULL) + if (mtable) return mtable; /* it's a wildcard portal */ @@ -298,10 +299,12 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg) /* is there any active entry for this portal? */ nmaps = ptl->ptl_mt_nmaps; /* map to an active mtable to avoid heavy "stealing" */ - if (nmaps != 0) { - /* NB: there is possibility that ptl_mt_maps is being + if (nmaps) { + /* + * NB: there is possibility that ptl_mt_maps is being * changed because we are not under protection of - * lnet_ptl_lock, but it shouldn't hurt anything */ + * lnet_ptl_lock, but it shouldn't hurt anything + */ cpt = ptl->ptl_mt_maps[rotor % nmaps]; } } @@ -331,7 +334,7 @@ lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos) bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64]; pos &= (1 << LNET_MT_BITS_U64) - 1; - return ((*bmap) & (1ULL << pos)) != 0; + return (*bmap & (1ULL << pos)); } static void @@ -357,16 +360,15 @@ lnet_mt_match_head(struct lnet_match_table *mtable, lnet_process_id_t id, __u64 mbits) { struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal]; + unsigned long hash = mbits; - if (lnet_ptl_is_wildcard(ptl)) { - return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK]; - } else { - unsigned long hash = mbits + id.nid + id.pid; + if (!lnet_ptl_is_wildcard(ptl)) { + hash += id.nid + id.pid; LASSERT(lnet_ptl_is_unique(ptl)); hash = hash_long(hash, LNET_MT_HASH_BITS); - return &mtable->mt_mhash[hash]; } + return &mtable->mt_mhash[hash & LNET_MT_HASH_MASK]; } int @@ -391,18 +393,20 @@ lnet_mt_match_md(struct lnet_match_table *mtable, list_for_each_entry_safe(me, tmp, head, me_list) { /* ME attached but MD not attached yet */ - if (me->me_md == NULL) + if (!me->me_md) continue; LASSERT(me == me->me_md->md_me); rc = lnet_try_match_md(me->me_md, info, msg); - if ((rc & LNET_MATCHMD_EXHAUSTED) == 0) + if (!(rc & LNET_MATCHMD_EXHAUSTED)) exhausted = 0; /* mlist is not empty */ - if ((rc & LNET_MATCHMD_FINISH) != 0) { - /* don't return EXHAUSTED bit because we don't know - * whether the mlist is empty or not */ + if (rc & LNET_MATCHMD_FINISH) { + /* + * don't return EXHAUSTED bit because we don't know + * whether the mlist is empty or not + */ return rc & ~LNET_MATCHMD_EXHAUSTED; } } @@ -413,7 +417,7 @@ lnet_mt_match_md(struct lnet_match_table *mtable, exhausted = 0; } - if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) { + if 
(!exhausted && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) { head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits); goto again; /* re-check MEs w/o ignore-bits */ } @@ -430,8 +434,10 @@ lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg) { int rc; - /* message arrived before any buffer posting on this portal, - * simply delay or drop this message */ + /* + * message arrived before any buffer posting on this portal, + * simply delay or drop this message + */ if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl))) return 0; @@ -446,7 +452,7 @@ lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg) if (msg->msg_rx_ready_delay) { msg->msg_rx_delayed = 1; list_add_tail(&msg->msg_list, - &ptl->ptl_msg_delayed); + &ptl->ptl_msg_delayed); } rc = LNET_MATCHMD_NONE; } else { @@ -465,9 +471,13 @@ lnet_ptl_match_delay(struct lnet_portal *ptl, int rc = 0; int i; - /* steal buffer from other CPTs, and delay it if nothing to steal, - * this function is more expensive than a regular match, but we - * don't expect it can happen a lot */ + /** + * Steal buffer from other CPTs, and delay msg if nothing to + * steal. This function is more expensive than a regular + * match, but we don't expect it can happen a lot. The return + * code contains one of LNET_MATCHMD_OK, LNET_MATCHMD_DROP, or + * LNET_MATCHMD_NONE. + */ LASSERT(lnet_ptl_is_wildcard(ptl)); for (i = 0; i < LNET_CPT_NUMBER; i++) { @@ -476,56 +486,77 @@ lnet_ptl_match_delay(struct lnet_portal *ptl, cpt = (first + i) % LNET_CPT_NUMBER; mtable = ptl->ptl_mtables[cpt]; - if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled) + if (i && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled) continue; lnet_res_lock(cpt); lnet_ptl_lock(ptl); - if (i == 0) { /* the first try, attach on stealing list */ + if (!i) { + /* The first try, add to stealing list. */ list_add_tail(&msg->msg_list, - &ptl->ptl_msg_stealing); + &ptl->ptl_msg_stealing); } - if (!list_empty(&msg->msg_list)) { /* on stealing list */ + if (!list_empty(&msg->msg_list)) { + /* On stealing list. */ rc = lnet_mt_match_md(mtable, info, msg); - if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && + if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) lnet_ptl_disable_mt(ptl, cpt); - if ((rc & LNET_MATCHMD_FINISH) != 0) + if (rc & LNET_MATCHMD_FINISH) { + /* Match found, remove from stealing list. */ + list_del_init(&msg->msg_list); + } else if (i == LNET_CPT_NUMBER - 1 || /* (1) */ + !ptl->ptl_mt_nmaps || /* (2) */ + (ptl->ptl_mt_nmaps == 1 && /* (3) */ + ptl->ptl_mt_maps[0] == cpt)) { + /** + * No match found, and this is either + * (1) the last cpt to check, or + * (2) there is no active cpt, or + * (3) this is the only active cpt. + * There is nothing to steal: delay or + * drop the message. + */ list_del_init(&msg->msg_list); - } else { - /* could be matched by lnet_ptl_attach_md() - * which is called by another thread */ - rc = msg->msg_md == NULL ? 
- LNET_MATCHMD_DROP : LNET_MATCHMD_OK; - } - - if (!list_empty(&msg->msg_list) && /* not matched yet */ - (i == LNET_CPT_NUMBER - 1 || /* the last CPT */ - ptl->ptl_mt_nmaps == 0 || /* no active CPT */ - (ptl->ptl_mt_nmaps == 1 && /* the only active CPT */ - ptl->ptl_mt_maps[0] == cpt))) { - /* nothing to steal, delay or drop */ - list_del_init(&msg->msg_list); - - if (lnet_ptl_is_lazy(ptl)) { - msg->msg_rx_delayed = 1; - list_add_tail(&msg->msg_list, - &ptl->ptl_msg_delayed); - rc = LNET_MATCHMD_NONE; + if (lnet_ptl_is_lazy(ptl)) { + msg->msg_rx_delayed = 1; + list_add_tail(&msg->msg_list, + &ptl->ptl_msg_delayed); + rc = LNET_MATCHMD_NONE; + } else { + rc = LNET_MATCHMD_DROP; + } } else { - rc = LNET_MATCHMD_DROP; + /* Do another iteration. */ + rc = 0; } + } else { + /** + * No longer on stealing list: another thread + * matched the message in lnet_ptl_attach_md(). + * We are now expected to handle the message. + */ + rc = !msg->msg_md ? + LNET_MATCHMD_DROP : LNET_MATCHMD_OK; } lnet_ptl_unlock(ptl); lnet_res_unlock(cpt); - if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed) + /** + * Note that test (1) above ensures that we always + * exit the loop through this break statement. + * + * LNET_MATCHMD_NONE means msg was added to the + * delayed queue, and we may no longer reference it + * after lnet_ptl_unlock() and lnet_res_unlock(). + */ + if (rc & (LNET_MATCHMD_FINISH | LNET_MATCHMD_NONE)) break; } @@ -551,7 +582,7 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg) ptl = the_lnet.ln_portals[info->mi_portal]; rc = lnet_ptl_match_early(ptl, msg); - if (rc != 0) /* matched or delayed early message */ + if (rc) /* matched or delayed early message */ return rc; mtable = lnet_mt_of_match(info, msg); @@ -563,13 +594,13 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg) } rc = lnet_mt_match_md(mtable, info, msg); - if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) { + if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) { lnet_ptl_lock(ptl); lnet_ptl_disable_mt(ptl, mtable->mt_cpt); lnet_ptl_unlock(ptl); } - if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */ + if (rc & LNET_MATCHMD_FINISH) /* matched or dropping */ goto out1; if (!msg->msg_rx_ready_delay) @@ -587,13 +618,14 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg) lnet_ptl_unlock(ptl); lnet_res_unlock(mtable->mt_cpt); - + rc = LNET_MATCHMD_NONE; } else { lnet_res_unlock(mtable->mt_cpt); rc = lnet_ptl_match_delay(ptl, info, msg); } - if (msg->msg_rx_delayed) { + /* LNET_MATCHMD_NONE means msg was added to the delay queue */ + if (rc & LNET_MATCHMD_NONE) { CDEBUG(D_NET, "Delaying %s from %s ptl %d MB %#llx off %d len %d\n", info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET", @@ -630,7 +662,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, int exhausted = 0; int cpt; - LASSERT(md->md_refcount == 0); /* a brand new MD */ + LASSERT(!md->md_refcount); /* a brand new MD */ me->me_md = md; md->md_me = me; @@ -664,15 +696,15 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, rc = lnet_try_match_md(md, &info, msg); - exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0; - if ((rc & LNET_MATCHMD_NONE) != 0) { + exhausted = (rc & LNET_MATCHMD_EXHAUSTED); + if (rc & LNET_MATCHMD_NONE) { if (exhausted) break; continue; } /* Hurrah! 
This _is_ a match */ - LASSERT((rc & LNET_MATCHMD_FINISH) != 0); + LASSERT(rc & LNET_MATCHMD_FINISH); list_del_init(&msg->msg_list); if (head == &ptl->ptl_msg_stealing) { @@ -682,7 +714,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, continue; } - if ((rc & LNET_MATCHMD_OK) != 0) { + if (rc & LNET_MATCHMD_OK) { list_add_tail(&msg->msg_list, matches); CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n", @@ -717,7 +749,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl) struct lnet_match_table *mtable; int i; - if (ptl->ptl_mtables == NULL) /* uninitialized portal */ + if (!ptl->ptl_mtables) /* uninitialized portal */ return; LASSERT(list_empty(&ptl->ptl_msg_delayed)); @@ -727,7 +759,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl) lnet_me_t *me; int j; - if (mtable->mt_mhash == NULL) /* uninitialized match-table */ + if (!mtable->mt_mhash) /* uninitialized match-table */ continue; mhash = mtable->mt_mhash; @@ -735,7 +767,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl) for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) { while (!list_empty(&mhash[j])) { me = list_entry(mhash[j].next, - lnet_me_t, me_list); + lnet_me_t, me_list); CERROR("Active ME %p on exit\n", me); list_del(&me->me_list); lnet_me_free(me); @@ -759,7 +791,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index) ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(), sizeof(struct lnet_match_table)); - if (ptl->ptl_mtables == NULL) { + if (!ptl->ptl_mtables) { CERROR("Failed to create match table for portal %d\n", index); return -ENOMEM; } @@ -772,7 +804,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index) /* the extra entry is for MEs with ignore bits */ LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1)); - if (mhash == NULL) { + if (!mhash) { CERROR("Failed to create match hash for portal %d\n", index); goto failed; @@ -800,7 +832,7 @@ lnet_portals_destroy(void) { int i; - if (the_lnet.ln_portals == NULL) + if (!the_lnet.ln_portals) return; for (i = 0; i < the_lnet.ln_nportals; i++) @@ -820,7 +852,7 @@ lnet_portals_create(void) the_lnet.ln_nportals = MAX_PORTALS; the_lnet.ln_portals = cfs_array_alloc(the_lnet.ln_nportals, size); - if (the_lnet.ln_portals == NULL) { + if (!the_lnet.ln_portals) { CERROR("Failed to allocate portals table\n"); return -ENOMEM; } @@ -886,17 +918,8 @@ LNetSetLazyPortal(int portal) } EXPORT_SYMBOL(LNetSetLazyPortal); -/** - * Turn off the lazy portal attribute. Delayed requests on the portal, - * if any, will be all dropped when this function returns. - * - * \param portal Index of the portal to disable the lazy attribute on. - * - * \retval 0 On success. - * \retval -EINVAL If \a portal is not a valid index. 
- */ int -LNetClearLazyPortal(int portal) +lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason) { struct lnet_portal *ptl; LIST_HEAD(zombies); @@ -915,21 +938,48 @@ LNetClearLazyPortal(int portal) return 0; } - if (the_lnet.ln_shutdown) - CWARN("Active lazy portal %d on exit\n", portal); - else - CDEBUG(D_NET, "clearing portal %d lazy\n", portal); + if (ni) { + struct lnet_msg *msg, *tmp; + + /* grab all messages which are on the NI passed in */ + list_for_each_entry_safe(msg, tmp, &ptl->ptl_msg_delayed, + msg_list) { + if (msg->msg_rxpeer->lp_ni == ni) + list_move(&msg->msg_list, &zombies); + } + } else { + if (the_lnet.ln_shutdown) + CWARN("Active lazy portal %d on exit\n", portal); + else + CDEBUG(D_NET, "clearing portal %d lazy\n", portal); - /* grab all the blocked messages atomically */ - list_splice_init(&ptl->ptl_msg_delayed, &zombies); + /* grab all the blocked messages atomically */ + list_splice_init(&ptl->ptl_msg_delayed, &zombies); - lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY); + lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY); + } lnet_ptl_unlock(ptl); lnet_res_unlock(LNET_LOCK_EX); - lnet_drop_delayed_msg_list(&zombies, "Clearing lazy portal attr"); + lnet_drop_delayed_msg_list(&zombies, reason); return 0; } + +/** + * Turn off the lazy portal attribute. Delayed requests on the portal, + * if any, will be all dropped when this function returns. + * + * \param portal Index of the portal to disable the lazy attribute on. + * + * \retval 0 On success. + * \retval -EINVAL If \a portal is not a valid index. + */ +int +LNetClearLazyPortal(int portal) +{ + return lnet_clear_lazy_portal(NULL, portal, + "Clearing lazy portal attr"); +} EXPORT_SYMBOL(LNetClearLazyPortal); diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c index 589ecc84d1b8..cc0c2753dd63 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-socket.c +++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c @@ -64,7 +64,7 @@ lnet_sock_ioctl(int cmd, unsigned long arg) int rc; rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock); - if (rc != 0) { + if (rc) { CERROR("Can't create socket: %d\n", rc); return rc; } @@ -99,14 +99,17 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask) CLASSERT(sizeof(ifr.ifr_name) >= IFNAMSIZ); - strcpy(ifr.ifr_name, name); + if (strlen(name) > sizeof(ifr.ifr_name) - 1) + return -E2BIG; + strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); + rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr); - if (rc != 0) { + if (rc) { CERROR("Can't get flags for interface %s\n", name); return rc; } - if ((ifr.ifr_flags & IFF_UP) == 0) { + if (!(ifr.ifr_flags & IFF_UP)) { CDEBUG(D_NET, "Interface %s down\n", name); *up = 0; *ip = *mask = 0; @@ -114,10 +117,13 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask) } *up = 1; - strcpy(ifr.ifr_name, name); + if (strlen(name) > sizeof(ifr.ifr_name) - 1) + return -E2BIG; + strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); + ifr.ifr_addr.sa_family = AF_INET; rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr); - if (rc != 0) { + if (rc) { CERROR("Can't get IP address for interface %s\n", name); return rc; } @@ -125,10 +131,13 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask) val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr; *ip = ntohl(val); - strcpy(ifr.ifr_name, name); + if (strlen(name) > sizeof(ifr.ifr_name) - 1) + return -E2BIG; + strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); + ifr.ifr_addr.sa_family = AF_INET; rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned 
long)&ifr); - if (rc != 0) { + if (rc) { CERROR("Can't get netmask for interface %s\n", name); return rc; } @@ -159,13 +168,13 @@ lnet_ipif_enumerate(char ***namesp) for (;;) { if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { toobig = 1; - nalloc = PAGE_CACHE_SIZE/sizeof(*ifr); + nalloc = PAGE_CACHE_SIZE / sizeof(*ifr); CWARN("Too many interfaces: only enumerating first %d\n", nalloc); } LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr)); - if (ifr == NULL) { + if (!ifr) { CERROR("ENOMEM enumerating up to %d interfaces\n", nalloc); rc = -ENOMEM; @@ -181,9 +190,9 @@ lnet_ipif_enumerate(char ***namesp) goto out1; } - LASSERT(rc == 0); + LASSERT(!rc); - nfound = ifc.ifc_len/sizeof(*ifr); + nfound = ifc.ifc_len / sizeof(*ifr); LASSERT(nfound <= nalloc); if (nfound < nalloc || toobig) @@ -193,11 +202,11 @@ lnet_ipif_enumerate(char ***namesp) nalloc *= 2; } - if (nfound == 0) + if (!nfound) goto out1; LIBCFS_ALLOC(names, nfound * sizeof(*names)); - if (names == NULL) { + if (!names) { rc = -ENOMEM; goto out1; } @@ -213,7 +222,7 @@ lnet_ipif_enumerate(char ***namesp) } LIBCFS_ALLOC(names[i], IFNAMSIZ); - if (names[i] == NULL) { + if (!names[i]) { rc = -ENOMEM; goto out2; } @@ -242,7 +251,7 @@ lnet_ipif_free_enumeration(char **names, int n) LASSERT(n > 0); - for (i = 0; i < n && names[i] != NULL; i++) + for (i = 0; i < n && names[i]; i++) LIBCFS_FREE(names[i], IFNAMSIZ); LIBCFS_FREE(names, n * sizeof(*names)); @@ -253,32 +262,30 @@ int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout) { int rc; - long ticks = timeout * HZ; + long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC); unsigned long then; struct timeval tv; LASSERT(nob > 0); - /* Caller may pass a zero timeout if she thinks the socket buffer is - * empty enough to take the whole message immediately */ - + /* + * Caller may pass a zero timeout if she thinks the socket buffer is + * empty enough to take the whole message immediately + */ for (;;) { struct kvec iov = { .iov_base = buffer, .iov_len = nob }; struct msghdr msg = { - .msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0 + .msg_flags = !timeout ? 
MSG_DONTWAIT : 0 }; - if (timeout != 0) { + if (timeout) { /* Set send timeout to remaining time */ - tv = (struct timeval) { - .tv_sec = ticks / HZ, - .tv_usec = ((ticks % HZ) * 1000000) / HZ - }; + jiffies_to_timeval(jiffies_left, &tv); rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&tv, sizeof(tv)); - if (rc != 0) { + if (rc) { CERROR("Can't set socket send timeout %ld.%06d: %d\n", (long)tv.tv_sec, (int)tv.tv_usec, rc); return rc; @@ -287,7 +294,7 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout) then = jiffies; rc = kernel_sendmsg(sock, &msg, &iov, 1, nob); - ticks -= jiffies - then; + jiffies_left -= jiffies - then; if (rc == nob) return 0; @@ -295,12 +302,12 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout) if (rc < 0) return rc; - if (rc == 0) { + if (!rc) { CERROR("Unexpected zero rc\n"); return -ECONNABORTED; } - if (ticks <= 0) + if (jiffies_left <= 0) return -EAGAIN; buffer = ((char *)buffer) + rc; @@ -314,12 +321,12 @@ int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout) { int rc; - long ticks = timeout * HZ; + long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC); unsigned long then; struct timeval tv; LASSERT(nob > 0); - LASSERT(ticks > 0); + LASSERT(jiffies_left > 0); for (;;) { struct kvec iov = { @@ -331,13 +338,10 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout) }; /* Set receive timeout to remaining time */ - tv = (struct timeval) { - .tv_sec = ticks / HZ, - .tv_usec = ((ticks % HZ) * 1000000) / HZ - }; + jiffies_to_timeval(jiffies_left, &tv); rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv)); - if (rc != 0) { + if (rc) { CERROR("Can't set socket recv timeout %ld.%06d: %d\n", (long)tv.tv_sec, (int)tv.tv_usec, rc); return rc; @@ -345,21 +349,21 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout) then = jiffies; rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0); - ticks -= jiffies - then; + jiffies_left -= jiffies - then; if (rc < 0) return rc; - if (rc == 0) + if (!rc) return -ECONNRESET; buffer = ((char *)buffer) + rc; nob -= rc; - if (nob == 0) + if (!nob) return 0; - if (ticks <= 0) + if (jiffies_left <= 0) return -ETIMEDOUT; } } @@ -379,7 +383,7 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip, rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock); *sockp = sock; - if (rc != 0) { + if (rc) { CERROR("Can't create socket: %d\n", rc); return rc; } @@ -387,16 +391,16 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip, option = 1; rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&option, sizeof(option)); - if (rc != 0) { + if (rc) { CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc); goto failed; } - if (local_ip != 0 || local_port != 0) { + if (local_ip || local_port) { memset(&locaddr, 0, sizeof(locaddr)); locaddr.sin_family = AF_INET; locaddr.sin_port = htons(local_port); - locaddr.sin_addr.s_addr = (local_ip == 0) ? + locaddr.sin_addr.s_addr = !local_ip ? 
INADDR_ANY : htonl(local_ip); rc = kernel_bind(sock, (struct sockaddr *)&locaddr, @@ -406,7 +410,7 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip, *fatal = 0; goto failed; } - if (rc != 0) { + if (rc) { CERROR("Error trying to bind to port %d: %d\n", local_port, rc); goto failed; @@ -425,22 +429,22 @@ lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize) int option; int rc; - if (txbufsize != 0) { + if (txbufsize) { option = txbufsize; rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (char *)&option, sizeof(option)); - if (rc != 0) { + if (rc) { CERROR("Can't set send buffer %d: %d\n", option, rc); return rc; } } - if (rxbufsize != 0) { + if (rxbufsize) { option = rxbufsize; rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, - (char *)&option, sizeof(option)); - if (rc != 0) { + (char *)&option, sizeof(option)); + if (rc) { CERROR("Can't set receive buffer %d: %d\n", option, rc); return rc; @@ -461,16 +465,16 @@ lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port) rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len); else rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len); - if (rc != 0) { + if (rc) { CERROR("Error %d getting sock %s IP/port\n", rc, remote ? "peer" : "local"); return rc; } - if (ip != NULL) + if (ip) *ip = ntohl(sin.sin_addr.s_addr); - if (port != NULL) + if (port) *port = ntohs(sin.sin_port); return 0; @@ -480,10 +484,10 @@ EXPORT_SYMBOL(lnet_sock_getaddr); int lnet_sock_getbuf(struct socket *sock, int *txbufsize, int *rxbufsize) { - if (txbufsize != NULL) + if (txbufsize) *txbufsize = sock->sk->sk_sndbuf; - if (rxbufsize != NULL) + if (rxbufsize) *rxbufsize = sock->sk->sk_rcvbuf; return 0; @@ -498,7 +502,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port, int rc; rc = lnet_sock_create(sockp, &fatal, local_ip, local_port); - if (rc != 0) { + if (rc) { if (!fatal) CERROR("Can't create socket: port %d already in use\n", local_port); @@ -506,14 +510,13 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port, } rc = kernel_listen(*sockp, backlog); - if (rc == 0) + if (!rc) return 0; CERROR("Can't set listen backlog %d: %d\n", backlog, rc); sock_release(*sockp); return rc; } -EXPORT_SYMBOL(lnet_sock_listen); int lnet_sock_accept(struct socket **newsockp, struct socket *sock) @@ -522,10 +525,10 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock) struct socket *newsock; int rc; - init_waitqueue_entry(&wait, current); - - /* XXX this should add a ref to sock->ops->owner, if - * TCP could be a module */ + /* + * XXX this should add a ref to sock->ops->owner, if + * TCP could be a module + */ rc = sock_create_lite(PF_PACKET, sock->type, IPPROTO_TCP, &newsock); if (rc) { CERROR("Can't allocate socket\n"); @@ -537,15 +540,15 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock) rc = sock->ops->accept(sock, newsock, O_NONBLOCK); if (rc == -EAGAIN) { /* Nothing ready, so wait for activity */ - set_current_state(TASK_INTERRUPTIBLE); + init_waitqueue_entry(&wait, current); add_wait_queue(sk_sleep(sock->sk), &wait); + set_current_state(TASK_INTERRUPTIBLE); schedule(); remove_wait_queue(sk_sleep(sock->sk), &wait); - set_current_state(TASK_RUNNING); rc = sock->ops->accept(sock, newsock, O_NONBLOCK); } - if (rc != 0) + if (rc) goto failed; *newsockp = newsock; @@ -555,7 +558,6 @@ failed: sock_release(newsock); return rc; } -EXPORT_SYMBOL(lnet_sock_accept); int lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, @@ -565,7 
+567,7 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, int rc; rc = lnet_sock_create(sockp, fatal, local_ip, local_port); - if (rc != 0) + if (rc) return rc; memset(&srvaddr, 0, sizeof(srvaddr)); @@ -575,13 +577,15 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr, sizeof(srvaddr), 0); - if (rc == 0) + if (!rc) return 0; - /* EADDRNOTAVAIL probably means we're already connected to the same + /* + * EADDRNOTAVAIL probably means we're already connected to the same * peer/port on the same local port on a differently typed * connection. Let our caller retry with a different local - * port... */ + * port... + */ *fatal = !(rc == -EADDRNOTAVAIL); CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET, @@ -591,4 +595,3 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, sock_release(*sockp); return rc; } -EXPORT_SYMBOL(lnet_sock_connect); diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c index 2a137f46800f..468eda611bf8 100644 --- a/drivers/staging/lustre/lnet/lnet/lo.c +++ b/drivers/staging/lustre/lnet/lnet/lo.c @@ -46,15 +46,15 @@ lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) static int lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, - int delayed, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int mlen, unsigned int rlen) + int delayed, unsigned int niov, + struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) { lnet_msg_t *sendmsg = private; - if (lntmsg != NULL) { /* not discarding */ - if (sendmsg->msg_iov != NULL) { - if (iov != NULL) + if (lntmsg) { /* not discarding */ + if (sendmsg->msg_iov) { + if (iov) lnet_copy_iov2iov(niov, iov, offset, sendmsg->msg_niov, sendmsg->msg_iov, @@ -65,7 +65,7 @@ lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, sendmsg->msg_iov, sendmsg->msg_offset, mlen); } else { - if (iov != NULL) + if (iov) lnet_copy_kiov2iov(niov, iov, offset, sendmsg->msg_niov, sendmsg->msg_kiov, diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c index c93c00752a4c..93037c1168ca 100644 --- a/drivers/staging/lustre/lnet/lnet/module.c +++ b/drivers/staging/lustre/lnet/lnet/module.c @@ -36,6 +36,7 @@ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lib-dlc.h" static int config_on_load; module_param(config_on_load, int, 0444); @@ -52,13 +53,21 @@ lnet_configure(void *arg) mutex_lock(&lnet_config_mutex); if (!the_lnet.ln_niinit_self) { - rc = LNetNIInit(LUSTRE_SRV_LNET_PID); + rc = try_module_get(THIS_MODULE); + + if (rc != 1) + goto out; + + rc = LNetNIInit(LNET_PID_LUSTRE); if (rc >= 0) { the_lnet.ln_niinit_self = 1; rc = 0; + } else { + module_put(THIS_MODULE); } } +out: mutex_unlock(&lnet_config_mutex); return rc; } @@ -73,6 +82,7 @@ lnet_unconfigure(void) if (the_lnet.ln_niinit_self) { the_lnet.ln_niinit_self = 0; LNetNIFini(); + module_put(THIS_MODULE); } mutex_lock(&the_lnet.ln_api_mutex); @@ -80,28 +90,93 @@ lnet_unconfigure(void) mutex_unlock(&the_lnet.ln_api_mutex); mutex_unlock(&lnet_config_mutex); - return (refcount == 0) ? 0 : -EBUSY; + return !refcount ? 
0 : -EBUSY; } static int -lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data) +lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr) +{ + struct lnet_ioctl_config_data *conf = + (struct lnet_ioctl_config_data *)hdr; + int rc; + + if (conf->cfg_hdr.ioc_len < sizeof(*conf)) + return -EINVAL; + + mutex_lock(&lnet_config_mutex); + if (!the_lnet.ln_niinit_self) { + rc = -EINVAL; + goto out_unlock; + } + rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, + conf->cfg_config_u.cfg_net.net_intf, + conf->cfg_config_u.cfg_net.net_peer_timeout, + conf->cfg_config_u.cfg_net.net_peer_tx_credits, + conf->cfg_config_u.cfg_net.net_peer_rtr_credits, + conf->cfg_config_u.cfg_net.net_max_tx_credits); +out_unlock: + mutex_unlock(&lnet_config_mutex); + + return rc; +} + +static int +lnet_dyn_unconfigure(struct libcfs_ioctl_hdr *hdr) +{ + struct lnet_ioctl_config_data *conf = + (struct lnet_ioctl_config_data *)hdr; + int rc; + + if (conf->cfg_hdr.ioc_len < sizeof(*conf)) + return -EINVAL; + + mutex_lock(&lnet_config_mutex); + if (!the_lnet.ln_niinit_self) { + rc = -EINVAL; + goto out_unlock; + } + rc = lnet_dyn_del_ni(conf->cfg_net); +out_unlock: + mutex_unlock(&lnet_config_mutex); + + return rc; +} + +static int +lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr) { int rc; switch (cmd) { - case IOC_LIBCFS_CONFIGURE: + case IOC_LIBCFS_CONFIGURE: { + struct libcfs_ioctl_data *data = + (struct libcfs_ioctl_data *)hdr; + + if (data->ioc_hdr.ioc_len < sizeof(*data)) + return -EINVAL; + + the_lnet.ln_nis_from_mod_params = data->ioc_flags; return lnet_configure(NULL); + } case IOC_LIBCFS_UNCONFIGURE: return lnet_unconfigure(); + case IOC_LIBCFS_ADD_NET: + return lnet_dyn_configure(hdr); + + case IOC_LIBCFS_DEL_NET: + return lnet_dyn_unconfigure(hdr); + default: - /* Passing LNET_PID_ANY only gives me a ref if the net is up + /* + * Passing LNET_PID_ANY only gives me a ref if the net is up * already; I'll need it to ensure the net can't go down while - * I'm called into it */ + * I'm called into it + */ rc = LNetNIInit(LNET_PID_ANY); if (rc >= 0) { - rc = LNetCtl(cmd, data); + rc = LNetCtl(cmd, hdr); LNetNIFini(); } return rc; @@ -110,46 +185,46 @@ lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data) static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl); -static int __init -init_lnet(void) +static int __init lnet_init(void) { int rc; mutex_init(&lnet_config_mutex); - rc = lnet_init(); - if (rc != 0) { - CERROR("lnet_init: error %d\n", rc); + rc = lnet_lib_init(); + if (rc) { + CERROR("lnet_lib_init: error %d\n", rc); return rc; } rc = libcfs_register_ioctl(&lnet_ioctl_handler); - LASSERT(rc == 0); + LASSERT(!rc); if (config_on_load) { - /* Have to schedule a separate thread to avoid deadlocking - * in modload */ + /* + * Have to schedule a separate thread to avoid deadlocking + * in modload + */ (void) kthread_run(lnet_configure, NULL, "lnet_initd"); } return 0; } -static void __exit -fini_lnet(void) +static void __exit lnet_exit(void) { int rc; rc = libcfs_deregister_ioctl(&lnet_ioctl_handler); - LASSERT(rc == 0); + LASSERT(!rc); - lnet_fini(); + lnet_lib_exit(); } MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); -MODULE_DESCRIPTION("LNet v3.1"); +MODULE_DESCRIPTION("Lustre Networking layer"); +MODULE_VERSION(LNET_VERSION); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.0"); -module_init(init_lnet); -module_exit(fini_lnet); +module_init(lnet_init); +module_exit(lnet_exit); diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c new file mode 100644 index 000000000000..7d76f28d3a7a --- /dev/null +++ b/drivers/staging/lustre/lnet/lnet/net_fault.c @@ -0,0 +1,1025 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2014, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Seagate, Inc. + * + * lnet/lnet/net_fault.c + * + * Lustre network fault simulation + * + * Author: liang.zhen@intel.com + */ + +#define DEBUG_SUBSYSTEM S_LNET + +#include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lnetctl.h" + +#define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \ + LNET_GET_BIT | LNET_REPLY_BIT) + +struct lnet_drop_rule { + /** link chain on the_lnet.ln_drop_rules */ + struct list_head dr_link; + /** attributes of this rule */ + struct lnet_fault_attr dr_attr; + /** lock to protect \a dr_drop_at and \a dr_stat */ + spinlock_t dr_lock; + /** + * the message sequence to drop, which means message is dropped when + * dr_stat.drs_count == dr_drop_at + */ + unsigned long dr_drop_at; + /** + * seconds to drop the next message, it's exclusive with dr_drop_at + */ + unsigned long dr_drop_time; + /** baseline to caculate dr_drop_time */ + unsigned long dr_time_base; + /** statistic of dropped messages */ + struct lnet_fault_stat dr_stat; +}; + +static bool +lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid) +{ + if (nid == msg_nid || nid == LNET_NID_ANY) + return true; + + if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid)) + return false; + + /* 255.255.255.255@net is wildcard for all addresses in a network */ + return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY); +} + +static bool +lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src, + lnet_nid_t dst, unsigned int type, unsigned int portal) +{ + if (!lnet_fault_nid_match(attr->fa_src, src) || + !lnet_fault_nid_match(attr->fa_dst, dst)) + return false; + + if (!(attr->fa_msg_mask & (1 << type))) + return false; + + /** + * NB: ACK and REPLY have no portal, but they should have been + * rejected by message mask + */ + if (attr->fa_ptl_mask && /* has portal filter */ + !(attr->fa_ptl_mask & (1ULL << portal))) + return false; + + return true; +} + +static int +lnet_fault_attr_validate(struct lnet_fault_attr *attr) +{ + if (!attr->fa_msg_mask) + attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */ + + if 
(!attr->fa_ptl_mask) /* no portal filter */ + return 0; + + /* NB: only PUT and GET can be filtered if portal filter has been set */ + attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT; + if (!attr->fa_msg_mask) { + CDEBUG(D_NET, "can't find valid message type bits %x\n", + attr->fa_msg_mask); + return -EINVAL; + } + return 0; +} + +static void +lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type) +{ + /* NB: fs_counter is NOT updated by this function */ + switch (type) { + case LNET_MSG_PUT: + stat->fs_put++; + return; + case LNET_MSG_ACK: + stat->fs_ack++; + return; + case LNET_MSG_GET: + stat->fs_get++; + return; + case LNET_MSG_REPLY: + stat->fs_reply++; + return; + } +} + +/** + * LNet message drop simulation + */ + +/** + * Add a new drop rule to LNet + * There is no check for duplicated drop rule, all rules will be checked for + * incoming message. + */ +static int +lnet_drop_rule_add(struct lnet_fault_attr *attr) +{ + struct lnet_drop_rule *rule; + + if (attr->u.drop.da_rate & attr->u.drop.da_interval) { + CDEBUG(D_NET, "please provide either drop rate or drop interval, but not both at the same time %d/%d\n", + attr->u.drop.da_rate, attr->u.drop.da_interval); + return -EINVAL; + } + + if (lnet_fault_attr_validate(attr)) + return -EINVAL; + + CFS_ALLOC_PTR(rule); + if (!rule) + return -ENOMEM; + + spin_lock_init(&rule->dr_lock); + + rule->dr_attr = *attr; + if (attr->u.drop.da_interval) { + rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval); + rule->dr_drop_time = cfs_time_shift(cfs_rand() % + attr->u.drop.da_interval); + } else { + rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate; + } + + lnet_net_lock(LNET_LOCK_EX); + list_add(&rule->dr_link, &the_lnet.ln_drop_rules); + lnet_net_unlock(LNET_LOCK_EX); + + CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n", + libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src), + attr->u.drop.da_rate, attr->u.drop.da_interval); + return 0; +} + +/** + * Remove matched drop rules from lnet, all rules that can match \a src and + * \a dst will be removed. 
+ * If \a src is zero, then all rules have \a dst as destination will be remove + * If \a dst is zero, then all rules have \a src as source will be removed + * If both of them are zero, all rules will be removed + */ +static int +lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst) +{ + struct lnet_drop_rule *rule; + struct lnet_drop_rule *tmp; + struct list_head zombies; + int n = 0; + + INIT_LIST_HEAD(&zombies); + + lnet_net_lock(LNET_LOCK_EX); + list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) { + if (rule->dr_attr.fa_src != src && src) + continue; + + if (rule->dr_attr.fa_dst != dst && dst) + continue; + + list_move(&rule->dr_link, &zombies); + } + lnet_net_unlock(LNET_LOCK_EX); + + list_for_each_entry_safe(rule, tmp, &zombies, dr_link) { + CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n", + libcfs_nid2str(rule->dr_attr.fa_src), + libcfs_nid2str(rule->dr_attr.fa_dst), + rule->dr_attr.u.drop.da_rate, + rule->dr_attr.u.drop.da_interval); + + list_del(&rule->dr_link); + CFS_FREE_PTR(rule); + n++; + } + + return n; +} + +/** + * List drop rule at position of \a pos + */ +static int +lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr, + struct lnet_fault_stat *stat) +{ + struct lnet_drop_rule *rule; + int cpt; + int i = 0; + int rc = -ENOENT; + + cpt = lnet_net_lock_current(); + list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { + if (i++ < pos) + continue; + + spin_lock(&rule->dr_lock); + *attr = rule->dr_attr; + *stat = rule->dr_stat; + spin_unlock(&rule->dr_lock); + rc = 0; + break; + } + + lnet_net_unlock(cpt); + return rc; +} + +/** + * reset counters for all drop rules + */ +static void +lnet_drop_rule_reset(void) +{ + struct lnet_drop_rule *rule; + int cpt; + + cpt = lnet_net_lock_current(); + + list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { + struct lnet_fault_attr *attr = &rule->dr_attr; + + spin_lock(&rule->dr_lock); + + memset(&rule->dr_stat, 0, sizeof(rule->dr_stat)); + if (attr->u.drop.da_rate) { + rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate; + } else { + rule->dr_drop_time = cfs_time_shift(cfs_rand() % + attr->u.drop.da_interval); + rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval); + } + spin_unlock(&rule->dr_lock); + } + + lnet_net_unlock(cpt); +} + +/** + * check source/destination NID, portal, message type and drop rate, + * decide whether should drop this message or not + */ +static bool +drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src, + lnet_nid_t dst, unsigned int type, unsigned int portal) +{ + struct lnet_fault_attr *attr = &rule->dr_attr; + bool drop; + + if (!lnet_fault_attr_match(attr, src, dst, type, portal)) + return false; + + /* match this rule, check drop rate now */ + spin_lock(&rule->dr_lock); + if (rule->dr_drop_time) { /* time based drop */ + unsigned long now = cfs_time_current(); + + rule->dr_stat.fs_count++; + drop = cfs_time_aftereq(now, rule->dr_drop_time); + if (drop) { + if (cfs_time_after(now, rule->dr_time_base)) + rule->dr_time_base = now; + + rule->dr_drop_time = rule->dr_time_base + + cfs_time_seconds(cfs_rand() % + attr->u.drop.da_interval); + rule->dr_time_base += cfs_time_seconds(attr->u.drop.da_interval); + + CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lu\n", + libcfs_nid2str(attr->fa_src), + libcfs_nid2str(attr->fa_dst), + rule->dr_drop_time); + } + + } else { /* rate based drop */ + drop = rule->dr_stat.fs_count++ == rule->dr_drop_at; + + if (!do_div(rule->dr_stat.fs_count, attr->u.drop.da_rate)) { + rule->dr_drop_at = 
rule->dr_stat.fs_count + + cfs_rand() % attr->u.drop.da_rate; + CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n", + libcfs_nid2str(attr->fa_src), + libcfs_nid2str(attr->fa_dst), rule->dr_drop_at); + } + } + + if (drop) { /* drop this message, update counters */ + lnet_fault_stat_inc(&rule->dr_stat, type); + rule->dr_stat.u.drop.ds_dropped++; + } + + spin_unlock(&rule->dr_lock); + return drop; +} + +/** + * Check if message from \a src to \a dst can match any existed drop rule + */ +bool +lnet_drop_rule_match(lnet_hdr_t *hdr) +{ + struct lnet_drop_rule *rule; + lnet_nid_t src = le64_to_cpu(hdr->src_nid); + lnet_nid_t dst = le64_to_cpu(hdr->dest_nid); + unsigned int typ = le32_to_cpu(hdr->type); + unsigned int ptl = -1; + bool drop = false; + int cpt; + + /** + * NB: if Portal is specified, then only PUT and GET will be + * filtered by drop rule + */ + if (typ == LNET_MSG_PUT) + ptl = le32_to_cpu(hdr->msg.put.ptl_index); + else if (typ == LNET_MSG_GET) + ptl = le32_to_cpu(hdr->msg.get.ptl_index); + + cpt = lnet_net_lock_current(); + list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) { + drop = drop_rule_match(rule, src, dst, typ, ptl); + if (drop) + break; + } + + lnet_net_unlock(cpt); + return drop; +} + +/** + * LNet Delay Simulation + */ +/** timestamp (second) to send delayed message */ +#define msg_delay_send msg_ev.hdr_data + +struct lnet_delay_rule { + /** link chain on the_lnet.ln_delay_rules */ + struct list_head dl_link; + /** link chain on delay_dd.dd_sched_rules */ + struct list_head dl_sched_link; + /** attributes of this rule */ + struct lnet_fault_attr dl_attr; + /** lock to protect \a below members */ + spinlock_t dl_lock; + /** refcount of delay rule */ + atomic_t dl_refcount; + /** + * the message sequence to delay, which means message is delayed when + * dl_stat.fs_count == dl_delay_at + */ + unsigned long dl_delay_at; + /** + * seconds to delay the next message, it's exclusive with dl_delay_at + */ + unsigned long dl_delay_time; + /** baseline to caculate dl_delay_time */ + unsigned long dl_time_base; + /** jiffies to send the next delayed message */ + unsigned long dl_msg_send; + /** delayed message list */ + struct list_head dl_msg_list; + /** statistic of delayed messages */ + struct lnet_fault_stat dl_stat; + /** timer to wakeup delay_daemon */ + struct timer_list dl_timer; +}; + +struct delay_daemon_data { + /** serialise rule add/remove */ + struct mutex dd_mutex; + /** protect rules on \a dd_sched_rules */ + spinlock_t dd_lock; + /** scheduled delay rules (by timer) */ + struct list_head dd_sched_rules; + /** daemon thread sleeps at here */ + wait_queue_head_t dd_waitq; + /** controller (lctl command) wait at here */ + wait_queue_head_t dd_ctl_waitq; + /** daemon is running */ + unsigned int dd_running; + /** daemon stopped */ + unsigned int dd_stopped; +}; + +static struct delay_daemon_data delay_dd; + +static unsigned long +round_timeout(unsigned long timeout) +{ + return cfs_time_seconds((unsigned int) + cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1); +} + +static void +delay_rule_decref(struct lnet_delay_rule *rule) +{ + if (atomic_dec_and_test(&rule->dl_refcount)) { + LASSERT(list_empty(&rule->dl_sched_link)); + LASSERT(list_empty(&rule->dl_msg_list)); + LASSERT(list_empty(&rule->dl_link)); + + CFS_FREE_PTR(rule); + } +} + +/** + * check source/destination NID, portal, message type and delay rate, + * decide whether should delay this message or not + */ +static bool +delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src, + lnet_nid_t dst, 
unsigned int type, unsigned int portal, + struct lnet_msg *msg) +{ + struct lnet_fault_attr *attr = &rule->dl_attr; + bool delay; + + if (!lnet_fault_attr_match(attr, src, dst, type, portal)) + return false; + + /* match this rule, check delay rate now */ + spin_lock(&rule->dl_lock); + if (rule->dl_delay_time) { /* time based delay */ + unsigned long now = cfs_time_current(); + + rule->dl_stat.fs_count++; + delay = cfs_time_aftereq(now, rule->dl_delay_time); + if (delay) { + if (cfs_time_after(now, rule->dl_time_base)) + rule->dl_time_base = now; + + rule->dl_delay_time = rule->dl_time_base + + cfs_time_seconds(cfs_rand() % + attr->u.delay.la_interval); + rule->dl_time_base += cfs_time_seconds(attr->u.delay.la_interval); + + CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n", + libcfs_nid2str(attr->fa_src), + libcfs_nid2str(attr->fa_dst), + rule->dl_delay_time); + } + + } else { /* rate based delay */ + delay = rule->dl_stat.fs_count++ == rule->dl_delay_at; + /* generate the next random rate sequence */ + if (!do_div(rule->dl_stat.fs_count, attr->u.delay.la_rate)) { + rule->dl_delay_at = rule->dl_stat.fs_count + + cfs_rand() % attr->u.delay.la_rate; + CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n", + libcfs_nid2str(attr->fa_src), + libcfs_nid2str(attr->fa_dst), rule->dl_delay_at); + } + } + + if (!delay) { + spin_unlock(&rule->dl_lock); + return false; + } + + /* delay this message, update counters */ + lnet_fault_stat_inc(&rule->dl_stat, type); + rule->dl_stat.u.delay.ls_delayed++; + + list_add_tail(&msg->msg_list, &rule->dl_msg_list); + msg->msg_delay_send = round_timeout( + cfs_time_shift(attr->u.delay.la_latency)); + if (rule->dl_msg_send == -1) { + rule->dl_msg_send = msg->msg_delay_send; + mod_timer(&rule->dl_timer, rule->dl_msg_send); + } + + spin_unlock(&rule->dl_lock); + return true; +} + +/** + * check if \a msg can match any Delay Rule, receiving of this message + * will be delayed if there is a match. 
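delay_rule_match() above appends the message to the rule's list together with its send deadline, and only arms the rule timer when dl_msg_send was idle (-1); delayed_msg_check() later re-arms the timer for the new head of the list, or lets it go idle again once the list is empty. A simplified userspace model of that single-timer queue follows; it assumes a small fixed-size ring and printf() in place of the real send path, and is not part of the patch.

#include <stdio.h>

#define QLEN 16

struct delay_queue {
	unsigned long deadline[QLEN]; /* queued send deadlines (cf. dl_msg_list) */
	int head, tail;
	long timer_armed_at;          /* cf. dl_msg_send, -1 when idle */
};

static void dq_enqueue(struct delay_queue *q, unsigned long deadline)
{
	q->deadline[q->tail++ % QLEN] = deadline;
	if (q->timer_armed_at == -1) {
		q->timer_armed_at = (long)deadline; /* mod_timer() in the kernel */
		printf("arm timer for %lu\n", deadline);
	}
}

static void dq_expire(struct delay_queue *q, unsigned long now)
{
	while (q->head != q->tail && q->deadline[q->head % QLEN] <= now) {
		printf("send message delayed until %lu\n",
		       q->deadline[q->head % QLEN]);
		q->head++;
	}
	q->timer_armed_at = (q->head == q->tail) ?
		-1 : (long)q->deadline[q->head % QLEN];
}

int main(void)
{
	struct delay_queue q = { .timer_armed_at = -1 };

	dq_enqueue(&q, 100);
	dq_enqueue(&q, 105);   /* no re-arm: 100 is still the head */
	dq_expire(&q, 102);    /* sends the first, re-arms for 105 */
	dq_expire(&q, 200);    /* sends the rest, timer goes idle */
	return 0;
}

Because deadlines are appended in non-decreasing order, the head of the list is always the next message to send, which is why a single timer per rule is sufficient.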
+ */ +bool +lnet_delay_rule_match_locked(lnet_hdr_t *hdr, struct lnet_msg *msg) +{ + struct lnet_delay_rule *rule; + lnet_nid_t src = le64_to_cpu(hdr->src_nid); + lnet_nid_t dst = le64_to_cpu(hdr->dest_nid); + unsigned int typ = le32_to_cpu(hdr->type); + unsigned int ptl = -1; + + /* NB: called with hold of lnet_net_lock */ + + /** + * NB: if Portal is specified, then only PUT and GET will be + * filtered by delay rule + */ + if (typ == LNET_MSG_PUT) + ptl = le32_to_cpu(hdr->msg.put.ptl_index); + else if (typ == LNET_MSG_GET) + ptl = le32_to_cpu(hdr->msg.get.ptl_index); + + list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { + if (delay_rule_match(rule, src, dst, typ, ptl, msg)) + return true; + } + + return false; +} + +/** check out delayed messages for send */ +static void +delayed_msg_check(struct lnet_delay_rule *rule, bool all, + struct list_head *msg_list) +{ + struct lnet_msg *msg; + struct lnet_msg *tmp; + unsigned long now = cfs_time_current(); + + if (!all && rule->dl_msg_send > now) + return; + + spin_lock(&rule->dl_lock); + list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) { + if (!all && msg->msg_delay_send > now) + break; + + msg->msg_delay_send = 0; + list_move_tail(&msg->msg_list, msg_list); + } + + if (list_empty(&rule->dl_msg_list)) { + del_timer(&rule->dl_timer); + rule->dl_msg_send = -1; + + } else if (!list_empty(msg_list)) { + /* + * dequeued some timedout messages, update timer for the + * next delayed message on rule + */ + msg = list_entry(rule->dl_msg_list.next, + struct lnet_msg, msg_list); + rule->dl_msg_send = msg->msg_delay_send; + mod_timer(&rule->dl_timer, rule->dl_msg_send); + } + spin_unlock(&rule->dl_lock); +} + +static void +delayed_msg_process(struct list_head *msg_list, bool drop) +{ + struct lnet_msg *msg; + + while (!list_empty(msg_list)) { + struct lnet_ni *ni; + int cpt; + int rc; + + msg = list_entry(msg_list->next, struct lnet_msg, msg_list); + LASSERT(msg->msg_rxpeer); + + ni = msg->msg_rxpeer->lp_ni; + cpt = msg->msg_rx_cpt; + + list_del_init(&msg->msg_list); + if (drop) { + rc = -ECANCELED; + + } else if (!msg->msg_routing) { + rc = lnet_parse_local(ni, msg); + if (!rc) + continue; + + } else { + lnet_net_lock(cpt); + rc = lnet_parse_forward_locked(ni, msg); + lnet_net_unlock(cpt); + + switch (rc) { + case LNET_CREDIT_OK: + lnet_ni_recv(ni, msg->msg_private, msg, 0, + 0, msg->msg_len, msg->msg_len); + case LNET_CREDIT_WAIT: + continue; + default: /* failures */ + break; + } + } + + lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len); + lnet_finalize(ni, msg, rc); + } +} + +/** + * Process delayed messages for scheduled rules + * This function can either be called by delay_rule_daemon, or by lnet_finalise + */ +void +lnet_delay_rule_check(void) +{ + struct lnet_delay_rule *rule; + struct list_head msgs; + + INIT_LIST_HEAD(&msgs); + while (1) { + if (list_empty(&delay_dd.dd_sched_rules)) + break; + + spin_lock_bh(&delay_dd.dd_lock); + if (list_empty(&delay_dd.dd_sched_rules)) { + spin_unlock_bh(&delay_dd.dd_lock); + break; + } + + rule = list_entry(delay_dd.dd_sched_rules.next, + struct lnet_delay_rule, dl_sched_link); + list_del_init(&rule->dl_sched_link); + spin_unlock_bh(&delay_dd.dd_lock); + + delayed_msg_check(rule, false, &msgs); + delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */ + } + + if (!list_empty(&msgs)) + delayed_msg_process(&msgs, false); +} + +/** daemon thread to handle delayed messages */ +static int +lnet_delay_rule_daemon(void *arg) +{ + delay_dd.dd_running = 1; + 
wake_up(&delay_dd.dd_ctl_waitq); + + while (delay_dd.dd_running) { + wait_event_interruptible(delay_dd.dd_waitq, + !delay_dd.dd_running || + !list_empty(&delay_dd.dd_sched_rules)); + lnet_delay_rule_check(); + } + + /* in case more rules have been enqueued after my last check */ + lnet_delay_rule_check(); + delay_dd.dd_stopped = 1; + wake_up(&delay_dd.dd_ctl_waitq); + + return 0; +} + +static void +delay_timer_cb(unsigned long arg) +{ + struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg; + + spin_lock_bh(&delay_dd.dd_lock); + if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) { + atomic_inc(&rule->dl_refcount); + list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules); + wake_up(&delay_dd.dd_waitq); + } + spin_unlock_bh(&delay_dd.dd_lock); +} + +/** + * Add a new delay rule to LNet + * There is no check for duplicated delay rule, all rules will be checked for + * incoming message. + */ +int +lnet_delay_rule_add(struct lnet_fault_attr *attr) +{ + struct lnet_delay_rule *rule; + int rc = 0; + + if (attr->u.delay.la_rate & attr->u.delay.la_interval) { + CDEBUG(D_NET, "please provide either delay rate or delay interval, but not both at the same time %d/%d\n", + attr->u.delay.la_rate, attr->u.delay.la_interval); + return -EINVAL; + } + + if (!attr->u.delay.la_latency) { + CDEBUG(D_NET, "delay latency cannot be zero\n"); + return -EINVAL; + } + + if (lnet_fault_attr_validate(attr)) + return -EINVAL; + + CFS_ALLOC_PTR(rule); + if (!rule) + return -ENOMEM; + + mutex_lock(&delay_dd.dd_mutex); + if (!delay_dd.dd_running) { + struct task_struct *task; + + /** + * NB: although LND threads will process delayed message + * in lnet_finalize, but there is no guarantee that LND + * threads will be waken up if no other message needs to + * be handled. + * Only one daemon thread, performance is not the concern + * of this simualation module. + */ + task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + goto failed; + } + wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running); + } + + init_timer(&rule->dl_timer); + rule->dl_timer.function = delay_timer_cb; + rule->dl_timer.data = (unsigned long)rule; + + spin_lock_init(&rule->dl_lock); + INIT_LIST_HEAD(&rule->dl_msg_list); + INIT_LIST_HEAD(&rule->dl_sched_link); + + rule->dl_attr = *attr; + if (attr->u.delay.la_interval) { + rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval); + rule->dl_delay_time = cfs_time_shift(cfs_rand() % + attr->u.delay.la_interval); + } else { + rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate; + } + + rule->dl_msg_send = -1; + + lnet_net_lock(LNET_LOCK_EX); + atomic_set(&rule->dl_refcount, 1); + list_add(&rule->dl_link, &the_lnet.ln_delay_rules); + lnet_net_unlock(LNET_LOCK_EX); + + CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n", + libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_src), + attr->u.delay.la_rate); + + mutex_unlock(&delay_dd.dd_mutex); + return 0; +failed: + mutex_unlock(&delay_dd.dd_mutex); + CFS_FREE_PTR(rule); + return rc; +} + +/** + * Remove matched Delay Rules from lnet, if \a shutdown is true or both \a src + * and \a dst are zero, all rules will be removed, otherwise only matched rules + * will be removed. + * If \a src is zero, then all rules have \a dst as destination will be remove + * If \a dst is zero, then all rules have \a src as source will be removed + * + * When a delay rule is removed, all delayed messages of this rule will be + * processed immediately. 
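lnet_delay_rule_add() below starts the single delay daemon on demand and then waits on dd_ctl_waitq until the thread has set dd_running; teardown reverses the handshake by clearing dd_running, waking dd_waitq, and waiting for dd_stopped. The following userspace analogue of that start/stop handshake is an assumption-laden sketch only: pthread mutex/condvars stand in for kernel wait queues and kthread_run().

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ctl  = PTHREAD_COND_INITIALIZER; /* cf. dd_ctl_waitq */
static pthread_cond_t work = PTHREAD_COND_INITIALIZER; /* cf. dd_waitq */
static int running, stopped;

static void *delay_daemon(void *arg)
{
	pthread_mutex_lock(&lock);
	running = 1;
	pthread_cond_signal(&ctl);               /* tell the creator we are up */
	while (running)
		pthread_cond_wait(&work, &lock); /* sleep until rules are queued */
	stopped = 1;
	pthread_cond_signal(&ctl);               /* tell teardown we are done */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, delay_daemon, NULL);

	pthread_mutex_lock(&lock);
	while (!running)                          /* wait_event(dd_ctl_waitq, ...) */
		pthread_cond_wait(&ctl, &lock);

	running = 0;                              /* teardown: ask the daemon to exit */
	pthread_cond_signal(&work);
	while (!stopped)
		pthread_cond_wait(&ctl, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	printf("delay daemon started and stopped cleanly\n");
	return 0;
}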
+ */ +int +lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown) +{ + struct lnet_delay_rule *rule; + struct lnet_delay_rule *tmp; + struct list_head rule_list; + struct list_head msg_list; + int n = 0; + bool cleanup; + + INIT_LIST_HEAD(&rule_list); + INIT_LIST_HEAD(&msg_list); + + if (shutdown) { + src = 0; + dst = 0; + } + + mutex_lock(&delay_dd.dd_mutex); + lnet_net_lock(LNET_LOCK_EX); + + list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) { + if (rule->dl_attr.fa_src != src && src) + continue; + + if (rule->dl_attr.fa_dst != dst && dst) + continue; + + CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n", + libcfs_nid2str(rule->dl_attr.fa_src), + libcfs_nid2str(rule->dl_attr.fa_dst), + rule->dl_attr.u.delay.la_rate, + rule->dl_attr.u.delay.la_interval); + /* refcount is taken over by rule_list */ + list_move(&rule->dl_link, &rule_list); + } + + /* check if we need to shutdown delay_daemon */ + cleanup = list_empty(&the_lnet.ln_delay_rules) && + !list_empty(&rule_list); + lnet_net_unlock(LNET_LOCK_EX); + + list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) { + list_del_init(&rule->dl_link); + + del_timer_sync(&rule->dl_timer); + delayed_msg_check(rule, true, &msg_list); + delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */ + n++; + } + + if (cleanup) { /* no more delay rule, shutdown delay_daemon */ + LASSERT(delay_dd.dd_running); + delay_dd.dd_running = 0; + wake_up(&delay_dd.dd_waitq); + + while (!delay_dd.dd_stopped) + wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped); + } + mutex_unlock(&delay_dd.dd_mutex); + + if (!list_empty(&msg_list)) + delayed_msg_process(&msg_list, shutdown); + + return n; +} + +/** + * List Delay Rule at position of \a pos + */ +int +lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr, + struct lnet_fault_stat *stat) +{ + struct lnet_delay_rule *rule; + int cpt; + int i = 0; + int rc = -ENOENT; + + cpt = lnet_net_lock_current(); + list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { + if (i++ < pos) + continue; + + spin_lock(&rule->dl_lock); + *attr = rule->dl_attr; + *stat = rule->dl_stat; + spin_unlock(&rule->dl_lock); + rc = 0; + break; + } + + lnet_net_unlock(cpt); + return rc; +} + +/** + * reset counters for all Delay Rules + */ +void +lnet_delay_rule_reset(void) +{ + struct lnet_delay_rule *rule; + int cpt; + + cpt = lnet_net_lock_current(); + + list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { + struct lnet_fault_attr *attr = &rule->dl_attr; + + spin_lock(&rule->dl_lock); + + memset(&rule->dl_stat, 0, sizeof(rule->dl_stat)); + if (attr->u.delay.la_rate) { + rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate; + } else { + rule->dl_delay_time = cfs_time_shift(cfs_rand() % + attr->u.delay.la_interval); + rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval); + } + spin_unlock(&rule->dl_lock); + } + + lnet_net_unlock(cpt); +} + +int +lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data) +{ + struct lnet_fault_attr *attr; + struct lnet_fault_stat *stat; + + attr = (struct lnet_fault_attr *)data->ioc_inlbuf1; + + switch (opc) { + default: + return -EINVAL; + + case LNET_CTL_DROP_ADD: + if (!attr) + return -EINVAL; + + return lnet_drop_rule_add(attr); + + case LNET_CTL_DROP_DEL: + if (!attr) + return -EINVAL; + + data->ioc_count = lnet_drop_rule_del(attr->fa_src, + attr->fa_dst); + return 0; + + case LNET_CTL_DROP_RESET: + lnet_drop_rule_reset(); + return 0; + + case LNET_CTL_DROP_LIST: + stat = (struct lnet_fault_stat *)data->ioc_inlbuf2; + if 
(!attr || !stat) + return -EINVAL; + + return lnet_drop_rule_list(data->ioc_count, attr, stat); + + case LNET_CTL_DELAY_ADD: + if (!attr) + return -EINVAL; + + return lnet_delay_rule_add(attr); + + case LNET_CTL_DELAY_DEL: + if (!attr) + return -EINVAL; + + data->ioc_count = lnet_delay_rule_del(attr->fa_src, + attr->fa_dst, false); + return 0; + + case LNET_CTL_DELAY_RESET: + lnet_delay_rule_reset(); + return 0; + + case LNET_CTL_DELAY_LIST: + stat = (struct lnet_fault_stat *)data->ioc_inlbuf2; + if (!attr || !stat) + return -EINVAL; + + return lnet_delay_rule_list(data->ioc_count, attr, stat); + } +} + +int +lnet_fault_init(void) +{ + CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT); + CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK); + CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET); + CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY); + + mutex_init(&delay_dd.dd_mutex); + spin_lock_init(&delay_dd.dd_lock); + init_waitqueue_head(&delay_dd.dd_waitq); + init_waitqueue_head(&delay_dd.dd_ctl_waitq); + INIT_LIST_HEAD(&delay_dd.dd_sched_rules); + + return 0; +} + +void +lnet_fault_fini(void) +{ + lnet_drop_rule_del(0, 0); + lnet_delay_rule_del(0, 0, true); + + LASSERT(list_empty(&the_lnet.ln_drop_rules)); + LASSERT(list_empty(&the_lnet.ln_delay_rules)); + LASSERT(list_empty(&delay_dd.dd_sched_rules)); +} diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c index 80f585afa259..ebf468fbc64f 100644 --- a/drivers/staging/lustre/lnet/lnet/nidstrings.c +++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c @@ -170,7 +170,7 @@ parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange) } LIBCFS_ALLOC(addrrange, sizeof(struct addrrange)); - if (addrrange == NULL) + if (!addrrange) return -ENOMEM; list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges); INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges); @@ -203,16 +203,18 @@ add_nidrange(const struct cfs_lstr *src, return NULL; nf = libcfs_namenum2netstrfns(src->ls_str); - if (nf == NULL) + if (!nf) return NULL; endlen = src->ls_len - strlen(nf->nf_name); - if (endlen == 0) + if (!endlen) /* network name only, e.g. "elan" or "tcp" */ netnum = 0; else { - /* e.g. "elan25" or "tcp23", refuse to parse if + /* + * e.g. 
"elan25" or "tcp23", refuse to parse if * network name is not appended with decimal or - * hexadecimal number */ + * hexadecimal number + */ if (!cfs_str2num_check(src->ls_str + strlen(nf->nf_name), endlen, &netnum, 0, MAX_NUMERIC_VALUE)) return NULL; @@ -227,7 +229,7 @@ add_nidrange(const struct cfs_lstr *src, } LIBCFS_ALLOC(nr, sizeof(struct nidrange)); - if (nr == NULL) + if (!nr) return NULL; list_add_tail(&nr->nr_link, nidlist); INIT_LIST_HEAD(&nr->nr_addrranges); @@ -253,22 +255,21 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist) struct nidrange *nr; tmp = *src; - if (cfs_gettok(src, '@', &addrrange) == 0) + if (!cfs_gettok(src, '@', &addrrange)) goto failed; - if (cfs_gettok(src, '@', &net) == 0 || src->ls_str != NULL) + if (!cfs_gettok(src, '@', &net) || src->ls_str) goto failed; nr = add_nidrange(&net, nidlist); - if (nr == NULL) + if (!nr) goto failed; - if (parse_addrange(&addrrange, nr) != 0) + if (parse_addrange(&addrrange, nr)) goto failed; return 1; failed: - CWARN("can't parse nidrange: \"%.*s\"\n", tmp.ls_len, tmp.ls_str); return 0; } @@ -342,12 +343,12 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist) INIT_LIST_HEAD(nidlist); while (src.ls_str) { rc = cfs_gettok(&src, ' ', &res); - if (rc == 0) { + if (!rc) { cfs_free_nidlist(nidlist); return 0; } rc = parse_nidrange(&res, nidlist); - if (rc == 0) { + if (!rc) { cfs_free_nidlist(nidlist); return 0; } @@ -378,7 +379,7 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist) return 1; list_for_each_entry(ar, &nr->nr_addrranges, ar_link) if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid), - &ar->ar_numaddr_ranges)) + &ar->ar_numaddr_ranges)) return 1; } return 0; @@ -395,7 +396,7 @@ cfs_print_network(char *buffer, int count, struct nidrange *nr) { struct netstrfns *nf = nr->nr_netstrfns; - if (nr->nr_netnum == 0) + if (!nr->nr_netnum) return scnprintf(buffer, count, "@%s", nf->nf_name); else return scnprintf(buffer, count, "@%s%u", @@ -417,7 +418,7 @@ cfs_print_addrranges(char *buffer, int count, struct list_head *addrranges, struct netstrfns *nf = nr->nr_netstrfns; list_for_each_entry(ar, addrranges, ar_link) { - if (i != 0) + if (i) i += scnprintf(buffer + i, count - i, " "); i += nf->nf_print_addrlist(buffer + i, count - i, &ar->ar_numaddr_ranges); @@ -442,10 +443,10 @@ int cfs_print_nidlist(char *buffer, int count, struct list_head *nidlist) return 0; list_for_each_entry(nr, nidlist, nr_link) { - if (i != 0) + if (i) i += scnprintf(buffer + i, count - i, " "); - if (nr->nr_all != 0) { + if (nr->nr_all) { LASSERT(list_empty(&nr->nr_addrranges)); i += scnprintf(buffer + i, count - i, "*"); i += cfs_print_network(buffer + i, count - i, nr); @@ -487,13 +488,13 @@ static void cfs_ip_ar_min_max(struct addrrange *ar, __u32 *min_nid, tmp_ip_addr = ((min_ip[0] << 24) | (min_ip[1] << 16) | (min_ip[2] << 8) | min_ip[3]); - if (min_nid != NULL) + if (min_nid) *min_nid = tmp_ip_addr; tmp_ip_addr = ((max_ip[0] << 24) | (max_ip[1] << 16) | (max_ip[2] << 8) | max_ip[3]); - if (max_nid != NULL) + if (max_nid) *max_nid = tmp_ip_addr; } @@ -515,16 +516,16 @@ static void cfs_num_ar_min_max(struct addrrange *ar, __u32 *min_nid, list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) { list_for_each_entry(re, &el->el_exprs, re_link) { - if (re->re_lo < min_addr || min_addr == 0) + if (re->re_lo < min_addr || !min_addr) min_addr = re->re_lo; if (re->re_hi > max_addr) max_addr = re->re_hi; } } - if (min_nid != NULL) + if (min_nid) *min_nid = min_addr; - if (max_nid != NULL) + if (max_nid) *max_nid 
= max_addr; } @@ -546,17 +547,17 @@ bool cfs_nidrange_is_contiguous(struct list_head *nidlist) list_for_each_entry(nr, nidlist, nr_link) { nf = nr->nr_netstrfns; - if (lndname == NULL) + if (!lndname) lndname = nf->nf_name; if (netnum == -1) netnum = nr->nr_netnum; - if (strcmp(lndname, nf->nf_name) != 0 || + if (strcmp(lndname, nf->nf_name) || netnum != nr->nr_netnum) return false; } - if (nf == NULL) + if (!nf) return false; if (!nf->nf_is_contiguous(nidlist)) @@ -590,7 +591,7 @@ static bool cfs_num_is_contiguous(struct list_head *nidlist) list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { cfs_num_ar_min_max(ar, ¤t_start_nid, ¤t_end_nid); - if (last_end_nid != 0 && + if (last_end_nid && (current_start_nid - last_end_nid != 1)) return false; last_end_nid = current_end_nid; @@ -600,7 +601,7 @@ static bool cfs_num_is_contiguous(struct list_head *nidlist) re_link) { if (re->re_stride > 1) return false; - else if (last_hi != 0 && + else if (last_hi && re->re_hi - last_hi != 1) return false; last_hi = re->re_hi; @@ -640,7 +641,7 @@ static bool cfs_ip_is_contiguous(struct list_head *nidlist) last_diff = 0; cfs_ip_ar_min_max(ar, ¤t_start_nid, ¤t_end_nid); - if (last_end_nid != 0 && + if (last_end_nid && (current_start_nid - last_end_nid != 1)) return false; last_end_nid = current_end_nid; @@ -724,7 +725,7 @@ static void cfs_num_min_max(struct list_head *nidlist, __u32 *min_nid, list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { cfs_num_ar_min_max(ar, &tmp_min_addr, &tmp_max_addr); - if (tmp_min_addr < min_addr || min_addr == 0) + if (tmp_min_addr < min_addr || !min_addr) min_addr = tmp_min_addr; if (tmp_max_addr > max_addr) max_addr = tmp_min_addr; @@ -756,16 +757,16 @@ static void cfs_ip_min_max(struct list_head *nidlist, __u32 *min_nid, list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { cfs_ip_ar_min_max(ar, &tmp_min_ip_addr, &tmp_max_ip_addr); - if (tmp_min_ip_addr < min_ip_addr || min_ip_addr == 0) + if (tmp_min_ip_addr < min_ip_addr || !min_ip_addr) min_ip_addr = tmp_min_ip_addr; if (tmp_max_ip_addr > max_ip_addr) max_ip_addr = tmp_max_ip_addr; } } - if (min_nid != NULL) + if (min_nid) *min_nid = min_ip_addr; - if (max_nid != NULL) + if (max_nid) *max_nid = max_ip_addr; } @@ -784,12 +785,14 @@ libcfs_ip_addr2str(__u32 addr, char *str, size_t size) (addr >> 8) & 0xff, addr & 0xff); } -/* CAVEAT EMPTOR XscanfX +/* + * CAVEAT EMPTOR XscanfX * I use "%n" at the end of a sscanf format to detect trailing junk. However * sscanf may return immediately if it sees the terminating '0' in a string, so * I initialise the %n variable to the expected length. If sscanf sets it; * fine, if it doesn't, then the scan ended at the end of the string, which is - * fine too :) */ + * fine too :) + */ static int libcfs_ip_str2addr(const char *str, int nob, __u32 *addr) { @@ -802,9 +805,9 @@ libcfs_ip_str2addr(const char *str, int nob, __u32 *addr) /* numeric IP? 
*/ if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 && n == nob && - (a & ~0xff) == 0 && (b & ~0xff) == 0 && - (c & ~0xff) == 0 && (d & ~0xff) == 0) { - *addr = ((a<<24)|(b<<16)|(c<<8)|d); + !(a & ~0xff) && !(b & ~0xff) && + !(c & ~0xff) && !(d & ~0xff)) { + *addr = ((a << 24) | (b << 16) | (c << 8) | d); return 1; } @@ -824,7 +827,7 @@ cfs_ip_addr_parse(char *str, int len, struct list_head *list) src.ls_len = len; i = 0; - while (src.ls_str != NULL) { + while (src.ls_str) { struct cfs_lstr res; if (!cfs_gettok(&src, '.', &res)) { @@ -833,7 +836,7 @@ cfs_ip_addr_parse(char *str, int len, struct list_head *list) } rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el); - if (rc != 0) + if (rc) goto out; list_add_tail(&el->el_link, list); @@ -858,7 +861,7 @@ libcfs_ip_addr_range_print(char *buffer, int count, struct list_head *list) list_for_each_entry(el, list, el_link) { LASSERT(j++ < 4); - if (i != 0) + if (i) i += scnprintf(buffer + i, count - i, "."); i += cfs_expr_list_print(buffer + i, count - i, el); } @@ -928,7 +931,7 @@ libcfs_num_parse(char *str, int len, struct list_head *list) int rc; rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el); - if (rc == 0) + if (!rc) list_add_tail(&el->el_link, list); return rc; @@ -1060,7 +1063,7 @@ libcfs_name2netstrfns(const char *name) int libcfs_isknown_lnd(__u32 lnd) { - return libcfs_lnd2netstrfns(lnd) != NULL; + return !!libcfs_lnd2netstrfns(lnd); } EXPORT_SYMBOL(libcfs_isknown_lnd); @@ -1069,7 +1072,7 @@ libcfs_lnd2modname(__u32 lnd) { struct netstrfns *nf = libcfs_lnd2netstrfns(lnd); - return (nf == NULL) ? NULL : nf->nf_modname; + return nf ? nf->nf_modname : NULL; } EXPORT_SYMBOL(libcfs_lnd2modname); @@ -1078,10 +1081,10 @@ libcfs_str2lnd(const char *str) { struct netstrfns *nf = libcfs_name2netstrfns(str); - if (nf != NULL) + if (nf) return nf->nf_type; - return -1; + return -ENXIO; } EXPORT_SYMBOL(libcfs_str2lnd); @@ -1091,7 +1094,7 @@ libcfs_lnd2str_r(__u32 lnd, char *buf, size_t buf_size) struct netstrfns *nf; nf = libcfs_lnd2netstrfns(lnd); - if (nf == NULL) + if (!nf) snprintf(buf, buf_size, "?%u?", lnd); else snprintf(buf, buf_size, "%s", nf->nf_name); @@ -1108,9 +1111,9 @@ libcfs_net2str_r(__u32 net, char *buf, size_t buf_size) struct netstrfns *nf; nf = libcfs_lnd2netstrfns(lnd); - if (nf == NULL) + if (!nf) snprintf(buf, buf_size, "<%u:%u>", lnd, nnum); - else if (nnum == 0) + else if (!nnum) snprintf(buf, buf_size, "%s", nf->nf_name); else snprintf(buf, buf_size, "%s%u", nf->nf_name, nnum); @@ -1135,14 +1138,14 @@ libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size) } nf = libcfs_lnd2netstrfns(lnd); - if (nf == NULL) + if (!nf) { snprintf(buf, buf_size, "%x@<%u:%u>", addr, lnd, nnum); - else { + } else { size_t addr_len; nf->nf_addr2str(addr, buf, buf_size); addr_len = strlen(buf); - if (nnum == 0) + if (!nnum) snprintf(buf + addr_len, buf_size - addr_len, "@%s", nf->nf_name); else @@ -1195,7 +1198,7 @@ libcfs_str2net(const char *str) { __u32 net; - if (libcfs_str2net_internal(str, &net) != NULL) + if (libcfs_str2net_internal(str, &net)) return net; return LNET_NIDNET(LNET_NID_ANY); @@ -1210,15 +1213,15 @@ libcfs_str2nid(const char *str) __u32 net; __u32 addr; - if (sep != NULL) { + if (sep) { nf = libcfs_str2net_internal(sep + 1, &net); - if (nf == NULL) + if (!nf) return LNET_NID_ANY; } else { sep = str + strlen(str); net = LNET_MKNET(SOCKLND, 0); nf = libcfs_lnd2netstrfns(SOCKLND); - LASSERT(nf != NULL); + LASSERT(nf); } if (!nf->nf_str2addr(str, (int)(sep - str), &addr)) @@ -1240,8 +1243,8 
@@ libcfs_id2str(lnet_process_id_t id) } snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s", - ((id.pid & LNET_PID_USERFLAG) != 0) ? "U" : "", - (id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid)); + id.pid & LNET_PID_USERFLAG ? "U" : "", + id.pid & ~LNET_PID_USERFLAG, libcfs_nid2str(id.nid)); return str; } EXPORT_SYMBOL(libcfs_id2str); diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c index 1fceed3c8fc0..b026feebc03a 100644 --- a/drivers/staging/lustre/lnet/lnet/peer.c +++ b/drivers/staging/lustre/lnet/lnet/peer.c @@ -39,6 +39,7 @@ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lib-dlc.h" int lnet_peer_tables_create(void) @@ -50,7 +51,7 @@ lnet_peer_tables_create(void) the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*ptable)); - if (the_lnet.ln_peer_tables == NULL) { + if (!the_lnet.ln_peer_tables) { CERROR("Failed to allocate cpu-partition peer tables\n"); return -ENOMEM; } @@ -60,7 +61,7 @@ lnet_peer_tables_create(void) LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i, LNET_PEER_HASH_SIZE * sizeof(*hash)); - if (hash == NULL) { + if (!hash) { CERROR("Failed to create peer hash table\n"); lnet_peer_tables_destroy(); return -ENOMEM; @@ -82,12 +83,12 @@ lnet_peer_tables_destroy(void) int i; int j; - if (the_lnet.ln_peer_tables == NULL) + if (!the_lnet.ln_peer_tables) return; cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { hash = ptable->pt_hash; - if (hash == NULL) /* not initialized */ + if (!hash) /* not initialized */ break; LASSERT(list_empty(&ptable->pt_deathrow)); @@ -103,62 +104,116 @@ lnet_peer_tables_destroy(void) the_lnet.ln_peer_tables = NULL; } +static void +lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable) +{ + int i; + lnet_peer_t *lp; + lnet_peer_t *tmp; + + for (i = 0; i < LNET_PEER_HASH_SIZE; i++) { + list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i], + lp_hashlist) { + if (ni && ni != lp->lp_ni) + continue; + list_del_init(&lp->lp_hashlist); + /* Lose hash table's ref */ + ptable->pt_zombies++; + lnet_peer_decref_locked(lp); + } + } +} + +static void +lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable, + int cpt_locked) +{ + int i; + + for (i = 3; ptable->pt_zombies; i++) { + lnet_net_unlock(cpt_locked); + + if (is_power_of_2(i)) { + CDEBUG(D_WARNING, + "Waiting for %d zombies on peer table\n", + ptable->pt_zombies); + } + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(1) >> 1); + lnet_net_lock(cpt_locked); + } +} + +static void +lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable, + int cpt_locked) +{ + lnet_peer_t *lp; + lnet_peer_t *tmp; + lnet_nid_t lp_nid; + int i; + + for (i = 0; i < LNET_PEER_HASH_SIZE; i++) { + list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i], + lp_hashlist) { + if (ni != lp->lp_ni) + continue; + + if (!lp->lp_rtr_refcount) + continue; + + lp_nid = lp->lp_nid; + + lnet_net_unlock(cpt_locked); + lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid); + lnet_net_lock(cpt_locked); + } + } +} + void -lnet_peer_tables_cleanup(void) +lnet_peer_tables_cleanup(lnet_ni_t *ni) { struct lnet_peer_table *ptable; + struct list_head deathrow; + lnet_peer_t *lp; + lnet_peer_t *temp; int i; - int j; - LASSERT(the_lnet.ln_shutdown); /* i.e. no new peers */ + INIT_LIST_HEAD(&deathrow); + LASSERT(the_lnet.ln_shutdown || ni); + /* + * If just deleting the peers for a NI, get rid of any routes these + * peers are gateways for. 
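The nidstrings changes above keep the socklnd convention of writing a NID as "address@network[number]", and the CAVEAT EMPTOR comment describes pre-initialising the %n variable so that trailing junk is rejected even when sscanf() stops early. A simplified, IPv4/tcp-only sketch of that parse is shown here; the helper name is an assumption and the real code dispatches through the netstrfns tables instead.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int parse_ip_nid(const char *str, uint32_t *addr, unsigned int *netnum)
{
	const char *sep = strchr(str, '@');
	unsigned int a, b, c, d;
	int n;

	if (!sep)
		return 0;
	n = (int)(sep - str);   /* expected length of the address part (%n trick) */
	if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) < 4 ||
	    n != sep - str || a > 255 || b > 255 || c > 255 || d > 255)
		return 0;
	*addr = (a << 24) | (b << 16) | (c << 8) | d;

	/* network part: "tcp" or "tcp<number>" */
	if (!strncmp(sep + 1, "tcp", 3)) {
		*netnum = 0;
		if (sep[4] && sscanf(sep + 4, "%u", netnum) != 1)
			return 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	uint32_t addr;
	unsigned int net;

	if (parse_ip_nid("192.168.1.10@tcp1", &addr, &net))
		printf("addr 0x%08x on tcp%u\n", addr, net);
	return 0;
}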
+ */ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { lnet_net_lock(i); - - for (j = 0; j < LNET_PEER_HASH_SIZE; j++) { - struct list_head *peers = &ptable->pt_hash[j]; - - while (!list_empty(peers)) { - lnet_peer_t *lp = list_entry(peers->next, - lnet_peer_t, - lp_hashlist); - list_del_init(&lp->lp_hashlist); - /* lose hash table's ref */ - lnet_peer_decref_locked(lp); - } - } - + lnet_peer_table_del_rtrs_locked(ni, ptable, i); lnet_net_unlock(i); } + /* + * Start the process of moving the applicable peers to + * deathrow. + */ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { - LIST_HEAD(deathrow); - lnet_peer_t *lp; - lnet_net_lock(i); + lnet_peer_table_cleanup_locked(ni, ptable); + lnet_net_unlock(i); + } - for (j = 3; ptable->pt_number != 0; j++) { - lnet_net_unlock(i); - - if ((j & (j - 1)) == 0) { - CDEBUG(D_WARNING, - "Waiting for %d peers on peer table\n", - ptable->pt_number); - } - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 2); - lnet_net_lock(i); - } + /* Cleanup all entries on deathrow. */ + cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { + lnet_net_lock(i); + lnet_peer_table_deathrow_wait_locked(ptable, i); list_splice_init(&ptable->pt_deathrow, &deathrow); - lnet_net_unlock(i); + } - while (!list_empty(&deathrow)) { - lp = list_entry(deathrow.next, - lnet_peer_t, lp_hashlist); - list_del(&lp->lp_hashlist); - LIBCFS_FREE(lp, sizeof(*lp)); - } + list_for_each_entry_safe(lp, temp, &deathrow, lp_hashlist) { + list_del(&lp->lp_hashlist); + LIBCFS_FREE(lp, sizeof(*lp)); } } @@ -167,11 +222,11 @@ lnet_destroy_peer_locked(lnet_peer_t *lp) { struct lnet_peer_table *ptable; - LASSERT(lp->lp_refcount == 0); - LASSERT(lp->lp_rtr_refcount == 0); + LASSERT(!lp->lp_refcount); + LASSERT(!lp->lp_rtr_refcount); LASSERT(list_empty(&lp->lp_txq)); LASSERT(list_empty(&lp->lp_hashlist)); - LASSERT(lp->lp_txqnob == 0); + LASSERT(!lp->lp_txqnob); ptable = the_lnet.ln_peer_tables[lp->lp_cpt]; LASSERT(ptable->pt_number > 0); @@ -181,6 +236,8 @@ lnet_destroy_peer_locked(lnet_peer_t *lp) lp->lp_ni = NULL; list_add(&lp->lp_hashlist, &ptable->pt_deathrow); + LASSERT(ptable->pt_zombies > 0); + ptable->pt_zombies--; } lnet_peer_t * @@ -220,14 +277,14 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt) ptable = the_lnet.ln_peer_tables[cpt2]; lp = lnet_find_peer_locked(ptable, nid); - if (lp != NULL) { + if (lp) { *lpp = lp; return 0; } if (!list_empty(&ptable->pt_deathrow)) { lp = list_entry(ptable->pt_deathrow.next, - lnet_peer_t, lp_hashlist); + lnet_peer_t, lp_hashlist); list_del(&lp->lp_hashlist); } @@ -238,12 +295,12 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt) ptable->pt_number++; lnet_net_unlock(cpt); - if (lp != NULL) + if (lp) memset(lp, 0, sizeof(*lp)); else LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp)); - if (lp == NULL) { + if (!lp) { rc = -ENOMEM; lnet_net_lock(cpt); goto out; @@ -276,30 +333,30 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt) } lp2 = lnet_find_peer_locked(ptable, nid); - if (lp2 != NULL) { + if (lp2) { *lpp = lp2; goto out; } lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2); - if (lp->lp_ni == NULL) { + if (!lp->lp_ni) { rc = -EHOSTUNREACH; goto out; } - lp->lp_txcredits = - lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits; - lp->lp_rtrcredits = + lp->lp_txcredits = lp->lp_ni->ni_peertxcredits; + lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits; + lp->lp_rtrcredits = lnet_peer_buffer_credits(lp->lp_ni); lp->lp_minrtrcredits = 
lnet_peer_buffer_credits(lp->lp_ni); list_add_tail(&lp->lp_hashlist, - &ptable->pt_hash[lnet_nid2peerhash(nid)]); + &ptable->pt_hash[lnet_nid2peerhash(nid)]); ptable->pt_version++; *lpp = lp; return 0; out: - if (lp != NULL) + if (lp) list_add(&lp->lp_hashlist, &ptable->pt_deathrow); ptable->pt_number--; return rc; @@ -317,7 +374,7 @@ lnet_debug_peer(lnet_nid_t nid) lnet_net_lock(cpt); rc = lnet_nid2peer_locked(&lp, nid, cpt); - if (rc != 0) { + if (rc) { lnet_net_unlock(cpt); CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid)); return; @@ -336,3 +393,65 @@ lnet_debug_peer(lnet_nid_t nid) lnet_net_unlock(cpt); } + +int +lnet_get_peer_info(__u32 peer_index, __u64 *nid, + char aliveness[LNET_MAX_STR_LEN], + __u32 *cpt_iter, __u32 *refcount, + __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits, + __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits, + __u32 *peer_tx_qnob) +{ + struct lnet_peer_table *peer_table; + lnet_peer_t *lp; + bool found = false; + int lncpt, j; + + /* get the number of CPTs */ + lncpt = cfs_percpt_number(the_lnet.ln_peer_tables); + + /* + * if the cpt number to be examined is >= the number of cpts in + * the system then indicate that there are no more cpts to examin + */ + if (*cpt_iter >= lncpt) + return -ENOENT; + + /* get the current table */ + peer_table = the_lnet.ln_peer_tables[*cpt_iter]; + /* if the ptable is NULL then there are no more cpts to examine */ + if (!peer_table) + return -ENOENT; + + lnet_net_lock(*cpt_iter); + + for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) { + struct list_head *peers = &peer_table->pt_hash[j]; + + list_for_each_entry(lp, peers, lp_hashlist) { + if (peer_index-- > 0) + continue; + + snprintf(aliveness, LNET_MAX_STR_LEN, "NA"); + if (lnet_isrouter(lp) || + lnet_peer_aliveness_enabled(lp)) + snprintf(aliveness, LNET_MAX_STR_LEN, + lp->lp_alive ? "up" : "down"); + + *nid = lp->lp_nid; + *refcount = lp->lp_refcount; + *ni_peer_tx_credits = lp->lp_ni->ni_peertxcredits; + *peer_tx_credits = lp->lp_txcredits; + *peer_rtr_credits = lp->lp_rtrcredits; + *peer_min_rtr_credits = lp->lp_mintxcredits; + *peer_tx_qnob = lp->lp_txqnob; + + found = true; + } + } + lnet_net_unlock(*cpt_iter); + + *cpt_iter = lncpt; + + return found ? 0 : -ENOENT; +} diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c index f5faa414d250..61459cf9d58f 100644 --- a/drivers/staging/lustre/lnet/lnet/router.c +++ b/drivers/staging/lustre/lnet/lnet/router.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
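lnet_get_peer_info() above addresses peers by a single flat index even though they live in per-CPT hash buckets: the index is decremented once per entry until it reaches zero, at which point that entry is reported. A small userspace sketch of the same lookup-by-flat-index pattern over a static table follows; the table contents and names are illustrative only, not part of the patch.

#include <stdio.h>

#define NBUCKETS 4

static const char *table[NBUCKETS][3] = {
	{ "peer0", "peer1", NULL },
	{ "peer2", NULL },
	{ NULL },
	{ "peer3", "peer4", NULL },
};

static const char *peer_by_index(int index)
{
	for (int b = 0; b < NBUCKETS; b++)
		for (int e = 0; table[b][e]; e++)
			if (index-- == 0)
				return table[b][e];
	return NULL;            /* -ENOENT in the kernel code */
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		const char *name = peer_by_index(i);

		printf("index %d -> %s\n", i, name ? name : "none");
	}
	return 0;
}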
- * */ #define DEBUG_SUBSYSTEM S_LNET @@ -28,8 +24,11 @@ #define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4) #define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */ #define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4) +#define LNET_NRB_SMALL_PAGES 1 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */ #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4) +#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \ + PAGE_CACHE_SHIFT) static char *forwarding = ""; module_param(forwarding, charp, 0444); @@ -61,8 +60,10 @@ lnet_peer_buffer_credits(lnet_ni_t *ni) if (peer_buffer_credits > 0) return peer_buffer_credits; - /* As an approximation, allow this peer the same number of router - * buffers as it is allowed outstanding sends */ + /* + * As an approximation, allow this peer the same number of router + * buffers as it is allowed outstanding sends + */ return ni->ni_peertxcredits; } @@ -107,7 +108,7 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, lp->lp_timestamp = when; /* update timestamp */ lp->lp_ping_deadline = 0; /* disable ping timeout */ - if (lp->lp_alive_count != 0 && /* got old news */ + if (lp->lp_alive_count && /* got old news */ (!lp->lp_alive) == (!alive)) { /* new date for old news */ CDEBUG(D_NET, "Old news\n"); return; @@ -131,11 +132,12 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp) int alive; int notifylnd; - /* Notify only in 1 thread at any time to ensure ordered notification. + /* + * Notify only in 1 thread at any time to ensure ordered notification. * NB individual events can be missed; the only guarantee is that you - * always get the most recent news */ - - if (lp->lp_notifying || ni == NULL) + * always get the most recent news + */ + if (lp->lp_notifying || !ni) return; lp->lp_notifying = 1; @@ -147,13 +149,14 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp) lp->lp_notifylnd = 0; lp->lp_notify = 0; - if (notifylnd && ni->ni_lnd->lnd_notify != NULL) { + if (notifylnd && ni->ni_lnd->lnd_notify) { lnet_net_unlock(lp->lp_cpt); - /* A new notification could happen now; I'll handle it - * when control returns to me */ - - (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive); + /* + * A new notification could happen now; I'll handle it + * when control returns to me + */ + ni->ni_lnd->lnd_notify(ni, lp->lp_nid, alive); lnet_net_lock(lp->lp_cpt); } @@ -176,7 +179,7 @@ lnet_rtr_addref_locked(lnet_peer_t *lp) /* a simple insertion sort */ list_for_each_prev(pos, &the_lnet.ln_routers) { lnet_peer_t *rtr = list_entry(pos, lnet_peer_t, - lp_rtr_list); + lp_rtr_list); if (rtr->lp_nid < lp->lp_nid) break; @@ -197,12 +200,12 @@ lnet_rtr_decref_locked(lnet_peer_t *lp) /* lnet_net_lock must be exclusively locked */ lp->lp_rtr_refcount--; - if (lp->lp_rtr_refcount == 0) { + if (!lp->lp_rtr_refcount) { LASSERT(list_empty(&lp->lp_routes)); - if (lp->lp_rcd != NULL) { + if (lp->lp_rcd) { list_add(&lp->lp_rcd->rcd_list, - &the_lnet.ln_rcd_deathrow); + &the_lnet.ln_rcd_deathrow); lp->lp_rcd = NULL; } @@ -245,8 +248,10 @@ static void lnet_shuffle_seed(void) cfs_get_random_bytes(seed, sizeof(seed)); - /* Nodes with small feet have little entropy - * the NID for this node gives the most entropy in the low bits */ + /* + * Nodes with small feet have little entropy + * the NID for this node gives the most entropy in the low bits + */ list_for_each(tmp, &the_lnet.ln_nis) { ni = list_entry(tmp, lnet_ni_t, ni_list); lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid)); @@ -277,7 +282,7 @@ lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route) /* len+1 
positions to add a new entry, also prevents division by 0 */ offset = cfs_rand() % (len + 1); list_for_each(e, &rnet->lrn_routes) { - if (offset == 0) + if (!offset) break; offset--; } @@ -289,7 +294,7 @@ lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route) } int -lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, +lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway, unsigned int priority) { struct list_head *e; @@ -300,7 +305,7 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, int add_route; int rc; - CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n", + CDEBUG(D_NET, "Add route: net %s hops %d priority %u gw %s\n", libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway)); if (gateway == LNET_NID_ANY || @@ -308,21 +313,21 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, net == LNET_NIDNET(LNET_NID_ANY) || LNET_NETTYP(net) == LOLND || LNET_NIDNET(gateway) == net || - hops < 1 || hops > 255) + (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255))) return -EINVAL; if (lnet_islocalnet(net)) /* it's a local network */ - return 0; /* ignore the route entry */ + return -EEXIST; /* Assume net, route, all new */ LIBCFS_ALLOC(route, sizeof(*route)); LIBCFS_ALLOC(rnet, sizeof(*rnet)); - if (route == NULL || rnet == NULL) { + if (!route || !rnet) { CERROR("Out of memory creating route %s %d %s\n", libcfs_net2str(net), hops, libcfs_nid2str(gateway)); - if (route != NULL) + if (route) LIBCFS_FREE(route, sizeof(*route)); - if (rnet != NULL) + if (rnet) LIBCFS_FREE(rnet, sizeof(*rnet)); return -ENOMEM; } @@ -336,25 +341,24 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, lnet_net_lock(LNET_LOCK_EX); rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX); - if (rc != 0) { + if (rc) { lnet_net_unlock(LNET_LOCK_EX); LIBCFS_FREE(route, sizeof(*route)); LIBCFS_FREE(rnet, sizeof(*rnet)); if (rc == -EHOSTUNREACH) /* gateway is not on a local net */ - return 0; /* ignore the route entry */ + return rc; /* ignore the route entry */ CERROR("Error %d creating route %s %d %s\n", rc, libcfs_net2str(net), hops, libcfs_nid2str(gateway)); - return rc; } LASSERT(!the_lnet.ln_shutdown); rnet2 = lnet_find_net_locked(net); - if (rnet2 == NULL) { + if (!rnet2) { /* new network */ list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net)); rnet2 = rnet; @@ -382,8 +386,8 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, lnet_net_unlock(LNET_LOCK_EX); /* XXX Assume alive */ - if (ni->ni_lnd->lnd_notify != NULL) - (ni->ni_lnd->lnd_notify)(ni, gateway, 1); + if (ni->ni_lnd->lnd_notify) + ni->ni_lnd->lnd_notify(ni, gateway, 1); lnet_net_lock(LNET_LOCK_EX); } @@ -391,14 +395,20 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, /* -1 for notify or !add_route */ lnet_peer_decref_locked(route->lr_gateway); lnet_net_unlock(LNET_LOCK_EX); + rc = 0; - if (!add_route) + if (!add_route) { + rc = -EEXIST; LIBCFS_FREE(route, sizeof(*route)); + } if (rnet != rnet2) LIBCFS_FREE(rnet, sizeof(*rnet)); - return 0; + /* indicate to startup the router checker if configured */ + wake_up(&the_lnet.ln_rc_waitq); + + return rc; } int @@ -426,10 +436,9 @@ lnet_check_routes(void) lnet_nid_t nid2; int net; - route = list_entry(e2, lnet_route_t, - lr_list); + route = list_entry(e2, lnet_route_t, lr_list); - if (route2 == NULL) { + if (!route2) { route2 = route; continue; } @@ -472,9 +481,10 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid) CDEBUG(D_NET, "Del route: net %s : gw %s\n", libcfs_net2str(net), 
libcfs_nid2str(gw_nid)); - /* NB Caller may specify either all routes via the given gateway - * or a specific route entry actual NIDs) */ - + /* + * NB Caller may specify either all routes via the given gateway + * or a specific route entry actual NIDs) + */ lnet_net_lock(LNET_LOCK_EX); if (net == LNET_NIDNET(LNET_NID_ANY)) rn_list = &the_lnet.ln_remote_nets_hash[0]; @@ -486,7 +496,7 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid) rnet = list_entry(e1, lnet_remotenet_t, lrn_list); if (!(net == LNET_NIDNET(LNET_NID_ANY) || - net == rnet->lrn_net)) + net == rnet->lrn_net)) continue; list_for_each(e2, &rnet->lrn_routes) { @@ -513,7 +523,7 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid) LIBCFS_FREE(route, sizeof(*route)); - if (rnet != NULL) + if (rnet) LIBCFS_FREE(rnet, sizeof(*rnet)); rc = 0; @@ -538,6 +548,38 @@ lnet_destroy_routes(void) lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY); } +int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg) +{ + int i, rc = -ENOENT, j; + + if (!the_lnet.ln_rtrpools) + return rc; + + for (i = 0; i < LNET_NRBPOOLS; i++) { + lnet_rtrbufpool_t *rbp; + + lnet_net_lock(LNET_LOCK_EX); + cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) { + if (i++ != idx) + continue; + + pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages; + pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers; + pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits; + pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits; + rc = 0; + break; + } + lnet_net_unlock(LNET_LOCK_EX); + } + + lnet_net_lock(LNET_LOCK_EX); + pool_cfg->pl_routing = the_lnet.ln_routing; + lnet_net_unlock(LNET_LOCK_EX); + + return rc; +} + int lnet_get_route(int idx, __u32 *net, __u32 *hops, lnet_nid_t *gateway, __u32 *alive, __u32 *priority) @@ -558,15 +600,14 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops, rnet = list_entry(e1, lnet_remotenet_t, lrn_list); list_for_each(e2, &rnet->lrn_routes) { - route = list_entry(e2, lnet_route_t, - lr_list); + route = list_entry(e2, lnet_route_t, lr_list); - if (idx-- == 0) { + if (!idx--) { *net = rnet->lrn_net; *hops = route->lr_hops; *priority = route->lr_priority; *gateway = route->lr_gateway->lp_nid; - *alive = route->lr_gateway->lp_alive; + *alive = lnet_is_route_alive(route); lnet_net_unlock(cpt); return 0; } @@ -604,7 +645,7 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) { lnet_ping_info_t *info = rcd->rcd_pinginfo; struct lnet_peer *gw = rcd->rcd_gateway; - lnet_route_t *rtr; + lnet_route_t *rte; if (!gw->lp_alive) return; @@ -621,21 +662,25 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) } gw->lp_ping_feats = info->pi_features; - if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) { + if (!(gw->lp_ping_feats & LNET_PING_FEAT_MASK)) { CDEBUG(D_NET, "%s: Unexpected features 0x%x\n", libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats); return; /* nothing I can understand */ } - if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0) + if (!(gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) return; /* can't carry NI status info */ - list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) { - int ptl_status = LNET_NI_STATUS_INVALID; + list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) { int down = 0; int up = 0; int i; + if (gw->lp_ping_feats & LNET_PING_FEAT_RTE_DISABLED) { + rte->lr_downis = 1; + continue; + } + for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) { lnet_ni_status_t *stat = &info->pi_ni[i]; lnet_nid_t nid = stat->ns_nid; @@ -651,22 +696,15 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) continue; if (stat->ns_status == LNET_NI_STATUS_DOWN) 
{ - if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND) - down++; - else if (ptl_status != LNET_NI_STATUS_UP) - ptl_status = LNET_NI_STATUS_DOWN; + down++; continue; } if (stat->ns_status == LNET_NI_STATUS_UP) { - if (LNET_NIDNET(nid) == rtr->lr_net) { + if (LNET_NIDNET(nid) == rte->lr_net) { up = 1; break; } - /* ptl NIs are considered down only when - * they're all down */ - if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND) - ptl_status = LNET_NI_STATUS_UP; continue; } @@ -677,10 +715,17 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) } if (up) { /* ignore downed NIs if NI for dest network is up */ - rtr->lr_downis = 0; + rte->lr_downis = 0; continue; } - rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN); + /** + * if @down is zero and this route is single-hop, it means + * we can't find NI for target network + */ + if (!down && rte->lr_hops == 1) + down = 1; + + rte->lr_downis = down; } } @@ -690,7 +735,7 @@ lnet_router_checker_event(lnet_event_t *event) lnet_rc_data_t *rcd = event->md.user_ptr; struct lnet_peer *lp; - LASSERT(rcd != NULL); + LASSERT(rcd); if (event->unlinked) { LNetInvalidateHandle(&rcd->rcd_mdh); @@ -701,11 +746,13 @@ lnet_router_checker_event(lnet_event_t *event) event->type == LNET_EVENT_REPLY); lp = rcd->rcd_gateway; - LASSERT(lp != NULL); + LASSERT(lp); - /* NB: it's called with holding lnet_res_lock, we have a few - * places need to hold both locks at the same time, please take - * care of lock ordering */ + /* + * NB: it's called with holding lnet_res_lock, we have a few + * places need to hold both locks at the same time, please take + * care of lock ordering + */ lnet_net_lock(lp->lp_cpt); if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) { /* ignore if no longer a router or rcd is replaced */ @@ -714,23 +761,26 @@ lnet_router_checker_event(lnet_event_t *event) if (event->type == LNET_EVENT_SEND) { lp->lp_ping_notsent = 0; - if (event->status == 0) + if (!event->status) goto out; } /* LNET_EVENT_REPLY */ - /* A successful REPLY means the router is up. If _any_ comms + /* + * A successful REPLY means the router is up. If _any_ comms * to the router fail I assume it's down (this will happen if * we ping alive routers to try to detect router death before - * apps get burned). */ + * apps get burned). + */ + lnet_notify_locked(lp, 1, !event->status, cfs_time_current()); - lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current()); - /* The router checker will wake up very shortly and do the + /* + * The router checker will wake up very shortly and do the * actual notification. * XXX If 'lp' stops being a router before then, it will still - * have the notification pending!!! */ - - if (avoid_asym_router_failure && event->status == 0) + * have the notification pending!!! 
+ */ + if (avoid_asym_router_failure && !event->status) lnet_parse_rc_info(rcd); out: @@ -753,7 +803,7 @@ lnet_wait_known_routerstate(void) list_for_each(entry, &the_lnet.ln_routers) { rtr = list_entry(entry, lnet_peer_t, lp_rtr_list); - if (rtr->lp_alive_count == 0) { + if (!rtr->lp_alive_count) { all_known = 0; break; } @@ -774,7 +824,7 @@ lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net) { lnet_route_t *rte; - if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) { + if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) { list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) { if (rte->lr_net == net) { rte->lr_downis = 0; @@ -811,13 +861,15 @@ lnet_update_ni_status_locked(void) continue; } - LASSERT(ni->ni_status != NULL); + LASSERT(ni->ni_status); if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) { CDEBUG(D_NET, "NI(%s:%d) status changed to down\n", libcfs_nid2str(ni->ni_nid), timeout); - /* NB: so far, this is the only place to set - * NI status to "down" */ + /* + * NB: so far, this is the only place to set + * NI status to "down" + */ ni->ni_status->ns_status = LNET_NI_STATUS_DOWN; } lnet_ni_unlock(ni); @@ -831,7 +883,7 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd) /* detached from network */ LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh)); - if (rcd->rcd_gateway != NULL) { + if (rcd->rcd_gateway) { int cpt = rcd->rcd_gateway->lp_cpt; lnet_net_lock(cpt); @@ -839,7 +891,7 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd) lnet_net_unlock(cpt); } - if (rcd->rcd_pinginfo != NULL) + if (rcd->rcd_pinginfo) LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE); LIBCFS_FREE(rcd, sizeof(*rcd)); @@ -856,14 +908,14 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway) lnet_net_unlock(gateway->lp_cpt); LIBCFS_ALLOC(rcd, sizeof(*rcd)); - if (rcd == NULL) + if (!rcd) goto out; LNetInvalidateHandle(&rcd->rcd_mdh); INIT_LIST_HEAD(&rcd->rcd_list); LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE); - if (pi == NULL) + if (!pi) goto out; for (i = 0; i < LNET_MAX_RTR_NIS; i++) { @@ -885,11 +937,11 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway) CERROR("Can't bind MD: %d\n", rc); goto out; } - LASSERT(rc == 0); + LASSERT(!rc); lnet_net_lock(gateway->lp_cpt); /* router table changed or someone has created rcd for this gateway */ - if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) { + if (!lnet_isrouter(gateway) || gateway->lp_rcd) { lnet_net_unlock(gateway->lp_cpt); goto out; } @@ -902,10 +954,10 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway) return rcd; out: - if (rcd != NULL) { + if (rcd) { if (!LNetHandleIsInvalid(rcd->rcd_mdh)) { rc = LNetMDUnlink(rcd->rcd_mdh); - LASSERT(rc == 0); + LASSERT(!rc); } lnet_destroy_rc_data(rcd); } @@ -936,7 +988,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) lnet_peer_addref_locked(rtr); - if (rtr->lp_ping_deadline != 0 && /* ping timed out? */ + if (rtr->lp_ping_deadline && /* ping timed out? */ cfs_time_after(now, rtr->lp_ping_deadline)) lnet_notify_locked(rtr, 1, 0, now); @@ -950,10 +1002,10 @@ lnet_ping_router_locked(lnet_peer_t *rtr) return; } - rcd = rtr->lp_rcd != NULL ? + rcd = rtr->lp_rcd ? 
rtr->lp_rcd : lnet_create_rc_data_locked(rtr); - if (rcd == NULL) + if (!rcd) return; secs = lnet_router_check_interval(rtr); @@ -964,7 +1016,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) rtr->lp_ping_deadline, rtr->lp_ping_notsent, rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp); - if (secs != 0 && !rtr->lp_ping_notsent && + if (secs && !rtr->lp_ping_notsent && cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp, cfs_time_seconds(secs)))) { int rc; @@ -972,7 +1024,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) lnet_handle_md_t mdh; id.nid = rtr->lp_nid; - id.pid = LUSTRE_SRV_LNET_PID; + id.pid = LNET_PID_LUSTRE; CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id)); rtr->lp_ping_notsent = 1; @@ -980,7 +1032,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) mdh = rcd->rcd_mdh; - if (rtr->lp_ping_deadline == 0) { + if (!rtr->lp_ping_deadline) { rtr->lp_ping_deadline = cfs_time_shift(router_ping_timeout); } @@ -991,7 +1043,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) LNET_PROTO_PING_MATCHBITS, 0); lnet_net_lock(rtr->lp_cpt); - if (rc != 0) + if (rc) rtr->lp_ping_notsent = 0; /* no event pending */ } @@ -1001,8 +1053,9 @@ lnet_ping_router_locked(lnet_peer_t *rtr) int lnet_router_checker_start(void) { + struct task_struct *task; int rc; - int eqsz; + int eqsz = 0; LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN); @@ -1012,39 +1065,33 @@ lnet_router_checker_start(void) return -EINVAL; } - if (!the_lnet.ln_routing && - live_router_check_interval <= 0 && - dead_router_check_interval <= 0) - return 0; - sema_init(&the_lnet.ln_rc_signal, 0); - /* EQ size doesn't matter; the callback is guaranteed to get every - * event */ - eqsz = 0; - rc = LNetEQAlloc(eqsz, lnet_router_checker_event, - &the_lnet.ln_rc_eqh); - if (rc != 0) { + + rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh); + if (rc) { CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc); return -ENOMEM; } the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING; - rc = PTR_ERR(kthread_run(lnet_router_checker, - NULL, "router_checker")); - if (IS_ERR_VALUE(rc)) { + task = kthread_run(lnet_router_checker, NULL, "router_checker"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); CERROR("Can't start router checker thread: %d\n", rc); /* block until event callback signals exit */ down(&the_lnet.ln_rc_signal); rc = LNetEQFree(the_lnet.ln_rc_eqh); - LASSERT(rc == 0); + LASSERT(!rc); the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; return -ENOMEM; } if (check_routers_before_use) { - /* Note that a helpful side-effect of pinging all known routers + /* + * Note that a helpful side-effect of pinging all known routers * at startup is that it makes them drop stale connections they - * may have to a previous instance of me. */ + * may have to a previous instance of me. 
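The router-checker rework in this patch moves the thread onto ln_rc_waitq waits guarded by a predicate, so a wake_up() issued while the thread is not yet asleep cannot be lost: the condition is re-tested before and after every wait. The following userspace analogue of that predicate-guarded wait is a sketch only, assuming pthreads stand in for wait_event_interruptible() and kernel wait queues.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static bool work_pending;

static void wait_event_like(bool (*cond)(void))
{
	pthread_mutex_lock(&lock);
	while (!cond())                  /* re-check: no lost wake_up */
		pthread_cond_wait(&waitq, &lock);
	pthread_mutex_unlock(&lock);
}

static bool have_work(void)
{
	return work_pending;
}

static void *producer(void *arg)
{
	pthread_mutex_lock(&lock);
	work_pending = true;             /* may happen before the waiter sleeps */
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, producer, NULL);
	wait_event_like(have_work);      /* returns even if the signal came first */
	printf("woken with work pending\n");
	pthread_join(tid, NULL);
	return 0;
}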
+ */ lnet_wait_known_routerstate(); } @@ -1061,13 +1108,15 @@ lnet_router_checker_stop(void) LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING; + /* wakeup the RC thread if it's sleeping */ + wake_up(&the_lnet.ln_rc_waitq); /* block until event callback signals exit */ down(&the_lnet.ln_rc_signal); LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN); rc = LNetEQFree(the_lnet.ln_rc_eqh); - LASSERT(rc == 0); + LASSERT(!rc); } static void @@ -1091,13 +1140,13 @@ lnet_prune_rc_data(int wait_unlink) if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) { /* router checker is stopping, prune all */ list_for_each_entry(lp, &the_lnet.ln_routers, - lp_rtr_list) { - if (lp->lp_rcd == NULL) + lp_rtr_list) { + if (!lp->lp_rcd) continue; LASSERT(list_empty(&lp->lp_rcd->rcd_list)); list_add(&lp->lp_rcd->rcd_list, - &the_lnet.ln_rcd_deathrow); + &the_lnet.ln_rcd_deathrow); lp->lp_rcd = NULL; } } @@ -1119,7 +1168,7 @@ lnet_prune_rc_data(int wait_unlink) /* release all zombie RCDs */ while (!list_empty(&the_lnet.ln_rcd_zombie)) { list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie, - rcd_list) { + rcd_list) { if (LNetHandleIsInvalid(rcd->rcd_mdh)) list_move(&rcd->rcd_list, &head); } @@ -1131,7 +1180,7 @@ lnet_prune_rc_data(int wait_unlink) while (!list_empty(&head)) { rcd = list_entry(head.next, - lnet_rc_data_t, rcd_list); + lnet_rc_data_t, rcd_list); list_del_init(&rcd->rcd_list); lnet_destroy_rc_data(rcd); } @@ -1151,6 +1200,33 @@ lnet_prune_rc_data(int wait_unlink) lnet_net_unlock(LNET_LOCK_EX); } +/* + * This function is called to check if the RC should block indefinitely. + * It's called from lnet_router_checker() as well as being passed to + * wait_event_interruptible() to avoid the lost wake_up problem. + * + * When it's called from wait_event_interruptible() it is necessary to + * also not sleep if the rc state is not running to avoid a deadlock + * when the system is shutting down + */ +static inline bool +lnet_router_checker_active(void) +{ + if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) + return true; + + /* + * Router Checker thread needs to run when routing is enabled in + * order to call lnet_update_ni_status_locked() + */ + if (the_lnet.ln_routing) + return true; + + return !list_empty(&the_lnet.ln_routers) && + (live_router_check_interval > 0 || + dead_router_check_interval > 0); +} + static int lnet_router_checker(void *arg) { @@ -1159,8 +1235,6 @@ lnet_router_checker(void *arg) cfs_block_allsigs(); - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); - while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) { __u64 version; int cpt; @@ -1199,15 +1273,25 @@ rescan: lnet_prune_rc_data(0); /* don't wait for UNLINK */ - /* Call schedule_timeout() here always adds 1 to load average + /* + * Call schedule_timeout() here always adds 1 to load average * because kernel counts # active tasks as nr_running - * + nr_uninterruptible. */ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); + * + nr_uninterruptible. + */ + /* + * if there are any routes then wakeup every second. 
If + * there are no routes then sleep indefinitely until woken + * up by a user adding a route + */ + if (!lnet_router_checker_active()) + wait_event_interruptible(the_lnet.ln_rc_waitq, + lnet_router_checker_active()); + else + wait_event_interruptible_timeout(the_lnet.ln_rc_waitq, + false, + cfs_time_seconds(1)); } - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING); - lnet_prune_rc_data(1); /* wait for UNLINK */ the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; @@ -1216,7 +1300,7 @@ rescan: return 0; } -static void +void lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages) { int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); @@ -1237,7 +1321,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) int i; LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz); - if (rb == NULL) + if (!rb) return NULL; rb->rb_pool = rbp; @@ -1246,7 +1330,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) page = alloc_pages_node( cfs_cpt_spread_node(lnet_cpt_table(), cpt), GFP_KERNEL | __GFP_ZERO, 0); - if (page == NULL) { + if (!page) { while (--i >= 0) __free_page(rb->rb_kiov[i].kiov_page); @@ -1263,66 +1347,119 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) } static void -lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp) +lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt) { int npages = rbp->rbp_npages; - int nbuffers = 0; + struct list_head tmp; lnet_rtrbuf_t *rb; + lnet_rtrbuf_t *temp; - if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */ + if (!rbp->rbp_nbuffers) /* not initialized or already freed */ return; - LASSERT(list_empty(&rbp->rbp_msgs)); - LASSERT(rbp->rbp_credits == rbp->rbp_nbuffers); + INIT_LIST_HEAD(&tmp); - while (!list_empty(&rbp->rbp_bufs)) { - LASSERT(rbp->rbp_credits > 0); + lnet_net_lock(cpt); + lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt); + list_splice_init(&rbp->rbp_bufs, &tmp); + rbp->rbp_req_nbuffers = 0; + rbp->rbp_nbuffers = 0; + rbp->rbp_credits = 0; + rbp->rbp_mincredits = 0; + lnet_net_unlock(cpt); - rb = list_entry(rbp->rbp_bufs.next, - lnet_rtrbuf_t, rb_list); + /* Free buffers on the free list. */ + list_for_each_entry_safe(rb, temp, &tmp, rb_list) { list_del(&rb->rb_list); lnet_destroy_rtrbuf(rb, npages); - nbuffers++; } - - LASSERT(rbp->rbp_nbuffers == nbuffers); - LASSERT(rbp->rbp_credits == nbuffers); - - rbp->rbp_nbuffers = rbp->rbp_credits = 0; } static int -lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt) +lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt) { + struct list_head rb_list; lnet_rtrbuf_t *rb; - int i; + int num_rb; + int num_buffers = 0; + int old_req_nbufs; + int npages = rbp->rbp_npages; - if (rbp->rbp_nbuffers != 0) { - LASSERT(rbp->rbp_nbuffers == nbufs); + lnet_net_lock(cpt); + /* + * If we are called for less buffers than already in the pool, we + * just lower the req_nbuffers number and excess buffers will be + * thrown away as they are returned to the free list. Credits + * then get adjusted as well. + * If we already have enough buffers allocated to serve the + * increase requested, then we can treat that the same way as we + * do the decrease. 
+ */ + num_rb = nbufs - rbp->rbp_nbuffers; + if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) { + rbp->rbp_req_nbuffers = nbufs; + lnet_net_unlock(cpt); return 0; } + /* + * store the older value of rbp_req_nbuffers and then set it to + * the new request to prevent lnet_return_rx_credits_locked() from + * freeing buffers that we need to keep around + */ + old_req_nbufs = rbp->rbp_req_nbuffers; + rbp->rbp_req_nbuffers = nbufs; + lnet_net_unlock(cpt); - for (i = 0; i < nbufs; i++) { + INIT_LIST_HEAD(&rb_list); + + /* + * allocate the buffers on a local list first. If all buffers are + * allocated successfully then join this list to the rbp buffer + * list. If not then free all allocated buffers. + */ + while (num_rb-- > 0) { rb = lnet_new_rtrbuf(rbp, cpt); + if (!rb) { + CERROR("Failed to allocate %d route bufs of %d pages\n", + nbufs, npages); - if (rb == NULL) { - CERROR("Failed to allocate %d router bufs of %d pages\n", - nbufs, rbp->rbp_npages); - return -ENOMEM; - } + lnet_net_lock(cpt); + rbp->rbp_req_nbuffers = old_req_nbufs; + lnet_net_unlock(cpt); - rbp->rbp_nbuffers++; - rbp->rbp_credits++; - rbp->rbp_mincredits++; - list_add(&rb->rb_list, &rbp->rbp_bufs); + goto failed; + } - /* No allocation "under fire" */ - /* Otherwise we'd need code to schedule blocked msgs etc */ - LASSERT(!the_lnet.ln_routing); + list_add(&rb->rb_list, &rb_list); + num_buffers++; } - LASSERT(rbp->rbp_credits == nbufs); + lnet_net_lock(cpt); + + list_splice_tail(&rb_list, &rbp->rbp_bufs); + rbp->rbp_nbuffers += num_buffers; + rbp->rbp_credits += num_buffers; + rbp->rbp_mincredits = rbp->rbp_credits; + /* + * We need to schedule blocked msg using the newly + * added buffers. + */ + while (!list_empty(&rbp->rbp_bufs) && + !list_empty(&rbp->rbp_msgs)) + lnet_schedule_blocked_locked(rbp); + + lnet_net_unlock(cpt); + return 0; + +failed: + while (!list_empty(&rb_list)) { + rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list); + list_del(&rb->rb_list); + lnet_destroy_rtrbuf(rb, npages); + } + + return -ENOMEM; } static void @@ -1337,26 +1474,28 @@ lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages) } void -lnet_rtrpools_free(void) +lnet_rtrpools_free(int keep_pools) { lnet_rtrbufpool_t *rtrp; int i; - if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */ + if (!the_lnet.ln_rtrpools) /* uninitialized or freed */ return; cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - lnet_rtrpool_free_bufs(&rtrp[0]); - lnet_rtrpool_free_bufs(&rtrp[1]); - lnet_rtrpool_free_bufs(&rtrp[2]); + lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i); + lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i); + lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i); } - cfs_percpt_free(the_lnet.ln_rtrpools); - the_lnet.ln_rtrpools = NULL; + if (!keep_pools) { + cfs_percpt_free(the_lnet.ln_rtrpools); + the_lnet.ln_rtrpools = NULL; + } } static int -lnet_nrb_tiny_calculate(int npages) +lnet_nrb_tiny_calculate(void) { int nrbs = LNET_NRB_TINY; @@ -1364,7 +1503,7 @@ lnet_nrb_tiny_calculate(int npages) LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when routing enabled\n", tiny_router_buffers); - return -1; + return -EINVAL; } if (tiny_router_buffers > 0) @@ -1375,7 +1514,7 @@ lnet_nrb_tiny_calculate(int npages) } static int -lnet_nrb_small_calculate(int npages) +lnet_nrb_small_calculate(void) { int nrbs = LNET_NRB_SMALL; @@ -1383,7 +1522,7 @@ lnet_nrb_small_calculate(int npages) LCONSOLE_ERROR_MSG(0x10c, "small_router_buffers=%d invalid when routing enabled\n", small_router_buffers); - return -1; + return -EINVAL; } if 
(small_router_buffers > 0) @@ -1394,7 +1533,7 @@ lnet_nrb_small_calculate(int npages) } static int -lnet_nrb_large_calculate(int npages) +lnet_nrb_large_calculate(void) { int nrbs = LNET_NRB_LARGE; @@ -1402,7 +1541,7 @@ lnet_nrb_large_calculate(int npages) LCONSOLE_ERROR_MSG(0x10c, "large_router_buffers=%d invalid when routing enabled\n", large_router_buffers); - return -1; + return -EINVAL; } if (large_router_buffers > 0) @@ -1416,16 +1555,12 @@ int lnet_rtrpools_alloc(int im_a_router) { lnet_rtrbufpool_t *rtrp; - int large_pages; - int small_pages = 1; int nrb_tiny; int nrb_small; int nrb_large; int rc; int i; - large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (!strcmp(forwarding, "")) { /* not set either way */ if (!im_a_router) @@ -1440,41 +1575,46 @@ lnet_rtrpools_alloc(int im_a_router) return -EINVAL; } - nrb_tiny = lnet_nrb_tiny_calculate(0); + nrb_tiny = lnet_nrb_tiny_calculate(); if (nrb_tiny < 0) return -EINVAL; - nrb_small = lnet_nrb_small_calculate(small_pages); + nrb_small = lnet_nrb_small_calculate(); if (nrb_small < 0) return -EINVAL; - nrb_large = lnet_nrb_large_calculate(large_pages); + nrb_large = lnet_nrb_large_calculate(); if (nrb_large < 0) return -EINVAL; the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(), LNET_NRBPOOLS * sizeof(lnet_rtrbufpool_t)); - if (the_lnet.ln_rtrpools == NULL) { + if (!the_lnet.ln_rtrpools) { LCONSOLE_ERROR_MSG(0x10c, "Failed to initialize router buffe pool\n"); return -ENOMEM; } cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - lnet_rtrpool_init(&rtrp[0], 0); - rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i); - if (rc != 0) + lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0); + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX], + nrb_tiny, i); + if (rc) goto failed; - lnet_rtrpool_init(&rtrp[1], small_pages); - rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i); - if (rc != 0) + lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX], + LNET_NRB_SMALL_PAGES); + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX], + nrb_small, i); + if (rc) goto failed; - lnet_rtrpool_init(&rtrp[2], large_pages); - rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i); - if (rc != 0) + lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX], + LNET_NRB_LARGE_PAGES); + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX], + nrb_large, i); + if (rc) goto failed; } @@ -1485,10 +1625,118 @@ lnet_rtrpools_alloc(int im_a_router) return 0; failed: - lnet_rtrpools_free(); + lnet_rtrpools_free(0); return rc; } +static int +lnet_rtrpools_adjust_helper(int tiny, int small, int large) +{ + int nrb = 0; + int rc = 0; + int i; + lnet_rtrbufpool_t *rtrp; + + /* + * If the provided values for each buffer pool are different than the + * configured values, we need to take action. 
+ */ + if (tiny >= 0) { + tiny_router_buffers = tiny; + nrb = lnet_nrb_tiny_calculate(); + cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX], + nrb, i); + if (rc) + return rc; + } + } + if (small >= 0) { + small_router_buffers = small; + nrb = lnet_nrb_small_calculate(); + cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX], + nrb, i); + if (rc) + return rc; + } + } + if (large >= 0) { + large_router_buffers = large; + nrb = lnet_nrb_large_calculate(); + cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX], + nrb, i); + if (rc) + return rc; + } + } + + return 0; +} + +int +lnet_rtrpools_adjust(int tiny, int small, int large) +{ + /* + * this function doesn't revert the changes if adding new buffers + * failed. It's up to the user space caller to revert the + * changes. + */ + if (!the_lnet.ln_routing) + return 0; + + return lnet_rtrpools_adjust_helper(tiny, small, large); +} + +int +lnet_rtrpools_enable(void) +{ + int rc; + + if (the_lnet.ln_routing) + return 0; + + if (!the_lnet.ln_rtrpools) + /* + * If routing is turned off, and we have never + * initialized the pools before, just call the + * standard buffer pool allocation routine as + * if we are just configuring this for the first + * time. + */ + return lnet_rtrpools_alloc(1); + + rc = lnet_rtrpools_adjust_helper(0, 0, 0); + if (rc) + return rc; + + lnet_net_lock(LNET_LOCK_EX); + the_lnet.ln_routing = 1; + + the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED; + lnet_net_unlock(LNET_LOCK_EX); + + return 0; +} + +void +lnet_rtrpools_disable(void) +{ + if (!the_lnet.ln_routing) + return; + + lnet_net_lock(LNET_LOCK_EX); + the_lnet.ln_routing = 0; + the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED; + + tiny_router_buffers = 0; + small_router_buffers = 0; + large_router_buffers = 0; + lnet_net_unlock(LNET_LOCK_EX); + lnet_rtrpools_free(1); +} + int lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) { @@ -1499,28 +1747,28 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) LASSERT(!in_interrupt()); CDEBUG(D_NET, "%s notifying %s: %s\n", - (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid), - libcfs_nid2str(nid), - alive ? "up" : "down"); + !ni ? "userspace" : libcfs_nid2str(ni->ni_nid), + libcfs_nid2str(nid), + alive ? "up" : "down"); - if (ni != NULL && + if (ni && LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) { CWARN("Ignoring notification of %s %s by %s (different net)\n", - libcfs_nid2str(nid), alive ? "birth" : "death", - libcfs_nid2str(ni->ni_nid)); + libcfs_nid2str(nid), alive ? "birth" : "death", + libcfs_nid2str(ni->ni_nid)); return -EINVAL; } /* can't do predictions... */ if (cfs_time_after(when, now)) { CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n", - (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid), + !ni ? "userspace" : libcfs_nid2str(ni->ni_nid), libcfs_nid2str(nid), alive ? 
"up" : "down", cfs_duration_sec(cfs_time_sub(when, now))); return -EINVAL; } - if (ni != NULL && !alive && /* LND telling me she's down */ + if (ni && !alive && /* LND telling me she's down */ !auto_down) { /* auto-down disabled */ CDEBUG(D_NET, "Auto-down disabled\n"); return 0; @@ -1534,23 +1782,26 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) } lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid); - if (lp == NULL) { + if (!lp) { /* nid not found */ lnet_net_unlock(cpt); CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid)); return 0; } - /* We can't fully trust LND on reporting exact peer last_alive + /* + * We can't fully trust LND on reporting exact peer last_alive * if he notifies us about dead peer. For example ksocklnd can * call us with when == _time_when_the_node_was_booted_ if - * no connections were successfully established */ - if (ni != NULL && !alive && when < lp->lp_last_alive) + * no connections were successfully established + */ + if (ni && !alive && when < lp->lp_last_alive) when = lp->lp_last_alive; - lnet_notify_locked(lp, ni == NULL, alive, when); + lnet_notify_locked(lp, !ni, alive, when); - lnet_ni_notify_locked(ni, lp); + if (ni) + lnet_ni_notify_locked(ni, lp); lnet_peer_decref_locked(lp); diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c index 396c7c4e5c83..65f65a3fc901 100644 --- a/drivers/staging/lustre/lnet/lnet/router_proc.c +++ b/drivers/staging/lustre/lnet/lnet/router_proc.c @@ -15,18 +15,16 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/libcfs/libcfs.h" #include "../../include/linux/lnet/lib-lnet.h" -/* This is really lnet_proc.c. You might need to update sanity test 215 - * if any file format is changed. */ +/* + * This is really lnet_proc.c. You might need to update sanity test 215 + * if any file format is changed. 
+ */ #define LNET_LOFFT_BITS (sizeof(loff_t) * 8) /* @@ -75,25 +73,6 @@ #define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK)) -static int proc_call_handler(void *data, int write, loff_t *ppos, - void __user *buffer, size_t *lenp, - int (*handler)(void *data, int write, - loff_t pos, void __user *buffer, int len)) -{ - int rc = handler(data, write, *ppos, buffer, *lenp); - - if (rc < 0) - return rc; - - if (write) { - *ppos += *lenp; - } else { - *lenp = rc; - *ppos += rc; - } - return 0; -} - static int __proc_lnet_stats(void *data, int write, loff_t pos, void __user *buffer, int nob) { @@ -111,11 +90,11 @@ static int __proc_lnet_stats(void *data, int write, /* read */ LIBCFS_ALLOC(ctrs, sizeof(*ctrs)); - if (ctrs == NULL) + if (!ctrs) return -ENOMEM; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) { + if (!tmpstr) { LIBCFS_FREE(ctrs, sizeof(*ctrs)); return -ENOMEM; } @@ -145,8 +124,8 @@ static int __proc_lnet_stats(void *data, int write, static int proc_lnet_stats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_stats); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_lnet_stats); } static int proc_lnet_routes(struct ctl_table *table, int write, @@ -167,16 +146,16 @@ static int proc_lnet_routes(struct ctl_table *table, int write, LASSERT(!write); - if (*lenp == 0) + if (!*lenp) return 0; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ - if (*ppos == 0) { + if (!*ppos) { s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n", the_lnet.ln_routing ? "enabled" : "disabled"); LASSERT(tmpstr + tmpsiz - s > 0); @@ -206,23 +185,22 @@ static int proc_lnet_routes(struct ctl_table *table, int write, return -ESTALE; } - for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && route == NULL; - i++) { + for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && !route; i++) { rn_list = &the_lnet.ln_remote_nets_hash[i]; n = rn_list->next; - while (n != rn_list && route == NULL) { + while (n != rn_list && !route) { rnet = list_entry(n, lnet_remotenet_t, - lrn_list); + lrn_list); r = rnet->lrn_routes.next; while (r != &rnet->lrn_routes) { lnet_route_t *re = list_entry(r, lnet_route_t, - lr_list); - if (skip == 0) { + lr_list); + if (!skip) { route = re; break; } @@ -235,12 +213,12 @@ static int proc_lnet_routes(struct ctl_table *table, int write, } } - if (route != NULL) { + if (route) { __u32 net = rnet->lrn_net; - unsigned int hops = route->lr_hops; + __u32 hops = route->lr_hops; unsigned int priority = route->lr_priority; lnet_nid_t nid = route->lr_gateway->lp_nid; - int alive = route->lr_gateway->lp_alive; + int alive = lnet_is_route_alive(route); s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4u %8u %7s %s\n", @@ -259,9 +237,9 @@ static int proc_lnet_routes(struct ctl_table *table, int write, if (len > *lenp) { /* linux-supplied buffer is too small */ rc = -EINVAL; } else if (len > 0) { /* wrote something */ - if (copy_to_user(buffer, tmpstr, len)) + if (copy_to_user(buffer, tmpstr, len)) { rc = -EFAULT; - else { + } else { off += 1; *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); } @@ -269,7 +247,7 @@ static int proc_lnet_routes(struct ctl_table *table, int write, LIBCFS_FREE(tmpstr, tmpsiz); - if (rc == 0) + if (!rc) *lenp = len; return rc; @@ -291,16 +269,16 @@ static int proc_lnet_routers(struct ctl_table *table, int write, LASSERT(!write); - if (*lenp == 0) + if 
(!*lenp) return 0; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ - if (*ppos == 0) { + if (!*ppos) { s += snprintf(s, tmpstr + tmpsiz - s, "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n", "ref", "rtr_ref", "alive_cnt", "state", @@ -330,9 +308,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write, while (r != &the_lnet.ln_routers) { lnet_peer_t *lp = list_entry(r, lnet_peer_t, - lp_rtr_list); + lp_rtr_list); - if (skip == 0) { + if (!skip) { peer = lp; break; } @@ -341,7 +319,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write, r = r->next; } - if (peer != NULL) { + if (peer) { lnet_nid_t nid = peer->lp_nid; unsigned long now = cfs_time_current(); unsigned long deadline = peer->lp_ping_deadline; @@ -356,19 +334,21 @@ static int proc_lnet_routers(struct ctl_table *table, int write, lnet_route_t *rtr; if ((peer->lp_ping_feats & - LNET_PING_FEAT_NI_STATUS) != 0) { + LNET_PING_FEAT_NI_STATUS)) { list_for_each_entry(rtr, &peer->lp_routes, - lr_gwlist) { - /* downis on any route should be the - * number of downis on the gateway */ - if (rtr->lr_downis != 0) { + lr_gwlist) { + /* + * downis on any route should be the + * number of downis on the gateway + */ + if (rtr->lr_downis) { down_ni = rtr->lr_downis; break; } } } - if (deadline == 0) + if (!deadline) s += snprintf(s, tmpstr + tmpsiz - s, "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n", nrefs, nrtrrefs, alive_cnt, @@ -394,9 +374,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write, if (len > *lenp) { /* linux-supplied buffer is too small */ rc = -EINVAL; } else if (len > 0) { /* wrote something */ - if (copy_to_user(buffer, tmpstr, len)) + if (copy_to_user(buffer, tmpstr, len)) { rc = -EFAULT; - else { + } else { off += 1; *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); } @@ -404,7 +384,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write, LIBCFS_FREE(tmpstr, tmpsiz); - if (rc == 0) + if (!rc) *lenp = len; return rc; @@ -427,7 +407,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS); LASSERT(!write); - if (*lenp == 0) + if (!*lenp) return 0; if (cpt >= LNET_CPT_NUMBER) { @@ -436,12 +416,12 @@ static int proc_lnet_peers(struct ctl_table *table, int write, } LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ - if (*ppos == 0) { + if (!*ppos) { s += snprintf(s, tmpstr + tmpsiz - s, "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n", "nid", "refs", "state", "last", "max", @@ -470,18 +450,20 @@ static int proc_lnet_peers(struct ctl_table *table, int write, } while (hash < LNET_PEER_HASH_SIZE) { - if (p == NULL) + if (!p) p = ptable->pt_hash[hash].next; while (p != &ptable->pt_hash[hash]) { lnet_peer_t *lp = list_entry(p, lnet_peer_t, - lp_hashlist); - if (skip == 0) { + lp_hashlist); + if (!skip) { peer = lp; - /* minor optimization: start from idx+1 + /* + * minor optimization: start from idx+1 * on next iteration if we've just - * drained lp_hashlist */ + * drained lp_hashlist + */ if (lp->lp_hashlist.next == &ptable->pt_hash[hash]) { hoff = 1; @@ -497,7 +479,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, p = lp->lp_hashlist.next; } - if (peer != NULL) + if (peer) break; p = NULL; @@ -505,7 +487,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, hash++; } - if (peer != NULL) { + if (peer) { lnet_nid_t nid = peer->lp_nid; 
int nrefs = peer->lp_refcount; int lastalive = -1; @@ -553,7 +535,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, cpt++; hash = 0; hoff = 1; - if (peer == NULL && cpt < LNET_CPT_NUMBER) + if (!peer && cpt < LNET_CPT_NUMBER) goto again; } } @@ -571,7 +553,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, LIBCFS_FREE(tmpstr, tmpsiz); - if (rc == 0) + if (!rc) *lenp = len; return rc; @@ -593,7 +575,7 @@ static int __proc_lnet_buffers(void *data, int write, /* (4 %d) * 4 * LNET_CPT_NUMBER */ tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ @@ -603,7 +585,7 @@ static int __proc_lnet_buffers(void *data, int write, "pages", "count", "credits", "min"); LASSERT(tmpstr + tmpsiz - s > 0); - if (the_lnet.ln_rtrpools == NULL) + if (!the_lnet.ln_rtrpools) goto out; /* I'm not a router */ for (idx = 0; idx < LNET_NRBPOOLS; idx++) { @@ -638,8 +620,8 @@ static int __proc_lnet_buffers(void *data, int write, static int proc_lnet_buffers(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_buffers); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_lnet_buffers); } static int proc_lnet_nis(struct ctl_table *table, int write, @@ -653,16 +635,16 @@ static int proc_lnet_nis(struct ctl_table *table, int write, LASSERT(!write); - if (*lenp == 0) + if (!*lenp) return 0; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ - if (*ppos == 0) { + if (!*ppos) { s += snprintf(s, tmpstr + tmpsiz - s, "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n", "nid", "status", "alive", "refs", "peer", @@ -680,7 +662,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write, while (n != &the_lnet.ln_nis) { lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list); - if (skip == 0) { + if (!skip) { ni = a_ni; break; } @@ -689,7 +671,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write, n = n->next; } - if (ni != NULL) { + if (ni) { struct lnet_tx_queue *tq; char *stat; time64_t now = ktime_get_real_seconds(); @@ -705,15 +687,17 @@ static int proc_lnet_nis(struct ctl_table *table, int write, last_alive = 0; lnet_ni_lock(ni); - LASSERT(ni->ni_status != NULL); + LASSERT(ni->ni_status); stat = (ni->ni_status->ns_status == LNET_NI_STATUS_UP) ? 
"up" : "down"; lnet_ni_unlock(ni); - /* we actually output credits information for - * TX queue of each partition */ + /* + * we actually output credits information for + * TX queue of each partition + */ cfs_percpt_for_each(tq, i, ni->ni_tx_queues) { - for (j = 0; ni->ni_cpts != NULL && + for (j = 0; ni->ni_cpts && j < ni->ni_ncpts; j++) { if (i == ni->ni_cpts[j]) break; @@ -722,18 +706,19 @@ static int proc_lnet_nis(struct ctl_table *table, int write, if (j == ni->ni_ncpts) continue; - if (i != 0) + if (i) lnet_net_lock(i); s += snprintf(s, tmpstr + tmpsiz - s, - "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n", - libcfs_nid2str(ni->ni_nid), stat, - last_alive, *ni->ni_refs[i], - ni->ni_peertxcredits, - ni->ni_peerrtrcredits, - tq->tq_credits_max, - tq->tq_credits, tq->tq_credits_min); - if (i != 0) + "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n", + libcfs_nid2str(ni->ni_nid), stat, + last_alive, *ni->ni_refs[i], + ni->ni_peertxcredits, + ni->ni_peerrtrcredits, + tq->tq_credits_max, + tq->tq_credits, + tq->tq_credits_min); + if (i) lnet_net_unlock(i); } LASSERT(tmpstr + tmpsiz - s > 0); @@ -755,7 +740,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write, LIBCFS_FREE(tmpstr, tmpsiz); - if (rc == 0) + if (!rc) *lenp = len; return rc; @@ -795,8 +780,6 @@ static struct lnet_portal_rotors portal_rotors[] = { }, }; -extern int portal_rotor; - static int __proc_lnet_portal_rotor(void *data, int write, loff_t pos, void __user *buffer, int nob) { @@ -807,7 +790,7 @@ static int __proc_lnet_portal_rotor(void *data, int write, int i; LIBCFS_ALLOC(buf, buf_len); - if (buf == NULL) + if (!buf) return -ENOMEM; if (!write) { @@ -831,7 +814,7 @@ static int __proc_lnet_portal_rotor(void *data, int write, rc = 0; } else { rc = cfs_trace_copyout_string(buffer, nob, - buf + pos, "\n"); + buf + pos, "\n"); } goto out; } @@ -844,9 +827,9 @@ static int __proc_lnet_portal_rotor(void *data, int write, rc = -EINVAL; lnet_res_lock(0); - for (i = 0; portal_rotors[i].pr_name != NULL; i++) { - if (strncasecmp(portal_rotors[i].pr_name, tmp, - strlen(portal_rotors[i].pr_name)) == 0) { + for (i = 0; portal_rotors[i].pr_name; i++) { + if (!strncasecmp(portal_rotors[i].pr_name, tmp, + strlen(portal_rotors[i].pr_name))) { portal_rotor = portal_rotors[i].pr_value; rc = 0; break; @@ -862,8 +845,8 @@ static int proc_lnet_portal_rotor(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_portal_rotor); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_lnet_portal_rotor); } static struct ctl_table lnet_table[] = { diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c index 1f04cc1fc31c..eebc92412061 100644 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c @@ -51,14 +51,14 @@ MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by d static void brw_client_fini(sfw_test_instance_t *tsi) { - srpc_bulk_t *bulk; - sfw_test_unit_t *tsu; + srpc_bulk_t *bulk; + sfw_test_unit_t *tsu; LASSERT(tsi->tsi_is_client); list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { bulk = tsu->tsu_private; - if (bulk == NULL) + if (!bulk) continue; srpc_free_bulk(bulk); @@ -69,38 +69,42 @@ brw_client_fini(sfw_test_instance_t *tsi) static int brw_client_init(sfw_test_instance_t *tsi) { - sfw_session_t *sn = tsi->tsi_batch->bat_session; - int flags; - int npg; - int len; - int 
opc; - srpc_bulk_t *bulk; - sfw_test_unit_t *tsu; - - LASSERT(sn != NULL); + sfw_session_t *sn = tsi->tsi_batch->bat_session; + int flags; + int npg; + int len; + int opc; + srpc_bulk_t *bulk; + sfw_test_unit_t *tsu; + + LASSERT(sn); LASSERT(tsi->tsi_is_client); - if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { - test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; + if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { + test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; - opc = breq->blk_opc; + opc = breq->blk_opc; flags = breq->blk_flags; - npg = breq->blk_npg; - /* NB: this is not going to work for variable page size, - * but we have to keep it for compatibility */ - len = npg * PAGE_CACHE_SIZE; + npg = breq->blk_npg; + /* + * NB: this is not going to work for variable page size, + * but we have to keep it for compatibility + */ + len = npg * PAGE_CACHE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; - /* I should never get this step if it's unknown feature - * because make_session will reject unknown feature */ - LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0); + /* + * I should never get this step if it's unknown feature + * because make_session will reject unknown feature + */ + LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); - opc = breq->blk_opc; + opc = breq->blk_opc; flags = breq->blk_flags; - len = breq->blk_len; - npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + len = breq->blk_len; + npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; } if (npg > LNET_MAX_IOV || npg <= 0) @@ -116,7 +120,7 @@ brw_client_init(sfw_test_instance_t *tsi) list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid), npg, len, opc == LST_BRW_READ); - if (bulk == NULL) { + if (!bulk) { brw_client_fini(tsi); return -ENOMEM; } @@ -127,9 +131,9 @@ brw_client_init(sfw_test_instance_t *tsi) return 0; } -#define BRW_POISON 0xbeefbeefbeefbeefULL -#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL -#define BRW_MSIZE sizeof(__u64) +#define BRW_POISON 0xbeefbeefbeefbeefULL +#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL +#define BRW_MSIZE sizeof(__u64) static int brw_inject_one_error(void) @@ -141,7 +145,7 @@ brw_inject_one_error(void) ktime_get_ts64(&ts); - if (((ts.tv_nsec / NSEC_PER_USEC) & 1) == 0) + if (!((ts.tv_nsec / NSEC_PER_USEC) & 1)) return 0; return brw_inject_errors--; @@ -151,9 +155,9 @@ static void brw_fill_page(struct page *pg, int pattern, __u64 magic) { char *addr = page_address(pg); - int i; + int i; - LASSERT(addr != NULL); + LASSERT(addr); if (pattern == LST_BRW_CHECK_NONE) return; @@ -180,22 +184,22 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic) static int brw_check_page(struct page *pg, int pattern, __u64 magic) { - char *addr = page_address(pg); - __u64 data = 0; /* make compiler happy */ - int i; + char *addr = page_address(pg); + __u64 data = 0; /* make compiler happy */ + int i; - LASSERT(addr != NULL); + LASSERT(addr); if (pattern == LST_BRW_CHECK_NONE) return 0; if (pattern == LST_BRW_CHECK_SIMPLE) { - data = *((__u64 *) addr); + data = *((__u64 *)addr); if (data != magic) goto bad_data; addr += PAGE_CACHE_SIZE - BRW_MSIZE; - data = *((__u64 *) addr); + data = *((__u64 *)addr); if (data != magic) goto bad_data; @@ -204,7 +208,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic) if (pattern == LST_BRW_CHECK_FULL) { for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { - data = *(((__u64 *) addr) + i); + data = *(((__u64 *)addr) + i); if (data != magic) goto bad_data; } @@ -216,7 +220,7 @@ brw_check_page(struct page *pg, 
int pattern, __u64 magic) bad_data: CERROR("Bad data in page %p: %#llx, %#llx expected\n", - pg, data, magic); + pg, data, magic); return 1; } @@ -240,9 +244,9 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) for (i = 0; i < bk->bk_niov; i++) { pg = bk->bk_iovs[i].kiov_page; - if (brw_check_page(pg, pattern, magic) != 0) { + if (brw_check_page(pg, pattern, magic)) { CERROR("Bulk page %p (%d/%d) is corrupted!\n", - pg, i, bk->bk_niov); + pg, i, bk->bk_niov); return 1; } } @@ -252,7 +256,7 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) static int brw_client_prep_rpc(sfw_test_unit_t *tsu, - lnet_process_id_t dest, srpc_client_rpc_t **rpcpp) + lnet_process_id_t dest, srpc_client_rpc_t **rpcpp) { srpc_bulk_t *bulk = tsu->tsu_private; sfw_test_instance_t *tsi = tsu->tsu_instance; @@ -265,32 +269,34 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, int opc; int rc; - LASSERT(sn != NULL); - LASSERT(bulk != NULL); + LASSERT(sn); + LASSERT(bulk); - if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { + if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; - opc = breq->blk_opc; + opc = breq->blk_opc; flags = breq->blk_flags; - npg = breq->blk_npg; - len = npg * PAGE_CACHE_SIZE; + npg = breq->blk_npg; + len = npg * PAGE_CACHE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; - /* I should never get this step if it's unknown feature - * because make_session will reject unknown feature */ - LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0); + /* + * I should never get this step if it's unknown feature + * because make_session will reject unknown feature + */ + LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); - opc = breq->blk_opc; + opc = breq->blk_opc; flags = breq->blk_flags; - len = breq->blk_len; - npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + len = breq->blk_len; + npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; } rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); - if (rc != 0) + if (rc) return rc; memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg])); @@ -301,8 +307,8 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, req = &rpc->crpc_reqstmsg.msg_body.brw_reqst; req->brw_flags = flags; - req->brw_rw = opc; - req->brw_len = len; + req->brw_rw = opc; + req->brw_len = len; *rpcpp = rpc; return 0; @@ -318,14 +324,14 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) srpc_brw_reply_t *reply = &msg->msg_body.brw_reply; srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; - LASSERT(sn != NULL); + LASSERT(sn); - if (rpc->crpc_status != 0) { + if (rpc->crpc_status) { CERROR("BRW RPC to %s failed with %d\n", - libcfs_id2str(rpc->crpc_dest), rpc->crpc_status); + libcfs_id2str(rpc->crpc_dest), rpc->crpc_status); if (!tsi->tsi_stopping) /* rpc could have been aborted */ atomic_inc(&sn->sn_brw_errors); - goto out; + return; } if (msg->msg_magic != SRPC_MSG_MAGIC) { @@ -334,27 +340,24 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) } CDEBUG(reply->brw_status ? 
D_WARNING : D_NET, - "BRW RPC to %s finished with brw_status: %d\n", - libcfs_id2str(rpc->crpc_dest), reply->brw_status); + "BRW RPC to %s finished with brw_status: %d\n", + libcfs_id2str(rpc->crpc_dest), reply->brw_status); - if (reply->brw_status != 0) { + if (reply->brw_status) { atomic_inc(&sn->sn_brw_errors); rpc->crpc_status = -(int)reply->brw_status; - goto out; + return; } if (reqst->brw_rw == LST_BRW_WRITE) - goto out; + return; - if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) { + if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) { CERROR("Bulk data from %s is corrupted!\n", - libcfs_id2str(rpc->crpc_dest)); + libcfs_id2str(rpc->crpc_dest)); atomic_inc(&sn->sn_brw_errors); rpc->crpc_status = -EBADMSG; } - -out: - return; } static void @@ -362,17 +365,17 @@ brw_server_rpc_done(struct srpc_server_rpc *rpc) { srpc_bulk_t *blk = rpc->srpc_bulk; - if (blk == NULL) + if (!blk) return; - if (rpc->srpc_status != 0) + if (rpc->srpc_status) CERROR("Bulk transfer %s %s has failed: %d\n", - blk->bk_sink ? "from" : "to", - libcfs_id2str(rpc->srpc_peer), rpc->srpc_status); + blk->bk_sink ? "from" : "to", + libcfs_id2str(rpc->srpc_peer), rpc->srpc_status); else CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n", - blk->bk_niov, blk->bk_sink ? "from" : "to", - libcfs_id2str(rpc->srpc_peer)); + blk->bk_niov, blk->bk_sink ? "from" : "to", + libcfs_id2str(rpc->srpc_peer)); sfw_free_pages(rpc); } @@ -385,16 +388,16 @@ brw_bulk_ready(struct srpc_server_rpc *rpc, int status) srpc_brw_reqst_t *reqst; srpc_msg_t *reqstmsg; - LASSERT(rpc->srpc_bulk != NULL); - LASSERT(rpc->srpc_reqstbuf != NULL); + LASSERT(rpc->srpc_bulk); + LASSERT(rpc->srpc_reqstbuf); reqstmsg = &rpc->srpc_reqstbuf->buf_msg; reqst = &reqstmsg->msg_body.brw_reqst; - if (status != 0) { + if (status) { CERROR("BRW bulk %s failed for RPC from %s: %d\n", - reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE", - libcfs_id2str(rpc->srpc_peer), status); + reqst->brw_rw == LST_BRW_READ ? 
"READ" : "WRITE", + libcfs_id2str(rpc->srpc_peer), status); return -EIO; } @@ -404,9 +407,9 @@ brw_bulk_ready(struct srpc_server_rpc *rpc, int status) if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) __swab64s(&magic); - if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) { + if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic)) { CERROR("Bulk data from %s is corrupted!\n", - libcfs_id2str(rpc->srpc_peer)); + libcfs_id2str(rpc->srpc_peer)); reply->brw_status = EBADMSG; } @@ -448,15 +451,15 @@ brw_server_handle(struct srpc_server_rpc *rpc) return 0; } - if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) { + if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) { replymsg->msg_ses_feats = LST_FEATS_MASK; reply->brw_status = EPROTO; return 0; } - if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) { + if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) { /* compat with old version */ - if ((reqst->brw_len & ~CFS_PAGE_MASK) != 0) { + if (reqst->brw_len & ~CFS_PAGE_MASK) { reply->brw_status = EINVAL; return 0; } @@ -468,7 +471,7 @@ brw_server_handle(struct srpc_server_rpc *rpc) replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; - if (reqst->brw_len == 0 || npg > LNET_MAX_IOV) { + if (!reqst->brw_len || npg > LNET_MAX_IOV) { reply->brw_status = EINVAL; return 0; } @@ -476,7 +479,7 @@ brw_server_handle(struct srpc_server_rpc *rpc) rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg, reqst->brw_len, reqst->brw_rw == LST_BRW_WRITE); - if (rc != 0) + if (rc) return rc; if (reqst->brw_rw == LST_BRW_READ) @@ -490,8 +493,8 @@ brw_server_handle(struct srpc_server_rpc *rpc) sfw_test_client_ops_t brw_test_client; void brw_init_test_client(void) { - brw_test_client.tso_init = brw_client_init; - brw_test_client.tso_fini = brw_client_fini; + brw_test_client.tso_init = brw_client_init; + brw_test_client.tso_fini = brw_client_fini; brw_test_client.tso_prep_rpc = brw_client_prep_rpc; brw_test_client.tso_done_rpc = brw_client_done_rpc; }; @@ -499,10 +502,9 @@ void brw_init_test_client(void) srpc_service_t brw_test_service; void brw_init_test_service(void) { - - brw_test_service.sv_id = SRPC_SERVICE_BRW; - brw_test_service.sv_name = "brw_test"; - brw_test_service.sv_handler = brw_server_handle; + brw_test_service.sv_id = SRPC_SERVICE_BRW; + brw_test_service.sv_name = "brw_test"; + brw_test_service.sv_handler = brw_server_handle; brw_test_service.sv_bulk_ready = brw_bulk_ready; - brw_test_service.sv_wi_total = brw_srv_workitems; + brw_test_service.sv_wi_total = brw_srv_workitems; } diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c index a534665403e5..5c7cb72eac9a 100644 --- a/drivers/staging/lustre/lnet/selftest/conctl.c +++ b/drivers/staging/lustre/lnet/selftest/conctl.c @@ -51,20 +51,19 @@ lst_session_new_ioctl(lstio_session_new_args_t *args) char *name; int rc; - if (args->lstio_ses_idp == NULL || /* address for output sid */ - args->lstio_ses_key == 0 || /* no key is specified */ - args->lstio_ses_namep == NULL || /* session name */ + if (!args->lstio_ses_idp || /* address for output sid */ + !args->lstio_ses_key || /* no key is specified */ + !args->lstio_ses_namep || /* session name */ args->lstio_ses_nmlen <= 0 || args->lstio_ses_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_ses_namep, - args->lstio_ses_nmlen)) { + if (copy_from_user(name, args->lstio_ses_namep, + args->lstio_ses_nmlen)) { LIBCFS_FREE(name, 
args->lstio_ses_nmlen + 1); return -EFAULT; } @@ -96,12 +95,12 @@ lst_session_info_ioctl(lstio_session_info_args_t *args) { /* no checking of key */ - if (args->lstio_ses_idp == NULL || /* address for output sid */ - args->lstio_ses_keyp == NULL || /* address for output key */ - args->lstio_ses_featp == NULL || /* address for output features */ - args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */ - args->lstio_ses_namep == NULL || /* address for output name */ - args->lstio_ses_nmlen <= 0 || + if (!args->lstio_ses_idp || /* address for output sid */ + !args->lstio_ses_keyp || /* address for output key */ + !args->lstio_ses_featp || /* address for output features */ + !args->lstio_ses_ndinfo || /* address for output ndinfo */ + !args->lstio_ses_namep || /* address for output name */ + args->lstio_ses_nmlen <= 0 || args->lstio_ses_nmlen > LST_NAME_SIZE) return -EINVAL; @@ -116,28 +115,28 @@ lst_session_info_ioctl(lstio_session_info_args_t *args) static int lst_debug_ioctl(lstio_debug_args_t *args) { - char *name = NULL; - int client = 1; - int rc; + char *name = NULL; + int client = 1; + int rc; if (args->lstio_dbg_key != console_session.ses_key) return -EACCES; - if (args->lstio_dbg_resultp == NULL) + if (!args->lstio_dbg_resultp) return -EINVAL; - if (args->lstio_dbg_namep != NULL && /* name of batch/group */ + if (args->lstio_dbg_namep && /* name of batch/group */ (args->lstio_dbg_nmlen <= 0 || args->lstio_dbg_nmlen > LST_NAME_SIZE)) return -EINVAL; - if (args->lstio_dbg_namep != NULL) { + if (args->lstio_dbg_namep) { LIBCFS_ALLOC(name, args->lstio_dbg_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; if (copy_from_user(name, args->lstio_dbg_namep, - args->lstio_dbg_nmlen)) { + args->lstio_dbg_nmlen)) { LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1); return -EFAULT; @@ -157,7 +156,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) case LST_OPC_BATCHSRV: client = 0; case LST_OPC_BATCHCLI: - if (name == NULL) + if (!name) goto out; rc = lstcon_batch_debug(args->lstio_dbg_timeout, @@ -165,7 +164,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) break; case LST_OPC_GROUP: - if (name == NULL) + if (!name) goto out; rc = lstcon_group_debug(args->lstio_dbg_timeout, @@ -174,7 +173,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) case LST_OPC_NODES: if (args->lstio_dbg_count <= 0 || - args->lstio_dbg_idsp == NULL) + !args->lstio_dbg_idsp) goto out; rc = lstcon_nodes_debug(args->lstio_dbg_timeout, @@ -188,7 +187,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) } out: - if (name != NULL) + if (name) LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1); return rc; @@ -203,18 +202,17 @@ lst_group_add_ioctl(lstio_group_add_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_namep == NULL || + if (!args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen); return -EFAULT; } @@ -231,24 +229,23 @@ lst_group_add_ioctl(lstio_group_add_args_t *args) static int lst_group_del_ioctl(lstio_group_del_args_t *args) { - int rc; - char *name; + int rc; + char *name; if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_namep == NULL || + if (!args->lstio_grp_namep || 
args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; } @@ -265,24 +262,23 @@ lst_group_del_ioctl(lstio_group_del_args_t *args) static int lst_group_update_ioctl(lstio_group_update_args_t *args) { - int rc; - char *name; + int rc; + char *name; if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_resultp == NULL || - args->lstio_grp_namep == NULL || + if (!args->lstio_grp_resultp || + !args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_grp_namep, + if (copy_from_user(name, args->lstio_grp_namep, args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; @@ -300,8 +296,8 @@ lst_group_update_ioctl(lstio_group_update_args_t *args) break; case LST_GROUP_RMND: - if (args->lstio_grp_count <= 0 || - args->lstio_grp_idsp == NULL) { + if (args->lstio_grp_count <= 0 || + !args->lstio_grp_idsp) { rc = -EINVAL; break; } @@ -330,21 +326,21 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_idsp == NULL || /* array of ids */ + if (!args->lstio_grp_idsp || /* array of ids */ args->lstio_grp_count <= 0 || - args->lstio_grp_resultp == NULL || - args->lstio_grp_featp == NULL || - args->lstio_grp_namep == NULL || + !args->lstio_grp_resultp || + !args->lstio_grp_featp || + !args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; if (copy_from_user(name, args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; @@ -357,7 +353,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) args->lstio_grp_resultp); LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); - if (rc == 0 && + if (!rc && copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) { return -EINVAL; } @@ -371,15 +367,15 @@ lst_group_list_ioctl(lstio_group_list_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_idx < 0 || - args->lstio_grp_namep == NULL || + if (args->lstio_grp_idx < 0 || + !args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; return lstcon_group_list(args->lstio_grp_idx, - args->lstio_grp_nmlen, - args->lstio_grp_namep); + args->lstio_grp_nmlen, + args->lstio_grp_namep); } static int @@ -393,24 +389,24 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_namep == NULL || + if (!args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; - if (args->lstio_grp_entp == NULL && /* output: group entry */ - args->lstio_grp_dentsp == NULL) /* output: node entry */ + if (!args->lstio_grp_entp && /* output: group entry */ + !args->lstio_grp_dentsp) /* output: node entry */ 
return -EINVAL; - if (args->lstio_grp_dentsp != NULL) { /* have node entry */ - if (args->lstio_grp_idxp == NULL || /* node index */ - args->lstio_grp_ndentp == NULL) /* # of node entry */ + if (args->lstio_grp_dentsp) { /* have node entry */ + if (!args->lstio_grp_idxp || /* node index */ + !args->lstio_grp_ndentp) /* # of node entry */ return -EINVAL; if (copy_from_user(&ndent, args->lstio_grp_ndentp, - sizeof(ndent)) || + sizeof(ndent)) || copy_from_user(&index, args->lstio_grp_idxp, - sizeof(index))) + sizeof(index))) return -EFAULT; if (ndent <= 0 || index < 0) @@ -418,12 +414,11 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) } LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; } @@ -435,10 +430,10 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); - if (rc != 0) + if (rc) return rc; - if (args->lstio_grp_dentsp != NULL && + if (args->lstio_grp_dentsp && (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) || copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent)))) return -EFAULT; @@ -455,18 +450,17 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_namep == NULL || + if (!args->lstio_bat_namep || args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } @@ -489,18 +483,17 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_namep == NULL || + if (!args->lstio_bat_namep || args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } @@ -524,19 +517,18 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_resultp == NULL || - args->lstio_bat_namep == NULL || + if (!args->lstio_bat_resultp || + !args->lstio_bat_namep || args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } @@ -554,14 +546,14 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args) static int lst_batch_query_ioctl(lstio_batch_query_args_t *args) { - char *name; - int rc; + char *name; + int rc; if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_resultp == NULL || - 
args->lstio_bat_namep == NULL || + if (!args->lstio_bat_resultp || + !args->lstio_bat_namep || args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; @@ -570,12 +562,11 @@ lst_batch_query_ioctl(lstio_batch_query_args_t *args) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } @@ -599,8 +590,8 @@ lst_batch_list_ioctl(lstio_batch_list_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_idx < 0 || - args->lstio_bat_namep == NULL || + if (args->lstio_bat_idx < 0 || + !args->lstio_bat_namep || args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; @@ -621,24 +612,24 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_namep == NULL || /* batch name */ + if (!args->lstio_bat_namep || /* batch name */ args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; - if (args->lstio_bat_entp == NULL && /* output: batch entry */ - args->lstio_bat_dentsp == NULL) /* output: node entry */ + if (!args->lstio_bat_entp && /* output: batch entry */ + !args->lstio_bat_dentsp) /* output: node entry */ return -EINVAL; - if (args->lstio_bat_dentsp != NULL) { /* have node entry */ - if (args->lstio_bat_idxp == NULL || /* node index */ - args->lstio_bat_ndentp == NULL) /* # of node entry */ + if (args->lstio_bat_dentsp) { /* have node entry */ + if (!args->lstio_bat_idxp || /* node index */ + !args->lstio_bat_ndentp) /* # of node entry */ return -EINVAL; if (copy_from_user(&index, args->lstio_bat_idxp, - sizeof(index)) || + sizeof(index)) || copy_from_user(&ndent, args->lstio_bat_ndentp, - sizeof(ndent))) + sizeof(ndent))) return -EFAULT; if (ndent <= 0 || index < 0) @@ -646,28 +637,27 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args) } LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_bat_namep, args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } name[args->lstio_bat_nmlen] = 0; - rc = lstcon_batch_info(name, - args->lstio_bat_entp, args->lstio_bat_server, - args->lstio_bat_testidx, &index, &ndent, - args->lstio_bat_dentsp); + rc = lstcon_batch_info(name, args->lstio_bat_entp, + args->lstio_bat_server, args->lstio_bat_testidx, + &index, &ndent, args->lstio_bat_dentsp); LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); - if (rc != 0) + if (rc) return rc; - if (args->lstio_bat_dentsp != NULL && + if (args->lstio_bat_dentsp && (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) || copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent)))) rc = -EFAULT; @@ -679,98 +669,104 @@ static int lst_stat_query_ioctl(lstio_stat_args_t *args) { int rc; - char *name; + char *name = NULL; /* TODO: not finished */ if (args->lstio_sta_key != console_session.ses_key) return -EACCES; - if (args->lstio_sta_resultp == NULL || - (args->lstio_sta_namep == NULL && - args->lstio_sta_idsp == NULL) || - args->lstio_sta_nmlen <= 0 || - args->lstio_sta_nmlen > LST_NAME_SIZE) + if (!args->lstio_sta_resultp) return -EINVAL; - if 
(args->lstio_sta_idsp != NULL && - args->lstio_sta_count <= 0) - return -EINVAL; - - LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1); - if (name == NULL) - return -ENOMEM; - - if (copy_from_user(name, args->lstio_sta_namep, - args->lstio_sta_nmlen)) { - LIBCFS_FREE(name, args->lstio_sta_nmlen + 1); - return -EFAULT; - } + if (args->lstio_sta_idsp) { + if (args->lstio_sta_count <= 0) + return -EINVAL; - if (args->lstio_sta_idsp == NULL) { - rc = lstcon_group_stat(name, args->lstio_sta_timeout, - args->lstio_sta_resultp); - } else { rc = lstcon_nodes_stat(args->lstio_sta_count, args->lstio_sta_idsp, args->lstio_sta_timeout, args->lstio_sta_resultp); - } + } else if (args->lstio_sta_namep) { + if (args->lstio_sta_nmlen <= 0 || + args->lstio_sta_nmlen > LST_NAME_SIZE) + return -EINVAL; + + LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1); + if (!name) + return -ENOMEM; - LIBCFS_FREE(name, args->lstio_sta_nmlen + 1); + rc = copy_from_user(name, args->lstio_sta_namep, + args->lstio_sta_nmlen); + if (!rc) + rc = lstcon_group_stat(name, args->lstio_sta_timeout, + args->lstio_sta_resultp); + else + rc = -EFAULT; + } else { + rc = -EINVAL; + } + if (name) + LIBCFS_FREE(name, args->lstio_sta_nmlen + 1); return rc; } static int lst_test_add_ioctl(lstio_test_args_t *args) { - char *batch_name; - char *src_name = NULL; - char *dst_name = NULL; - void *param = NULL; - int ret = 0; - int rc = -ENOMEM; - - if (args->lstio_tes_resultp == NULL || - args->lstio_tes_retp == NULL || - args->lstio_tes_bat_name == NULL || /* no specified batch */ + char *batch_name; + char *src_name = NULL; + char *dst_name = NULL; + void *param = NULL; + int ret = 0; + int rc = -ENOMEM; + + if (!args->lstio_tes_resultp || + !args->lstio_tes_retp || + !args->lstio_tes_bat_name || /* no specified batch */ args->lstio_tes_bat_nmlen <= 0 || args->lstio_tes_bat_nmlen > LST_NAME_SIZE || - args->lstio_tes_sgrp_name == NULL || /* no source group */ + !args->lstio_tes_sgrp_name || /* no source group */ args->lstio_tes_sgrp_nmlen <= 0 || args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE || - args->lstio_tes_dgrp_name == NULL || /* no target group */ + !args->lstio_tes_dgrp_name || /* no target group */ args->lstio_tes_dgrp_nmlen <= 0 || args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE) return -EINVAL; - if (args->lstio_tes_loop == 0 || /* negative is infinite */ + if (!args->lstio_tes_loop || /* negative is infinite */ args->lstio_tes_concur <= 0 || args->lstio_tes_dist <= 0 || args->lstio_tes_span <= 0) return -EINVAL; /* have parameter, check if parameter length is valid */ - if (args->lstio_tes_param != NULL && + if (args->lstio_tes_param && (args->lstio_tes_param_len <= 0 || - args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) + args->lstio_tes_param_len > + PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) return -EINVAL; LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); - if (batch_name == NULL) + if (!batch_name) return rc; LIBCFS_ALLOC(src_name, args->lstio_tes_sgrp_nmlen + 1); - if (src_name == NULL) + if (!src_name) goto out; LIBCFS_ALLOC(dst_name, args->lstio_tes_dgrp_nmlen + 1); - if (dst_name == NULL) + if (!dst_name) goto out; - if (args->lstio_tes_param != NULL) { + if (args->lstio_tes_param) { LIBCFS_ALLOC(param, args->lstio_tes_param_len); - if (param == NULL) + if (!param) goto out; + if (copy_from_user(param, args->lstio_tes_param, + args->lstio_tes_param_len)) { + rc = -EFAULT; + goto out; + } } rc = -EFAULT; @@ -779,54 +775,55 @@ static int lst_test_add_ioctl(lstio_test_args_t *args) copy_from_user(src_name, 
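lst_test_add_ioctl(), whose validation and buffer setup appear in this hunk, allocates three name buffers plus an optional parameter block and funnels every failure through a single cleanup label (the out: block in the next hunk); the parameter block is now copied from user space immediately after it is allocated rather than later alongside the names. A compact sketch of that allocate-then-unwind shape, under hypothetical demo_* names and with kmalloc/kfree in place of LIBCFS_ALLOC/LIBCFS_FREE:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static int demo_test_add(int bat_len, int src_len, int dst_len,
			 const void __user *uparam, int param_len)
{
	char *bat = NULL, *src = NULL, *dst = NULL;
	void *param = NULL;
	int rc = -ENOMEM;

	bat = kmalloc(bat_len + 1, GFP_KERNEL);
	if (!bat)
		goto out;
	src = kmalloc(src_len + 1, GFP_KERNEL);
	if (!src)
		goto out;
	dst = kmalloc(dst_len + 1, GFP_KERNEL);
	if (!dst)
		goto out;

	if (uparam) {
		param = kmalloc(param_len, GFP_KERNEL);
		if (!param)
			goto out;
		/* copy the parameter block as soon as it exists */
		if (copy_from_user(param, uparam, param_len)) {
			rc = -EFAULT;
			goto out;
		}
	}

	rc = 0;			/* name copies and lstcon_test_add() go here */
out:
	kfree(param);		/* kfree() accepts NULL, so one exit suffices */
	kfree(dst);
	kfree(src);
	kfree(bat);
	return rc;
}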
args->lstio_tes_sgrp_name, args->lstio_tes_sgrp_nmlen) || copy_from_user(dst_name, args->lstio_tes_dgrp_name, - args->lstio_tes_dgrp_nmlen) || - copy_from_user(param, args->lstio_tes_param, - args->lstio_tes_param_len)) + args->lstio_tes_dgrp_nmlen)) goto out; - rc = lstcon_test_add(batch_name, - args->lstio_tes_type, - args->lstio_tes_loop, - args->lstio_tes_concur, - args->lstio_tes_dist, args->lstio_tes_span, - src_name, dst_name, param, - args->lstio_tes_param_len, - &ret, args->lstio_tes_resultp); + rc = lstcon_test_add(batch_name, args->lstio_tes_type, + args->lstio_tes_loop, args->lstio_tes_concur, + args->lstio_tes_dist, args->lstio_tes_span, + src_name, dst_name, param, + args->lstio_tes_param_len, + &ret, args->lstio_tes_resultp); - if (ret != 0) + if (ret) rc = (copy_to_user(args->lstio_tes_retp, &ret, - sizeof(ret))) ? -EFAULT : 0; + sizeof(ret))) ? -EFAULT : 0; out: - if (batch_name != NULL) + if (batch_name) LIBCFS_FREE(batch_name, args->lstio_tes_bat_nmlen + 1); - if (src_name != NULL) + if (src_name) LIBCFS_FREE(src_name, args->lstio_tes_sgrp_nmlen + 1); - if (dst_name != NULL) + if (dst_name) LIBCFS_FREE(dst_name, args->lstio_tes_dgrp_nmlen + 1); - if (param != NULL) + if (param) LIBCFS_FREE(param, args->lstio_tes_param_len); return rc; } int -lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data) +lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr) { - char *buf; - int opc = data->ioc_u32[0]; - int rc; + char *buf; + struct libcfs_ioctl_data *data; + int opc; + int rc; if (cmd != IOC_LIBCFS_LNETST) return -EINVAL; + data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr); + + opc = data->ioc_u32[0]; + if (data->ioc_plen1 > PAGE_CACHE_SIZE) return -EINVAL; LIBCFS_ALLOC(buf, data->ioc_plen1); - if (buf == NULL) + if (!buf) return -ENOMEM; /* copy in parameter */ @@ -916,7 +913,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data) } if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat, - sizeof(lstcon_trans_stat_t))) + sizeof(lstcon_trans_stat_t))) rc = -EFAULT; out: mutex_unlock(&console_session.ses_mutex); diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c index 1066c70434b1..bcd78888f9cc 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.c +++ b/drivers/staging/lustre/lnet/selftest/conrpc.c @@ -54,14 +54,16 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc) { lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv; - LASSERT(crpc != NULL && rpc == crpc->crp_rpc); + LASSERT(crpc && rpc == crpc->crp_rpc); LASSERT(crpc->crp_posted && !crpc->crp_finished); spin_lock(&rpc->crpc_lock); - if (crpc->crp_trans == NULL) { - /* Orphan RPC is not in any transaction, - * I'm just a poor body and nobody loves me */ + if (!crpc->crp_trans) { + /* + * Orphan RPC is not in any transaction, + * I'm just a poor body and nobody loves me + */ spin_unlock(&rpc->crpc_lock); /* release it */ @@ -72,11 +74,11 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc) /* not an orphan RPC */ crpc->crp_finished = 1; - if (crpc->crp_stamp == 0) { + if (!crpc->crp_stamp) { /* not aborted */ - LASSERT(crpc->crp_status == 0); + LASSERT(!crpc->crp_status); - crpc->crp_stamp = cfs_time_current(); + crpc->crp_stamp = cfs_time_current(); crpc->crp_status = rpc->crpc_status; } @@ -94,16 +96,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats, crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service, feats, bulk_npg, bulk_len, lstcon_rpc_done, (void *)crpc); - if (crpc->crp_rpc == NULL) + if 
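The last change in this hunk switches lstcon_ioctl_entry() to take the generic struct libcfs_ioctl_hdr and recover the full ioctl payload with container_of(), reading the opcode only after that recovery. A self-contained sketch of the same step, using made-up demo_* structures that share the embedded-header layout:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_ioctl_hdr {
	__u32 ioc_len;
	__u32 ioc_version;
};

struct demo_ioctl_data {
	struct demo_ioctl_hdr ioc_hdr;	/* header embedded at a known member */
	__u32 ioc_u32[4];
};

static int demo_ioctl_entry(struct demo_ioctl_hdr *hdr)
{
	struct demo_ioctl_data *data;
	int opc;

	/* step from the embedded header back to its containing structure */
	data = container_of(hdr, struct demo_ioctl_data, ioc_hdr);

	opc = data->ioc_u32[0];		/* opcode lives in the container */
	return opc;
}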
(!crpc->crp_rpc) return -ENOMEM; - crpc->crp_trans = NULL; - crpc->crp_node = nd; - crpc->crp_posted = 0; + crpc->crp_trans = NULL; + crpc->crp_node = nd; + crpc->crp_posted = 0; crpc->crp_finished = 0; crpc->crp_unpacked = 0; - crpc->crp_status = 0; - crpc->crp_stamp = 0; + crpc->crp_status = 0; + crpc->crp_stamp = 0; crpc->crp_embedded = embedded; INIT_LIST_HEAD(&crpc->crp_link); @@ -121,22 +123,21 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats, spin_lock(&console_session.ses_rpc_lock); - if (!list_empty(&console_session.ses_rpc_freelist)) { - crpc = list_entry(console_session.ses_rpc_freelist.next, - lstcon_rpc_t, crp_link); + crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist, + lstcon_rpc_t, crp_link); + if (crpc) list_del_init(&crpc->crp_link); - } spin_unlock(&console_session.ses_rpc_lock); - if (crpc == NULL) { + if (!crpc) { LIBCFS_ALLOC(crpc, sizeof(*crpc)); - if (crpc == NULL) + if (!crpc) return -ENOMEM; } rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc); - if (rc == 0) { + if (!rc) { *crpcpp = crpc; return 0; } @@ -155,7 +156,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc) LASSERT(list_empty(&crpc->crp_link)); for (i = 0; i < bulk->bk_niov; i++) { - if (bulk->bk_iovs[i].kiov_page == NULL) + if (!bulk->bk_iovs[i].kiov_page) continue; __free_page(bulk->bk_iovs[i].kiov_page); @@ -172,7 +173,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc) spin_lock(&console_session.ses_rpc_lock); list_add(&crpc->crp_link, - &console_session.ses_rpc_freelist); + &console_session.ses_rpc_freelist); spin_unlock(&console_session.ses_rpc_lock); } @@ -186,7 +187,7 @@ lstcon_rpc_post(lstcon_rpc_t *crpc) { lstcon_rpc_trans_t *trans = crpc->crp_trans; - LASSERT(trans != NULL); + LASSERT(trans); atomic_inc(&trans->tas_remaining); crpc->crp_posted = 1; @@ -234,15 +235,17 @@ lstcon_rpc_trans_name(int transop) } int -lstcon_rpc_trans_prep(struct list_head *translist, - int transop, lstcon_rpc_trans_t **transpp) +lstcon_rpc_trans_prep(struct list_head *translist, int transop, + lstcon_rpc_trans_t **transpp) { lstcon_rpc_trans_t *trans; - if (translist != NULL) { + if (translist) { list_for_each_entry(trans, translist, tas_link) { - /* Can't enqueue two private transaction on - * the same object */ + /* + * Can't enqueue two private transaction on + * the same object + */ if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE) return -EPERM; } @@ -250,12 +253,12 @@ lstcon_rpc_trans_prep(struct list_head *translist, /* create a trans group */ LIBCFS_ALLOC(trans, sizeof(*trans)); - if (trans == NULL) + if (!trans) return -ENOMEM; trans->tas_opc = transop; - if (translist == NULL) + if (!translist) INIT_LIST_HEAD(&trans->tas_olink); else list_add_tail(&trans->tas_olink, translist); @@ -285,8 +288,8 @@ void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error) { srpc_client_rpc_t *rpc; - lstcon_rpc_t *crpc; - lstcon_node_t *nd; + lstcon_rpc_t *crpc; + lstcon_node_t *nd; list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { rpc = crpc->crp_rpc; @@ -294,8 +297,8 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error) spin_lock(&rpc->crpc_lock); if (!crpc->crp_posted || /* not posted */ - crpc->crp_stamp != 0) { /* rpc done or aborted already */ - if (crpc->crp_stamp == 0) { + crpc->crp_stamp) { /* rpc done or aborted already */ + if (!crpc->crp_stamp) { crpc->crp_stamp = cfs_time_current(); crpc->crp_status = -EINTR; } @@ -303,14 +306,14 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error) continue; } - crpc->crp_stamp = cfs_time_current(); + 
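lstcon_rpc_prep() in this hunk replaces the open-coded list_empty()/list_entry() pair with list_first_entry_or_null(), which pops a recycled descriptor when the freelist has one and otherwise leaves the pointer NULL so a fresh allocation can happen outside the lock. A small free-standing sketch of that freelist pop, with a hypothetical demo_rpc type:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_rpc {
	struct list_head link;
	/* ... payload ... */
};

static LIST_HEAD(demo_freelist);
static DEFINE_SPINLOCK(demo_freelist_lock);

/* Reuse a recycled descriptor when possible, allocate only as a fallback. */
static struct demo_rpc *demo_rpc_get(void)
{
	struct demo_rpc *rpc;

	spin_lock(&demo_freelist_lock);
	rpc = list_first_entry_or_null(&demo_freelist, struct demo_rpc, link);
	if (rpc)
		list_del_init(&rpc->link);
	spin_unlock(&demo_freelist_lock);

	if (!rpc)
		rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);	/* slow path */

	return rpc;
}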
crpc->crp_stamp = cfs_time_current(); crpc->crp_status = error; spin_unlock(&rpc->crpc_lock); sfw_abort_rpc(rpc); - if (error != ETIMEDOUT) + if (error != -ETIMEDOUT) continue; nd = crpc->crp_node; @@ -329,7 +332,7 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans) !list_empty(&trans->tas_olink)) /* Not an end session RPC */ return 1; - return (atomic_read(&trans->tas_remaining) == 0) ? 1 : 0; + return !atomic_read(&trans->tas_remaining) ? 1 : 0; } int @@ -366,7 +369,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout) if (console_session.ses_shutdown) rc = -ESHUTDOWN; - if (rc != 0 || atomic_read(&trans->tas_remaining) != 0) { + if (rc || atomic_read(&trans->tas_remaining)) { /* treat short timeout as canceled */ if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2) rc = -EINTR; @@ -385,14 +388,14 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout) static int lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp) { - lstcon_node_t *nd = crpc->crp_node; + lstcon_node_t *nd = crpc->crp_node; srpc_client_rpc_t *rpc = crpc->crp_rpc; srpc_generic_reply_t *rep; - LASSERT(nd != NULL && rpc != NULL); - LASSERT(crpc->crp_stamp != 0); + LASSERT(nd && rpc); + LASSERT(crpc->crp_stamp); - if (crpc->crp_status != 0) { + if (crpc->crp_status) { *msgpp = NULL; return crpc->crp_status; } @@ -422,23 +425,23 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp) void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat) { - lstcon_rpc_t *crpc; + lstcon_rpc_t *crpc; srpc_msg_t *rep; int error; - LASSERT(stat != NULL); + LASSERT(stat); memset(stat, 0, sizeof(*stat)); list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { lstcon_rpc_stat_total(stat, 1); - LASSERT(crpc->crp_stamp != 0); + LASSERT(crpc->crp_stamp); error = lstcon_rpc_get_reply(crpc, &rep); - if (error != 0) { + if (error) { lstcon_rpc_stat_failure(stat, 1); - if (stat->trs_rpc_errno == 0) + if (!stat->trs_rpc_errno) stat->trs_rpc_errno = -error; continue; @@ -449,7 +452,7 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat) lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat); } - if (trans->tas_opc == LST_TRANS_SESNEW && stat->trs_fwk_errno == 0) { + if (trans->tas_opc == LST_TRANS_SESNEW && !stat->trs_fwk_errno) { stat->trs_fwk_errno = lstcon_session_feats_check(trans->tas_features); } @@ -460,17 +463,15 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat) lstcon_rpc_stat_failure(stat, 0), lstcon_rpc_stat_total(stat, 0), stat->trs_rpc_errno, stat->trs_fwk_errno); - - return; } int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, - struct list_head *head_up, + struct list_head __user *head_up, lstcon_rpc_readent_func_t readent) { struct list_head tmp; - struct list_head *next; + struct list_head __user *next; lstcon_rpc_ent_t *ent; srpc_generic_reply_t *rep; lstcon_rpc_t *crpc; @@ -480,13 +481,13 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, struct timeval tv; int error; - LASSERT(head_up != NULL); + LASSERT(head_up); next = head_up; list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) { if (copy_from_user(&tmp, next, - sizeof(struct list_head))) + sizeof(struct list_head))) return -EFAULT; if (tmp.next == head_up) @@ -496,7 +497,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, ent = list_entry(next, lstcon_rpc_ent_t, rpe_link); - LASSERT(crpc->crp_stamp != 0); + LASSERT(crpc->crp_stamp); error = lstcon_rpc_get_reply(crpc, &msg); @@ -506,33 +507,32 @@ 
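lstcon_rpc_trans_abort() in this hunk changes the timeout comparison from ETIMEDOUT to -ETIMEDOUT: in-kernel status values travel as negative errnos, so the positive constant could never match and timed-out peers were never marked down. A tiny illustration of the convention, with a hypothetical demo_node/demo_mark_down pair:

#include <linux/errno.h>

struct demo_node {
	int state;
};

static void demo_mark_down(struct demo_node *nd)
{
	nd->state = 0;		/* placeholder for the LST_NODE_DOWN handling */
}

static void demo_handle_abort(struct demo_node *nd, int error)
{
	/* error arrives as a negative errno, e.g. -ETIMEDOUT or -EINTR */
	if (error != -ETIMEDOUT)
		return;

	demo_mark_down(nd);	/* only unreachable peers get marked down */
}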
lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, (unsigned long)console_session.ses_id.ses_stamp); jiffies_to_timeval(dur, &tv); - if (copy_to_user(&ent->rpe_peer, - &nd->nd_id, sizeof(lnet_process_id_t)) || + if (copy_to_user(&ent->rpe_peer, &nd->nd_id, + sizeof(lnet_process_id_t)) || copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) || - copy_to_user(&ent->rpe_state, - &nd->nd_state, sizeof(nd->nd_state)) || + copy_to_user(&ent->rpe_state, &nd->nd_state, + sizeof(nd->nd_state)) || copy_to_user(&ent->rpe_rpc_errno, &error, - sizeof(error))) + sizeof(error))) return -EFAULT; - if (error != 0) + if (error) continue; /* RPC is done */ rep = (srpc_generic_reply_t *)&msg->msg_body.reply; - if (copy_to_user(&ent->rpe_sid, - &rep->sid, sizeof(lst_sid_t)) || - copy_to_user(&ent->rpe_fwk_errno, - &rep->status, sizeof(rep->status))) + if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) || + copy_to_user(&ent->rpe_fwk_errno, &rep->status, + sizeof(rep->status))) return -EFAULT; - if (readent == NULL) + if (!readent) continue; error = readent(trans->tas_opc, msg, ent); - if (error != 0) + if (error) return error; } @@ -547,8 +547,7 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) lstcon_rpc_t *tmp; int count = 0; - list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, - crp_link) { + list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) { rpc = crpc->crp_rpc; spin_lock(&rpc->crpc_lock); @@ -563,14 +562,15 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) continue; } - /* rpcs can be still not callbacked (even LNetMDUnlink is called) + /* + * rpcs can be still not callbacked (even LNetMDUnlink is called) * because huge timeout for inaccessible network, don't make * user wait for them, just abandon them, they will be recycled - * in callback */ + * in callback + */ + LASSERT(crpc->crp_status); - LASSERT(crpc->crp_status != 0); - - crpc->crp_node = NULL; + crpc->crp_node = NULL; crpc->crp_trans = NULL; list_del_init(&crpc->crp_link); count++; @@ -580,7 +580,7 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) atomic_dec(&trans->tas_remaining); } - LASSERT(atomic_read(&trans->tas_remaining) == 0); + LASSERT(!atomic_read(&trans->tas_remaining)); list_del(&trans->tas_link); if (!list_empty(&trans->tas_olink)) @@ -590,8 +590,6 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) lstcon_rpc_trans_name(trans->tas_opc), count); LIBCFS_FREE(trans, sizeof(*trans)); - - return; } int @@ -606,12 +604,12 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop, case LST_TRANS_SESNEW: rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION, feats, 0, 0, crpc); - if (rc != 0) + if (rc) return rc; msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst; - msrq->mksn_sid = console_session.ses_id; - msrq->mksn_force = console_session.ses_force; + msrq->mksn_sid = console_session.ses_id; + msrq->mksn_force = console_session.ses_force; strlcpy(msrq->mksn_name, console_session.ses_name, sizeof(msrq->mksn_name)); break; @@ -619,7 +617,7 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop, case LST_TRANS_SESEND: rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION, feats, 0, 0, crpc); - if (rc != 0) + if (rc) return rc; rsrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst; @@ -640,12 +638,12 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) int rc; rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc); - if (rc != 0) + if (rc) return rc; drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst; - drq->dbg_sid = 
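The remainder of lstcon_rpc_trans_interpreter() in this hunk walks a result list whose nodes live entirely in user memory: each struct list_head is fetched with copy_from_user() before its next pointer is followed, and the per-node results are written back with copy_to_user(). A reduced sketch of that traversal, assuming a hypothetical demo_ent layout for the user-side record:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/uaccess.h>

struct demo_ent {
	struct list_head link;		/* chained by the user-space tool */
	int rpc_errno;
};

static int demo_fill_results(struct list_head __user *head,
			     const int *errnos, int nresults)
{
	struct list_head tmp;
	struct list_head __user *next = head;
	struct demo_ent __user *ent;
	int i;

	for (i = 0; i < nresults; i++) {
		/* pull the user-space node in before trusting its pointers */
		if (copy_from_user(&tmp, next, sizeof(tmp)))
			return -EFAULT;

		if (tmp.next == head)	/* user list exhausted */
			break;

		next = tmp.next;
		ent = list_entry(next, struct demo_ent, link);

		if (copy_to_user(&ent->rpc_errno, &errnos[i], sizeof(int)))
			return -EFAULT;
	}

	return 0;
}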
console_session.ses_id; + drq->dbg_sid = console_session.ses_id; drq->dbg_flags = 0; return rc; @@ -655,28 +653,28 @@ int lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc) { - lstcon_batch_t *batch; + lstcon_batch_t *batch; srpc_batch_reqst_t *brq; - int rc; + int rc; rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc); - if (rc != 0) + if (rc) return rc; brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst; - brq->bar_sid = console_session.ses_id; - brq->bar_bid = tsb->tsb_id; + brq->bar_sid = console_session.ses_id; + brq->bar_bid = tsb->tsb_id; brq->bar_testidx = tsb->tsb_index; - brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN : - (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP : - SRPC_BATCH_OPC_QUERY); + brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN : + (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP : + SRPC_BATCH_OPC_QUERY); if (transop != LST_TRANS_TSBRUN && transop != LST_TRANS_TSBSTOP) return 0; - LASSERT(tsb->tsb_index == 0); + LASSERT(!tsb->tsb_index); batch = (lstcon_batch_t *)tsb; brq->bar_arg = batch->bat_arg; @@ -688,15 +686,15 @@ int lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) { srpc_stat_reqst_t *srq; - int rc; + int rc; rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc); - if (rc != 0) + if (rc) return rc; srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst; - srq->str_sid = console_session.ses_id; + srq->str_sid = console_session.ses_id; srq->str_type = 0; /* XXX remove it */ return 0; @@ -736,7 +734,7 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx, return -EINVAL; start = ((idx / dist) * span) % grp->grp_nnode; - end = ((idx / dist) * span + span - 1) % grp->grp_nnode; + end = ((idx / dist) * span + span - 1) % grp->grp_nnode; list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) { nd = ndl->ndl_node; @@ -776,7 +774,7 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req) { test_ping_req_t *prq = &req->tsr_u.ping; - prq->png_size = param->png_size; + prq->png_size = param->png_size; prq->png_flags = param->png_flags; /* TODO dest */ return 0; @@ -787,9 +785,9 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) { test_bulk_req_t *brq = &req->tsr_u.bulk_v0; - brq->blk_opc = param->blk_opc; - brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / - PAGE_CACHE_SIZE; + brq->blk_opc = param->blk_opc; + brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE; brq->blk_flags = param->blk_flags; return 0; @@ -800,9 +798,9 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) { test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1; - brq->blk_opc = param->blk_opc; - brq->blk_flags = param->blk_flags; - brq->blk_len = param->blk_size; + brq->blk_opc = param->blk_opc; + brq->blk_flags = param->blk_flags; + brq->blk_len = param->blk_size; brq->blk_offset = 0; /* reserved */ return 0; @@ -812,27 +810,27 @@ int lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, lstcon_test_t *test, lstcon_rpc_t **crpc) { - lstcon_group_t *sgrp = test->tes_src_grp; - lstcon_group_t *dgrp = test->tes_dst_grp; + lstcon_group_t *sgrp = test->tes_src_grp; + lstcon_group_t *dgrp = test->tes_dst_grp; srpc_test_reqst_t *trq; - srpc_bulk_t *bulk; - int i; - int npg = 0; - int nob = 0; - int rc = 0; + srpc_bulk_t *bulk; + int i; + int npg = 0; + int nob = 0; + int rc = 0; if (transop == LST_TRANS_TSBCLIADD) { npg = 
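lstcon_bulkrpc_v0_prep() above converts a byte count into a page count with the usual (size + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE round-up. The same computation can be written with the DIV_ROUND_UP() helper; a sketch using PAGE_SIZE, which PAGE_CACHE_SIZE aliased at the time:

#include <linux/kernel.h>
#include <linux/mm.h>

/* Number of whole pages needed to hold nbytes of bulk data. */
static unsigned int demo_bytes_to_pages(unsigned int nbytes)
{
	return DIV_ROUND_UP(nbytes, PAGE_SIZE);
}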
sfw_id_pages(test->tes_span); - nob = (feats & LST_FEAT_BULK_LEN) == 0 ? + nob = !(feats & LST_FEAT_BULK_LEN) ? npg * PAGE_CACHE_SIZE : sizeof(lnet_process_id_packed_t) * test->tes_span; } rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc); - if (rc != 0) + if (rc) return rc; - trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst; + trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst; if (transop == LST_TRANS_TSBSRVADD) { int ndist = (sgrp->grp_nnode + test->tes_dist - 1) / @@ -842,27 +840,27 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, int nmax = (ndist + nspan - 1) / nspan; trq->tsr_ndest = 0; - trq->tsr_loop = nmax * test->tes_dist * test->tes_concur; + trq->tsr_loop = nmax * test->tes_dist * test->tes_concur; } else { bulk = &(*crpc)->crp_rpc->crpc_bulk; for (i = 0; i < npg; i++) { - int len; + int len; LASSERT(nob > 0); - len = (feats & LST_FEAT_BULK_LEN) == 0 ? + len = !(feats & LST_FEAT_BULK_LEN) ? PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE); nob -= len; bulk->bk_iovs[i].kiov_offset = 0; - bulk->bk_iovs[i].kiov_len = len; - bulk->bk_iovs[i].kiov_page = + bulk->bk_iovs[i].kiov_len = len; + bulk->bk_iovs[i].kiov_page = alloc_page(GFP_KERNEL); - if (bulk->bk_iovs[i].kiov_page == NULL) { + if (!bulk->bk_iovs[i].kiov_page) { lstcon_rpc_put(*crpc); return -ENOMEM; } @@ -877,19 +875,19 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, test->tes_dist, test->tes_span, npg, &bulk->bk_iovs[0]); - if (rc != 0) { + if (rc) { lstcon_rpc_put(*crpc); return rc; } trq->tsr_ndest = test->tes_span; - trq->tsr_loop = test->tes_loop; + trq->tsr_loop = test->tes_loop; } - trq->tsr_sid = console_session.ses_id; - trq->tsr_bid = test->tes_hdr.tsb_id; - trq->tsr_concur = test->tes_concur; - trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0; + trq->tsr_sid = console_session.ses_id; + trq->tsr_bid = test->tes_hdr.tsb_id; + trq->tsr_concur = test->tes_concur; + trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 
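The client-side branch of lstcon_testrpc_prep() in this hunk fills the bulk descriptor page by page with alloc_page(), and on a mid-loop failure relies on lstcon_rpc_put() to release whatever was already attached. A generic sketch of the same allocate-or-unwind loop over a plain page array, not tied to the srpc bulk structures:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate npg pages into the array, releasing all of them on failure. */
static int demo_alloc_bulk(struct page **pages, int npg)
{
	int i;

	for (i = 0; i < npg; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			while (--i >= 0)
				__free_page(pages[i]);
			return -ENOMEM;
		}
	}

	return 0;
}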
1 : 0; trq->tsr_stop_onerr = !!test->tes_stop_onerr; switch (test->tes_type) { @@ -901,7 +899,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, case LST_TEST_BULK: trq->tsr_service = SRPC_SERVICE_BRW; - if ((feats & LST_FEAT_BULK_LEN) == 0) { + if (!(feats & LST_FEAT_BULK_LEN)) { rc = lstcon_bulkrpc_v0_prep((lst_test_bulk_param_t *) &test->tes_param[0], trq); } else { @@ -923,10 +921,10 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans, lstcon_node_t *nd, srpc_msg_t *reply) { srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply; - int status = mksn_rep->mksn_status; + int status = mksn_rep->mksn_status; - if (status == 0 && - (reply->msg_ses_feats & ~LST_FEATS_MASK) != 0) { + if (!status && + (reply->msg_ses_feats & ~LST_FEATS_MASK)) { mksn_rep->mksn_status = EPROTO; status = EPROTO; } @@ -937,22 +935,27 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans, reply->msg_ses_feats); } - if (status != 0) + if (status) return status; if (!trans->tas_feats_updated) { - trans->tas_feats_updated = 1; - trans->tas_features = reply->msg_ses_feats; + spin_lock(&console_session.ses_rpc_lock); + if (!trans->tas_feats_updated) { /* recheck with lock */ + trans->tas_feats_updated = 1; + trans->tas_features = reply->msg_ses_feats; + } + spin_unlock(&console_session.ses_rpc_lock); } if (reply->msg_ses_feats != trans->tas_features) { CNETERR("Framework features %x from %s is different with features on this transaction: %x\n", - reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid), - trans->tas_features); - status = mksn_rep->mksn_status = EPROTO; + reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid), + trans->tas_features); + mksn_rep->mksn_status = EPROTO; + status = EPROTO; } - if (status == 0) { + if (!status) { /* session timeout on remote node */ nd->nd_timeout = mksn_rep->mksn_timeout; } @@ -964,17 +967,17 @@ void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, lstcon_node_t *nd, lstcon_trans_stat_t *stat) { - srpc_rmsn_reply_t *rmsn_rep; + srpc_rmsn_reply_t *rmsn_rep; srpc_debug_reply_t *dbg_rep; srpc_batch_reply_t *bat_rep; - srpc_test_reply_t *test_rep; - srpc_stat_reply_t *stat_rep; - int rc = 0; + srpc_test_reply_t *test_rep; + srpc_stat_reply_t *stat_rep; + int rc = 0; switch (trans->tas_opc) { case LST_TRANS_SESNEW: rc = lstcon_sesnew_stat_reply(trans, nd, msg); - if (rc == 0) { + if (!rc) { lstcon_sesop_stat_success(stat, 1); return; } @@ -985,7 +988,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, case LST_TRANS_SESEND: rmsn_rep = &msg->msg_body.rmsn_reply; /* ESRCH is not an error for end session */ - if (rmsn_rep->rmsn_status == 0 || + if (!rmsn_rep->rmsn_status || rmsn_rep->rmsn_status == ESRCH) { lstcon_sesop_stat_success(stat, 1); return; @@ -1014,7 +1017,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, case LST_TRANS_TSBSTOP: bat_rep = &msg->msg_body.bat_reply; - if (bat_rep->bar_status == 0) { + if (!bat_rep->bar_status) { lstcon_tsbop_stat_success(stat, 1); return; } @@ -1033,12 +1036,12 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, case LST_TRANS_TSBSRVQRY: bat_rep = &msg->msg_body.bat_reply; - if (bat_rep->bar_active != 0) + if (bat_rep->bar_active) lstcon_tsbqry_stat_run(stat, 1); else lstcon_tsbqry_stat_idle(stat, 1); - if (bat_rep->bar_status == 0) + if (!bat_rep->bar_status) return; lstcon_tsbqry_stat_failure(stat, 1); @@ -1049,7 +1052,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, case LST_TRANS_TSBSRVADD: test_rep = 
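lstcon_sesnew_stat_reply() in this hunk now records the first reply's feature mask under console_session.ses_rpc_lock with a recheck after taking the lock, so two racing reply handlers cannot both believe they were first. The classic cheap-test-then-locked-recheck shape, sketched against a hypothetical per-transaction structure rather than the real console session:

#include <linux/spinlock.h>

struct demo_trans {
	spinlock_t	lock;
	unsigned int	feats_updated;
	unsigned int	features;
};

static void demo_record_features(struct demo_trans *trans, unsigned int feats)
{
	if (trans->feats_updated)	/* common case: already recorded */
		return;

	spin_lock(&trans->lock);
	if (!trans->feats_updated) {	/* recheck now that we hold the lock */
		trans->feats_updated = 1;
		trans->features = feats;
	}
	spin_unlock(&trans->lock);
}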
&msg->msg_body.tes_reply; - if (test_rep->tsr_status == 0) { + if (!test_rep->tsr_status) { lstcon_tsbop_stat_success(stat, 1); return; } @@ -1061,7 +1064,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, case LST_TRANS_STATQRY: stat_rep = &msg->msg_body.stat_reply; - if (stat_rep->str_status == 0) { + if (!stat_rep->str_status) { lstcon_statqry_stat_success(stat, 1); return; } @@ -1074,10 +1077,8 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, LBUG(); } - if (stat->trs_fwk_errno == 0) + if (!stat->trs_fwk_errno) stat->trs_fwk_errno = rc; - - return; } int @@ -1096,22 +1097,22 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist, /* Creating session RPG for list of nodes */ rc = lstcon_rpc_trans_prep(translist, transop, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction %d: %d\n", transop, rc); return rc; } feats = trans->tas_features; list_for_each_entry(ndl, ndlist, ndl_link) { - rc = condition == NULL ? 1 : + rc = !condition ? 1 : condition(transop, ndl->ndl_node, arg); - if (rc == 0) + if (!rc) continue; if (rc < 0) { CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n", - transop, rc); + transop, rc); break; } @@ -1146,7 +1147,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist, break; } - if (rc != 0) { + if (rc) { CERROR("Failed to create RPC for transaction %s: %d\n", lstcon_rpc_trans_name(transop), rc); break; @@ -1155,7 +1156,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist, lstcon_rpc_trans_addreq(trans, rpc); } - if (rc == 0) { + if (!rc) { *transpp = trans; return 0; } @@ -1168,7 +1169,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist, static void lstcon_rpc_pinger(void *arg) { - stt_timer_t *ptimer = (stt_timer_t *)arg; + struct stt_timer *ptimer = (struct stt_timer *)arg; lstcon_rpc_trans_t *trans; lstcon_rpc_t *crpc; srpc_msg_t *rep; @@ -1196,7 +1197,7 @@ lstcon_rpc_pinger(void *arg) trans = console_session.ses_ping; - LASSERT(trans != NULL); + LASSERT(trans); list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) { nd = ndl->ndl_node; @@ -1208,7 +1209,7 @@ lstcon_rpc_pinger(void *arg) rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND, trans->tas_features, &crpc); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); break; } @@ -1221,7 +1222,7 @@ lstcon_rpc_pinger(void *arg) crpc = &nd->nd_ping; - if (crpc->crp_rpc != NULL) { + if (crpc->crp_rpc) { LASSERT(crpc->crp_trans == trans); LASSERT(!list_empty(&crpc->crp_link)); @@ -1247,20 +1248,20 @@ lstcon_rpc_pinger(void *arg) if (nd->nd_state != LST_NODE_ACTIVE) continue; - intv = (jiffies - nd->nd_stamp) / HZ; + intv = (jiffies - nd->nd_stamp) / msecs_to_jiffies(MSEC_PER_SEC); if (intv < nd->nd_timeout / 2) continue; rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG, trans->tas_features, 0, 0, 1, crpc); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); break; } drq = &crpc->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst; - drq->dbg_sid = console_session.ses_id; + drq->dbg_sid = console_session.ses_id; drq->dbg_flags = 0; lstcon_rpc_trans_addreq(trans, crpc); @@ -1285,15 +1286,15 @@ lstcon_rpc_pinger(void *arg) int lstcon_rpc_pinger_start(void) { - stt_timer_t *ptimer; + struct stt_timer *ptimer; int rc; LASSERT(list_empty(&console_session.ses_rpc_freelist)); - LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0); + LASSERT(!atomic_read(&console_session.ses_rpc_counter)); rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING, &console_session.ses_ping); - if (rc != 0) { + if (rc) { CERROR("Failed to create console pinger\n"); 
return rc; } @@ -1327,6 +1328,7 @@ lstcon_rpc_cleanup_wait(void) { lstcon_rpc_trans_t *trans; lstcon_rpc_t *crpc; + lstcon_rpc_t *temp; struct list_head *pacer; struct list_head zlist; @@ -1337,7 +1339,7 @@ lstcon_rpc_cleanup_wait(void) while (!list_empty(&console_session.ses_trans_list)) { list_for_each(pacer, &console_session.ses_trans_list) { trans = list_entry(pacer, lstcon_rpc_trans_t, - tas_link); + tas_link); CDEBUG(D_NET, "Session closed, wakeup transaction %s\n", lstcon_rpc_trans_name(trans->tas_opc)); @@ -1356,7 +1358,7 @@ lstcon_rpc_cleanup_wait(void) spin_lock(&console_session.ses_rpc_lock); - lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0), + lst_wait_until(!atomic_read(&console_session.ses_rpc_counter), console_session.ses_rpc_lock, "Network is not accessible or target is down, waiting for %d console RPCs to being recycled\n", atomic_read(&console_session.ses_rpc_counter)); @@ -1366,9 +1368,7 @@ lstcon_rpc_cleanup_wait(void) spin_unlock(&console_session.ses_rpc_lock); - while (!list_empty(&zlist)) { - crpc = list_entry(zlist.next, lstcon_rpc_t, crp_link); - + list_for_each_entry_safe(crpc, temp, &zlist, crp_link) { list_del(&crpc->crp_link); LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t)); } @@ -1394,5 +1394,5 @@ void lstcon_rpc_module_fini(void) { LASSERT(list_empty(&console_session.ses_rpc_freelist)); - LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0); + LASSERT(!atomic_read(&console_session.ses_rpc_counter)); } diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h index 95c832ff7375..3e7839dad5bb 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.h +++ b/drivers/staging/lustre/lnet/selftest/conrpc.h @@ -51,12 +51,12 @@ #include "selftest.h" /* Console rpc and rpc transaction */ -#define LST_TRANS_TIMEOUT 30 -#define LST_TRANS_MIN_TIMEOUT 3 +#define LST_TRANS_TIMEOUT 30 +#define LST_TRANS_MIN_TIMEOUT 3 #define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT) -#define LST_PING_INTERVAL 8 +#define LST_PING_INTERVAL 8 struct lstcon_rpc_trans; struct lstcon_tsb_hdr; @@ -64,49 +64,50 @@ struct lstcon_test; struct lstcon_node; typedef struct lstcon_rpc { - struct list_head crp_link; /* chain on rpc transaction */ - srpc_client_rpc_t *crp_rpc; /* client rpc */ - struct lstcon_node *crp_node; /* destination node */ - struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */ - - unsigned int crp_posted:1; /* rpc is posted */ - unsigned int crp_finished:1; /* rpc is finished */ - unsigned int crp_unpacked:1; /* reply is unpacked */ + struct list_head crp_link; /* chain on rpc transaction */ + srpc_client_rpc_t *crp_rpc; /* client rpc */ + struct lstcon_node *crp_node; /* destination node */ + struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */ + + unsigned int crp_posted:1; /* rpc is posted */ + unsigned int crp_finished:1; /* rpc is finished */ + unsigned int crp_unpacked:1; /* reply is unpacked */ /** RPC is embedded in other structure and can't free it */ - unsigned int crp_embedded:1; - int crp_status; /* console rpc errors */ - unsigned long crp_stamp; /* replied time stamp */ + unsigned int crp_embedded:1; + int crp_status; /* console rpc errors */ + unsigned long crp_stamp; /* replied time stamp */ } lstcon_rpc_t; typedef struct lstcon_rpc_trans { - struct list_head tas_olink; /* link chain on owner list */ - struct list_head tas_link; /* link chain on global list */ - int tas_opc; /* operation code of transaction */ - unsigned tas_feats_updated; /* 
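lstcon_rpc_cleanup_wait() in this hunk swaps the hand-rolled while/list_entry loop for list_for_each_entry_safe(), which keeps a lookahead pointer so each node can be unlinked and freed while the walk continues. A minimal stand-alone version of that drain, with a throwaway demo_rpc node type:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_rpc {
	struct list_head link;
};

/* Free every node on a private list; _safe tolerates deletion mid-walk. */
static void demo_drain_list(struct list_head *zlist)
{
	struct demo_rpc *crpc, *tmp;

	list_for_each_entry_safe(crpc, tmp, zlist, link) {
		list_del(&crpc->link);
		kfree(crpc);
	}
}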
features mask is uptodate */ - unsigned tas_features; /* test features mask */ - wait_queue_head_t tas_waitq; /* wait queue head */ - atomic_t tas_remaining; /* # of un-scheduled rpcs */ + struct list_head tas_olink; /* link chain on owner list */ + struct list_head tas_link; /* link chain on global list */ + int tas_opc; /* operation code of transaction */ + unsigned tas_feats_updated; /* features mask is uptodate */ + unsigned tas_features; /* test features mask */ + wait_queue_head_t tas_waitq; /* wait queue head */ + atomic_t tas_remaining; /* # of un-scheduled rpcs */ struct list_head tas_rpcs_list; /* queued requests */ } lstcon_rpc_trans_t; -#define LST_TRANS_PRIVATE 0x1000 +#define LST_TRANS_PRIVATE 0x1000 #define LST_TRANS_SESNEW (LST_TRANS_PRIVATE | 0x01) #define LST_TRANS_SESEND (LST_TRANS_PRIVATE | 0x02) #define LST_TRANS_SESQRY 0x03 -#define LST_TRANS_SESPING 0x04 +#define LST_TRANS_SESPING 0x04 -#define LST_TRANS_TSBCLIADD (LST_TRANS_PRIVATE | 0x11) -#define LST_TRANS_TSBSRVADD (LST_TRANS_PRIVATE | 0x12) +#define LST_TRANS_TSBCLIADD (LST_TRANS_PRIVATE | 0x11) +#define LST_TRANS_TSBSRVADD (LST_TRANS_PRIVATE | 0x12) #define LST_TRANS_TSBRUN (LST_TRANS_PRIVATE | 0x13) -#define LST_TRANS_TSBSTOP (LST_TRANS_PRIVATE | 0x14) -#define LST_TRANS_TSBCLIQRY 0x15 -#define LST_TRANS_TSBSRVQRY 0x16 +#define LST_TRANS_TSBSTOP (LST_TRANS_PRIVATE | 0x14) +#define LST_TRANS_TSBCLIQRY 0x15 +#define LST_TRANS_TSBSRVQRY 0x16 -#define LST_TRANS_STATQRY 0x21 +#define LST_TRANS_STATQRY 0x21 typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *); -typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *); +typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, + lstcon_rpc_ent_t __user *); int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop, unsigned version, lstcon_rpc_t **crpc); @@ -128,7 +129,7 @@ int lstcon_rpc_trans_ndlist(struct list_head *ndlist, void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat); int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, - struct list_head *head_up, + struct list_head __user *head_up, lstcon_rpc_readent_func_t readent); void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error); void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans); diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c index 5619fc430e8d..1a923ea3a755 100644 --- a/drivers/staging/lustre/lnet/selftest/console.c +++ b/drivers/staging/lustre/lnet/selftest/console.c @@ -49,16 +49,16 @@ do { \ if ((nd)->nd_state == LST_NODE_ACTIVE) \ (p)->nle_nactive++; \ - else if ((nd)->nd_state == LST_NODE_BUSY) \ + else if ((nd)->nd_state == LST_NODE_BUSY) \ (p)->nle_nbusy++; \ - else if ((nd)->nd_state == LST_NODE_DOWN) \ + else if ((nd)->nd_state == LST_NODE_DOWN) \ (p)->nle_ndown++; \ else \ (p)->nle_nunknown++; \ (p)->nle_nnode++; \ } while (0) -lstcon_session_t console_session; +struct lstcon_session console_session; static void lstcon_node_get(lstcon_node_t *nd) @@ -71,12 +71,13 @@ lstcon_node_get(lstcon_node_t *nd) static int lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) { - lstcon_ndlink_t *ndl; + lstcon_ndlink_t *ndl; unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE; LASSERT(id.nid != LNET_NID_ANY); - list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], ndl_hlink) { + list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], + ndl_hlink) { if (ndl->ndl_node->nd_id.nid != id.nid || ndl->ndl_node->nd_id.pid 
!= id.pid) continue; @@ -90,23 +91,25 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) return -ENOENT; LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t)); - if (*ndpp == NULL) + if (!*ndpp) return -ENOMEM; ndl = (lstcon_ndlink_t *)(*ndpp + 1); ndl->ndl_node = *ndpp; - ndl->ndl_node->nd_ref = 1; - ndl->ndl_node->nd_id = id; + ndl->ndl_node->nd_ref = 1; + ndl->ndl_node->nd_id = id; ndl->ndl_node->nd_stamp = cfs_time_current(); ndl->ndl_node->nd_state = LST_NODE_UNKNOWN; ndl->ndl_node->nd_timeout = 0; memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t)); - /* queued in global hash & list, no refcount is taken by + /* + * queued in global hash & list, no refcount is taken by * global hash & list, if caller release his refcount, - * node will be released */ + * node will be released + */ list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]); list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list); @@ -157,16 +160,16 @@ lstcon_ndlink_find(struct list_head *hash, return 0; } - if (create == 0) + if (!create) return -ENOENT; /* find or create in session hash */ rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0); - if (rc != 0) + if (rc) return rc; LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t)); - if (ndl == NULL) { + if (!ndl) { lstcon_node_put(nd); return -ENOMEM; } @@ -177,7 +180,7 @@ lstcon_ndlink_find(struct list_head *hash, INIT_LIST_HEAD(&ndl->ndl_link); list_add_tail(&ndl->ndl_hlink, &hash[idx]); - return 0; + return 0; } static void @@ -200,12 +203,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp) LIBCFS_ALLOC(grp, offsetof(lstcon_group_t, grp_ndl_hash[LST_NODE_HASHSIZE])); - if (grp == NULL) + if (!grp) return -ENOMEM; grp->grp_ref = 1; - if (name != NULL) - strcpy(grp->grp_name, name); + if (name) { + if (strlen(name) > sizeof(grp->grp_name) - 1) { + LIBCFS_FREE(grp, offsetof(lstcon_group_t, + grp_ndl_hash[LST_NODE_HASHSIZE])); + return -E2BIG; + } + strncpy(grp->grp_name, name, sizeof(grp->grp_name)); + } INIT_LIST_HEAD(&grp->grp_link); INIT_LIST_HEAD(&grp->grp_ndl_list); @@ -234,7 +243,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep) lstcon_ndlink_t *tmp; list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) { - if ((ndl->ndl_node->nd_state & keep) == 0) + if (!(ndl->ndl_node->nd_state & keep)) lstcon_group_ndlink_release(grp, ndl); } } @@ -252,9 +261,8 @@ lstcon_group_decref(lstcon_group_t *grp) lstcon_group_drain(grp, 0); - for (i = 0; i < LST_NODE_HASHSIZE; i++) { + for (i = 0; i < LST_NODE_HASHSIZE; i++) LASSERT(list_empty(&grp->grp_ndl_hash[i])); - } LIBCFS_FREE(grp, offsetof(lstcon_group_t, grp_ndl_hash[LST_NODE_HASHSIZE])); @@ -266,7 +274,7 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp) lstcon_group_t *grp; list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { - if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0) + if (strncmp(grp->grp_name, name, LST_NAME_SIZE)) continue; lstcon_group_addref(grp); /* +1 ref for caller */ @@ -284,7 +292,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id, int rc; rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create); - if (rc != 0) + if (rc) return rc; if (!list_empty(&(*ndlpp)->ndl_link)) @@ -309,7 +317,7 @@ lstcon_group_ndlink_move(lstcon_group_t *old, lstcon_group_t *new, lstcon_ndlink_t *ndl) { unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) % - LST_NODE_HASHSIZE; + LST_NODE_HASHSIZE; list_del(&ndl->ndl_hlink); list_del(&ndl->ndl_link); @@ -327,7 +335,7 @@ 
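lstcon_group_alloc() in this hunk stops strcpy()ing the caller's name into the fixed grp_name array and instead rejects over-long names with -E2BIG before doing a bounded copy. A sketch of the same guard on a hypothetical demo_group:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

#define DEMO_NAME_SIZE	32

struct demo_group {
	char name[DEMO_NAME_SIZE];
};

static int demo_group_alloc(const char *name, struct demo_group **grpp)
{
	struct demo_group *grp;

	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		return -ENOMEM;

	if (name) {
		/* refuse names that cannot fit with their terminator */
		if (strlen(name) > sizeof(grp->name) - 1) {
			kfree(grp);
			return -E2BIG;
		}
		strncpy(grp->name, name, sizeof(grp->name));
	}

	*grpp = grp;
	return 0;
}

strlcpy() would fold the guard and the copy into one call, but keeping the explicit length check makes a too-long name an error rather than a silent truncation.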
lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new) while (!list_empty(&old->grp_ndl_list)) { ndl = list_entry(old->grp_ndl_list.next, - lstcon_ndlink_t, ndl_link); + lstcon_ndlink_t, ndl_link); lstcon_group_ndlink_move(old, new, ndl); } } @@ -347,7 +355,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg) if (nd->nd_state != LST_NODE_ACTIVE) return 0; - if (grp != NULL && nd->nd_ref > 1) + if (grp && nd->nd_ref > 1) return 0; break; @@ -363,7 +371,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg) static int lstcon_sesrpc_readent(int transop, srpc_msg_t *msg, - lstcon_rpc_ent_t *ent_up) + lstcon_rpc_ent_t __user *ent_up) { srpc_debug_reply_t *rep; @@ -376,9 +384,9 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg, rep = &msg->msg_body.dbg_reply; if (copy_to_user(&ent_up->rpe_priv[0], - &rep->dbg_timeout, sizeof(int)) || + &rep->dbg_timeout, sizeof(int)) || copy_to_user(&ent_up->rpe_payload[0], - &rep->dbg_name, LST_NAME_SIZE)) + &rep->dbg_name, LST_NAME_SIZE)) return -EFAULT; return 0; @@ -392,18 +400,18 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg, static int lstcon_group_nodes_add(lstcon_group_t *grp, - int count, lnet_process_id_t *ids_up, - unsigned *featp, struct list_head *result_up) + int count, lnet_process_id_t __user *ids_up, + unsigned *featp, struct list_head __user *result_up) { lstcon_rpc_trans_t *trans; - lstcon_ndlink_t *ndl; + lstcon_ndlink_t *ndl; lstcon_group_t *tmp; lnet_process_id_t id; int i; int rc; rc = lstcon_group_alloc(NULL, &tmp); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); return -ENOMEM; } @@ -416,18 +424,18 @@ lstcon_group_nodes_add(lstcon_group_t *grp, /* skip if it's in this group already */ rc = lstcon_group_ndlink_find(grp, id, &ndl, 0); - if (rc == 0) + if (!rc) continue; /* add to tmp group */ rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1); - if (rc != 0) { + if (rc) { CERROR("Can't create ndlink, out of memory\n"); break; } } - if (rc != 0) { + if (rc) { lstcon_group_decref(tmp); return rc; } @@ -435,7 +443,7 @@ lstcon_group_nodes_add(lstcon_group_t *grp, rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list, &tmp->grp_trans_list, LST_TRANS_SESNEW, tmp, lstcon_sesrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); lstcon_group_decref(tmp); return rc; @@ -459,8 +467,8 @@ lstcon_group_nodes_add(lstcon_group_t *grp, static int lstcon_group_nodes_remove(lstcon_group_t *grp, - int count, lnet_process_id_t *ids_up, - struct list_head *result_up) + int count, lnet_process_id_t __user *ids_up, + struct list_head __user *result_up) { lstcon_rpc_trans_t *trans; lstcon_ndlink_t *ndl; @@ -472,7 +480,7 @@ lstcon_group_nodes_remove(lstcon_group_t *grp, /* End session and remove node from the group */ rc = lstcon_group_alloc(NULL, &tmp); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); return -ENOMEM; } @@ -484,14 +492,14 @@ lstcon_group_nodes_remove(lstcon_group_t *grp, } /* move node to tmp group */ - if (lstcon_group_ndlink_find(grp, id, &ndl, 0) == 0) + if (!lstcon_group_ndlink_find(grp, id, &ndl, 0)) lstcon_group_ndlink_move(grp, tmp, ndl); } rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list, &tmp->grp_trans_list, LST_TRANS_SESEND, tmp, lstcon_sesrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); goto error; } @@ -518,15 +526,15 @@ lstcon_group_add(char *name) lstcon_group_t *grp; int rc; - rc = (lstcon_group_find(name, &grp) == 0) ? 
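lstcon_group_nodes_add() and _remove() in this hunk take the id array as a __user pointer and walk it one element at a time, so a single faulting element aborts the walk cleanly. A stripped-down sketch of that per-element copy, with a hypothetical demo_id record in place of lnet_process_id_t:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_id {
	u64 nid;
	u32 pid;
};

static int demo_walk_ids(struct demo_id __user *ids_up, int count)
{
	struct demo_id id;
	int i;

	for (i = 0; i < count; i++) {
		/* copy one id per iteration; bail out on the first fault */
		if (copy_from_user(&id, &ids_up[i], sizeof(id)))
			return -EFAULT;

		/* ... find or move the node this id refers to ... */
	}

	return 0;
}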
-EEXIST : 0; - if (rc != 0) { + rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST; + if (rc) { /* find a group with same name */ lstcon_group_decref(grp); return rc; } rc = lstcon_group_alloc(name, &grp); - if (rc != 0) { + if (rc) { CERROR("Can't allocate descriptor for group %s\n", name); return -ENOMEM; } @@ -537,17 +545,17 @@ lstcon_group_add(char *name) } int -lstcon_nodes_add(char *name, int count, lnet_process_id_t *ids_up, - unsigned *featp, struct list_head *result_up) +lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up, + unsigned *featp, struct list_head __user *result_up) { lstcon_group_t *grp; int rc; LASSERT(count > 0); - LASSERT(ids_up != NULL); + LASSERT(ids_up); rc = lstcon_group_find(name, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find group %s\n", name); return rc; } @@ -575,7 +583,7 @@ lstcon_group_del(char *name) int rc; rc = lstcon_group_find(name, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find group: %s\n", name); return rc; } @@ -590,7 +598,7 @@ lstcon_group_del(char *name) rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list, &grp->grp_trans_list, LST_TRANS_SESEND, grp, lstcon_sesrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); lstcon_group_decref(grp); return rc; @@ -601,8 +609,10 @@ lstcon_group_del(char *name) lstcon_rpc_trans_destroy(trans); lstcon_group_decref(grp); - /* -ref for session, it's destroyed, - * status can't be rolled back, destroy group anyway */ + /* + * -ref for session, it's destroyed, + * status can't be rolled back, destroy group anyway + */ lstcon_group_decref(grp); return rc; @@ -615,7 +625,7 @@ lstcon_group_clean(char *name, int args) int rc; rc = lstcon_group_find(name, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find group %s\n", name); return rc; } @@ -641,14 +651,14 @@ lstcon_group_clean(char *name, int args) } int -lstcon_nodes_remove(char *name, int count, - lnet_process_id_t *ids_up, struct list_head *result_up) +lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up, + struct list_head __user *result_up) { lstcon_group_t *grp = NULL; int rc; rc = lstcon_group_find(name, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find group: %s\n", name); return rc; } @@ -671,14 +681,14 @@ lstcon_nodes_remove(char *name, int count, } int -lstcon_group_refresh(char *name, struct list_head *result_up) +lstcon_group_refresh(char *name, struct list_head __user *result_up) { lstcon_rpc_trans_t *trans; lstcon_group_t *grp; int rc; rc = lstcon_group_find(name, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find group: %s\n", name); return rc; } @@ -694,7 +704,7 @@ lstcon_group_refresh(char *name, struct list_head *result_up) rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list, &grp->grp_trans_list, LST_TRANS_SESNEW, grp, lstcon_sesrpc_condition, &trans); - if (rc != 0) { + if (rc) { /* local error, return */ CDEBUG(D_NET, "Can't create transaction: %d\n", rc); lstcon_group_decref(grp); @@ -713,15 +723,15 @@ lstcon_group_refresh(char *name, struct list_head *result_up) } int -lstcon_group_list(int index, int len, char *name_up) +lstcon_group_list(int index, int len, char __user *name_up) { lstcon_group_t *grp; LASSERT(index >= 0); - LASSERT(name_up != NULL); + LASSERT(name_up); list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { - if (index-- == 0) { + if (!index--) { return copy_to_user(name_up, grp->grp_name, len) ? 
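lstcon_group_add() at the start of this hunk inverts the existence check: a successful lstcon_group_find() now means the name is already taken, so the reference the lookup acquired is dropped and -EEXIST returned before anything is allocated. A compact sketch of that create-unless-present shape, with declared demo_* placeholders for the find/decref/alloc calls:

#include <linux/errno.h>

struct demo_group;

/* Returns 0 and takes a reference when the name exists, -ENOENT otherwise. */
static int demo_group_find(const char *name, struct demo_group **grpp);
static void demo_group_decref(struct demo_group *grp);
static int demo_group_alloc(const char *name, struct demo_group **grpp);

static int demo_group_add(const char *name)
{
	struct demo_group *grp;

	if (!demo_group_find(name, &grp)) {	/* lookup succeeded: duplicate */
		demo_group_decref(grp);		/* drop the lookup's reference */
		return -EEXIST;
	}

	return demo_group_alloc(name, &grp);
}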
-EFAULT : 0; } @@ -732,15 +742,15 @@ lstcon_group_list(int index, int len, char *name_up) static int lstcon_nodes_getent(struct list_head *head, int *index_p, - int *count_p, lstcon_node_ent_t *dents_up) + int *count_p, lstcon_node_ent_t __user *dents_up) { lstcon_ndlink_t *ndl; lstcon_node_t *nd; int count = 0; int index = 0; - LASSERT(index_p != NULL && count_p != NULL); - LASSERT(dents_up != NULL); + LASSERT(index_p && count_p); + LASSERT(dents_up); LASSERT(*index_p >= 0); LASSERT(*count_p > 0); @@ -753,9 +763,9 @@ lstcon_nodes_getent(struct list_head *head, int *index_p, nd = ndl->ndl_node; if (copy_to_user(&dents_up[count].nde_id, - &nd->nd_id, sizeof(nd->nd_id)) || + &nd->nd_id, sizeof(nd->nd_id)) || copy_to_user(&dents_up[count].nde_state, - &nd->nd_state, sizeof(nd->nd_state))) + &nd->nd_state, sizeof(nd->nd_state))) return -EFAULT; count++; @@ -771,8 +781,9 @@ lstcon_nodes_getent(struct list_head *head, int *index_p, } int -lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p, - int *index_p, int *count_p, lstcon_node_ent_t *dents_up) +lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p, + int *index_p, int *count_p, + lstcon_node_ent_t __user *dents_up) { lstcon_ndlist_ent_t *gentp; lstcon_group_t *grp; @@ -780,7 +791,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p, int rc; rc = lstcon_group_find(name, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find group %s\n", name); return rc; } @@ -796,7 +807,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p, /* non-verbose query */ LIBCFS_ALLOC(gentp, sizeof(lstcon_ndlist_ent_t)); - if (gentp == NULL) { + if (!gentp) { CERROR("Can't allocate ndlist_ent\n"); lstcon_group_decref(grp); @@ -807,7 +818,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p, LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp); rc = copy_to_user(gents_p, gentp, - sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0; + sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0; LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t)); @@ -822,7 +833,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp) lstcon_batch_t *bat; list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) { - if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) { + if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) { *batpp = bat; return 0; } @@ -838,21 +849,21 @@ lstcon_batch_add(char *name) int i; int rc; - rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0; - if (rc != 0) { + rc = !lstcon_batch_find(name, &bat) ? 
-EEXIST : 0; + if (rc) { CDEBUG(D_NET, "Batch %s already exists\n", name); return rc; } LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t)); - if (bat == NULL) { + if (!bat) { CERROR("Can't allocate descriptor for batch %s\n", name); return -ENOMEM; } LIBCFS_ALLOC(bat->bat_cli_hash, sizeof(struct list_head) * LST_NODE_HASHSIZE); - if (bat->bat_cli_hash == NULL) { + if (!bat->bat_cli_hash) { CERROR("Can't allocate hash for batch %s\n", name); LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); @@ -861,7 +872,7 @@ lstcon_batch_add(char *name) LIBCFS_ALLOC(bat->bat_srv_hash, sizeof(struct list_head) * LST_NODE_HASHSIZE); - if (bat->bat_srv_hash == NULL) { + if (!bat->bat_srv_hash) { CERROR("Can't allocate hash for batch %s\n", name); LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE); LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); @@ -869,7 +880,13 @@ lstcon_batch_add(char *name) return -ENOMEM; } - strcpy(bat->bat_name, name); + if (strlen(name) > sizeof(bat->bat_name) - 1) { + LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE); + LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE); + LIBCFS_FREE(bat, sizeof(lstcon_batch_t)); + return -E2BIG; + } + strncpy(bat->bat_name, name, sizeof(bat->bat_name)); bat->bat_hdr.tsb_index = 0; bat->bat_hdr.tsb_id.bat_id = ++console_session.ses_id_cookie; @@ -892,17 +909,17 @@ lstcon_batch_add(char *name) } int -lstcon_batch_list(int index, int len, char *name_up) +lstcon_batch_list(int index, int len, char __user *name_up) { lstcon_batch_t *bat; - LASSERT(name_up != NULL); + LASSERT(name_up); LASSERT(index >= 0); list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) { - if (index-- == 0) { + if (!index--) { return copy_to_user(name_up, bat->bat_name, len) ? - -EFAULT : 0; + -EFAULT : 0; } } @@ -910,20 +927,20 @@ lstcon_batch_list(int index, int len, char *name_up) } int -lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server, - int testidx, int *index_p, int *ndent_p, - lstcon_node_ent_t *dents_up) +lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up, + int server, int testidx, int *index_p, int *ndent_p, + lstcon_node_ent_t __user *dents_up) { lstcon_test_batch_ent_t *entp; struct list_head *clilst; struct list_head *srvlst; lstcon_test_t *test = NULL; lstcon_batch_t *bat; - lstcon_ndlink_t *ndl; + lstcon_ndlink_t *ndl; int rc; rc = lstcon_batch_find(name, &bat); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find batch %s\n", name); return -ENOENT; } @@ -941,12 +958,12 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server, } } - clilst = (test == NULL) ? &bat->bat_cli_list : - &test->tes_src_grp->grp_ndl_list; - srvlst = (test == NULL) ? &bat->bat_srv_list : - &test->tes_dst_grp->grp_ndl_list; + clilst = !test ? &bat->bat_cli_list : + &test->tes_src_grp->grp_ndl_list; + srvlst = !test ? &bat->bat_srv_list : + &test->tes_dst_grp->grp_ndl_list; - if (dents_up != NULL) { + if (dents_up) { rc = lstcon_nodes_getent((server ? 
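lstcon_batch_add() in this hunk allocates the batch descriptor plus two node hash tables and, like the group path, now length-checks the name before a bounded copy. A self-contained sketch of that allocate-check-unwind sequence, with demo_* sizes and kmalloc_array() standing in for the LIBCFS helpers:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

#define DEMO_HASHSIZE	16
#define DEMO_NAME_SIZE	32

struct demo_batch {
	char name[DEMO_NAME_SIZE];
	struct list_head *cli_hash;
	struct list_head *srv_hash;
};

static struct demo_batch *demo_batch_alloc(const char *name)
{
	struct demo_batch *bat;
	int i;

	if (strlen(name) > DEMO_NAME_SIZE - 1)
		return ERR_PTR(-E2BIG);

	bat = kzalloc(sizeof(*bat), GFP_KERNEL);
	if (!bat)
		return ERR_PTR(-ENOMEM);

	bat->cli_hash = kmalloc_array(DEMO_HASHSIZE, sizeof(struct list_head),
				      GFP_KERNEL);
	bat->srv_hash = kmalloc_array(DEMO_HASHSIZE, sizeof(struct list_head),
				      GFP_KERNEL);
	if (!bat->cli_hash || !bat->srv_hash)
		goto fail;

	strncpy(bat->name, name, sizeof(bat->name));
	for (i = 0; i < DEMO_HASHSIZE; i++) {
		INIT_LIST_HEAD(&bat->cli_hash[i]);
		INIT_LIST_HEAD(&bat->srv_hash[i]);
	}

	return bat;

fail:
	kfree(bat->srv_hash);	/* kfree(NULL) is safe */
	kfree(bat->cli_hash);
	kfree(bat);
	return ERR_PTR(-ENOMEM);
}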
srvlst : clilst), index_p, ndent_p, dents_up); return rc; @@ -954,17 +971,16 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server, /* non-verbose query */ LIBCFS_ALLOC(entp, sizeof(lstcon_test_batch_ent_t)); - if (entp == NULL) + if (!entp) return -ENOMEM; - if (test == NULL) { + if (!test) { entp->u.tbe_batch.bae_ntest = bat->bat_ntest; entp->u.tbe_batch.bae_state = bat->bat_state; } else { - - entp->u.tbe_test.tse_type = test->tes_type; - entp->u.tbe_test.tse_loop = test->tes_loop; + entp->u.tbe_test.tse_type = test->tes_type; + entp->u.tbe_test.tse_loop = test->tes_loop; entp->u.tbe_test.tse_concur = test->tes_concur; } @@ -975,7 +991,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server, LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle); rc = copy_to_user(ent_up, entp, - sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0; + sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0; LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t)); @@ -1006,7 +1022,7 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg) static int lstcon_batch_op(lstcon_batch_t *bat, int transop, - struct list_head *result_up) + struct list_head __user *result_up) { lstcon_rpc_trans_t *trans; int rc; @@ -1014,7 +1030,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop, rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list, &bat->bat_trans_list, transop, bat, lstcon_batrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); return rc; } @@ -1029,12 +1045,12 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop, } int -lstcon_batch_run(char *name, int timeout, struct list_head *result_up) +lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up) { lstcon_batch_t *bat; int rc; - if (lstcon_batch_find(name, &bat) != 0) { + if (lstcon_batch_find(name, &bat)) { CDEBUG(D_NET, "Can't find batch %s\n", name); return -ENOENT; } @@ -1044,19 +1060,19 @@ lstcon_batch_run(char *name, int timeout, struct list_head *result_up) rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up); /* mark batch as running if it's started in any node */ - if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0) != 0) + if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0)) bat->bat_state = LST_BATCH_RUNNING; return rc; } int -lstcon_batch_stop(char *name, int force, struct list_head *result_up) +lstcon_batch_stop(char *name, int force, struct list_head __user *result_up) { lstcon_batch_t *bat; int rc; - if (lstcon_batch_find(name, &bat) != 0) { + if (lstcon_batch_find(name, &bat)) { CDEBUG(D_NET, "Can't find batch %s\n", name); return -ENOENT; } @@ -1066,7 +1082,7 @@ lstcon_batch_stop(char *name, int force, struct list_head *result_up) rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up); /* mark batch as stopped if all RPCs finished */ - if (lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0) == 0) + if (!lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0)) bat->bat_state = LST_BATCH_IDLE; return rc; @@ -1083,7 +1099,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat) while (!list_empty(&bat->bat_test_list)) { test = list_entry(bat->bat_test_list.next, - lstcon_test_t, tes_link); + lstcon_test_t, tes_link); LASSERT(list_empty(&test->tes_trans_list)); list_del(&test->tes_link); @@ -1099,7 +1115,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat) while (!list_empty(&bat->bat_cli_list)) { ndl = list_entry(bat->bat_cli_list.next, - lstcon_ndlink_t, ndl_link); + lstcon_ndlink_t, ndl_link); list_del_init(&ndl->ndl_link); lstcon_ndlink_release(ndl); @@ 
-1107,7 +1123,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat) while (!list_empty(&bat->bat_srv_list)) { ndl = list_entry(bat->bat_srv_list.next, - lstcon_ndlink_t, ndl_link); + lstcon_ndlink_t, ndl_link); list_del_init(&ndl->ndl_link); lstcon_ndlink_release(ndl); @@ -1135,10 +1151,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) struct list_head *head; test = (lstcon_test_t *)arg; - LASSERT(test != NULL); + LASSERT(test); batch = test->tes_batch; - LASSERT(batch != NULL); + LASSERT(batch); if (test->tes_oneside && transop == LST_TRANS_TSBSRVADD) @@ -1160,7 +1176,7 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) LASSERT(nd->nd_id.nid != LNET_NID_ANY); - if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0) + if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1)) return -ENOMEM; if (list_empty(&ndl->ndl_link)) @@ -1170,31 +1186,31 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) } static int -lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up) +lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up) { lstcon_rpc_trans_t *trans; lstcon_group_t *grp; int transop; int rc; - LASSERT(test->tes_src_grp != NULL); - LASSERT(test->tes_dst_grp != NULL); + LASSERT(test->tes_src_grp); + LASSERT(test->tes_dst_grp); transop = LST_TRANS_TSBSRVADD; - grp = test->tes_dst_grp; + grp = test->tes_dst_grp; again: rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list, &test->tes_trans_list, transop, test, lstcon_testrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); return rc; } lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT); - if (lstcon_trans_stat()->trs_rpc_errno != 0 || - lstcon_trans_stat()->trs_fwk_errno != 0) { + if (lstcon_trans_stat()->trs_rpc_errno || + lstcon_trans_stat()->trs_fwk_errno) { lstcon_rpc_trans_interpreter(trans, result_up, NULL); lstcon_rpc_trans_destroy(trans); @@ -1226,7 +1242,7 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch) int rc; rc = lstcon_batch_find(name, batch); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find batch %s\n", name); return rc; } @@ -1243,10 +1259,10 @@ static int lstcon_verify_group(const char *name, lstcon_group_t **grp) { int rc; - lstcon_ndlink_t *ndl; + lstcon_ndlink_t *ndl; rc = lstcon_group_find(name, grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "can't find group %s\n", name); return rc; } @@ -1266,13 +1282,13 @@ lstcon_test_add(char *batch_name, int type, int loop, int concur, int dist, int span, char *src_name, char *dst_name, void *param, int paramlen, int *retp, - struct list_head *result_up) + struct list_head __user *result_up) { - lstcon_test_t *test = NULL; - int rc; - lstcon_group_t *src_grp = NULL; - lstcon_group_t *dst_grp = NULL; - lstcon_batch_t *batch = NULL; + lstcon_test_t *test = NULL; + int rc; + lstcon_group_t *src_grp = NULL; + lstcon_group_t *dst_grp = NULL; + lstcon_batch_t *batch = NULL; /* * verify that a batch of the given name exists, and the groups @@ -1280,15 +1296,15 @@ lstcon_test_add(char *batch_name, int type, int loop, * active node */ rc = lstcon_verify_batch(batch_name, &batch); - if (rc != 0) + if (rc) goto out; rc = lstcon_verify_group(src_name, &src_grp); - if (rc != 0) + if (rc) goto out; rc = lstcon_verify_group(dst_name, &dst_grp); - if (rc != 0) + if (rc) goto out; if (dst_grp->grp_userland) @@ -1302,32 +1318,32 @@ lstcon_test_add(char *batch_name, int type, int loop, goto out; } - test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id; - 
test->tes_batch = batch; - test->tes_type = type; - test->tes_oneside = 0; /* TODO */ - test->tes_loop = loop; - test->tes_concur = concur; - test->tes_stop_onerr = 1; /* TODO */ - test->tes_span = span; - test->tes_dist = dist; - test->tes_cliidx = 0; /* just used for creating RPC */ - test->tes_src_grp = src_grp; - test->tes_dst_grp = dst_grp; + test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id; + test->tes_batch = batch; + test->tes_type = type; + test->tes_oneside = 0; /* TODO */ + test->tes_loop = loop; + test->tes_concur = concur; + test->tes_stop_onerr = 1; /* TODO */ + test->tes_span = span; + test->tes_dist = dist; + test->tes_cliidx = 0; /* just used for creating RPC */ + test->tes_src_grp = src_grp; + test->tes_dst_grp = dst_grp; INIT_LIST_HEAD(&test->tes_trans_list); - if (param != NULL) { + if (param) { test->tes_paramlen = paramlen; memcpy(&test->tes_param[0], param, paramlen); } rc = lstcon_test_nodes_add(test, result_up); - if (rc != 0) + if (rc) goto out; - if (lstcon_trans_stat()->trs_rpc_errno != 0 || - lstcon_trans_stat()->trs_fwk_errno != 0) + if (lstcon_trans_stat()->trs_rpc_errno || + lstcon_trans_stat()->trs_fwk_errno) CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type, batch_name); @@ -1340,13 +1356,13 @@ lstcon_test_add(char *batch_name, int type, int loop, /* hold groups so nobody can change them */ return rc; out: - if (test != NULL) + if (test) LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen])); - if (dst_grp != NULL) + if (dst_grp) lstcon_group_decref(dst_grp); - if (src_grp != NULL) + if (src_grp) lstcon_group_decref(src_grp); return rc; @@ -1369,16 +1385,16 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp) static int lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg, - lstcon_rpc_ent_t *ent_up) + lstcon_rpc_ent_t __user *ent_up) { srpc_batch_reply_t *rep = &msg->msg_body.bat_reply; LASSERT(transop == LST_TRANS_TSBCLIQRY || - transop == LST_TRANS_TSBSRVQRY); + transop == LST_TRANS_TSBSRVQRY); /* positive errno, framework error code */ - if (copy_to_user(&ent_up->rpe_priv[0], - &rep->bar_active, sizeof(rep->bar_active))) + if (copy_to_user(&ent_up->rpe_priv[0], &rep->bar_active, + sizeof(rep->bar_active))) return -EFAULT; return 0; @@ -1386,7 +1402,7 @@ lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg, int lstcon_test_batch_query(char *name, int testidx, int client, - int timeout, struct list_head *result_up) + int timeout, struct list_head __user *result_up) { lstcon_rpc_trans_t *trans; struct list_head *translist; @@ -1398,43 +1414,43 @@ lstcon_test_batch_query(char *name, int testidx, int client, int rc; rc = lstcon_batch_find(name, &batch); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find batch: %s\n", name); return rc; } - if (testidx == 0) { + if (!testidx) { translist = &batch->bat_trans_list; - ndlist = &batch->bat_cli_list; - hdr = &batch->bat_hdr; + ndlist = &batch->bat_cli_list; + hdr = &batch->bat_hdr; } else { /* query specified test only */ rc = lstcon_test_find(batch, testidx, &test); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find test: %d\n", testidx); return rc; } translist = &test->tes_trans_list; - ndlist = &test->tes_src_grp->grp_ndl_list; - hdr = &test->tes_hdr; + ndlist = &test->tes_src_grp->grp_ndl_list; + hdr = &test->tes_hdr; } transop = client ? 
LST_TRANS_TSBCLIQRY : LST_TRANS_TSBSRVQRY; rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr, lstcon_batrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); return rc; } lstcon_rpc_trans_postwait(trans, timeout); - if (testidx == 0 && /* query a batch, not a test */ - lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) == 0 && - lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0) == 0) { + if (!testidx && /* query a batch, not a test */ + !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) && + !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) { /* all RPCs finished, and no active test */ batch->bat_state = LST_BATCH_IDLE; } @@ -1448,19 +1464,19 @@ lstcon_test_batch_query(char *name, int testidx, int client, static int lstcon_statrpc_readent(int transop, srpc_msg_t *msg, - lstcon_rpc_ent_t *ent_up) + lstcon_rpc_ent_t __user *ent_up) { srpc_stat_reply_t *rep = &msg->msg_body.stat_reply; - sfw_counters_t *sfwk_stat; - srpc_counters_t *srpc_stat; - lnet_counters_t *lnet_stat; + sfw_counters_t __user *sfwk_stat; + srpc_counters_t __user *srpc_stat; + lnet_counters_t __user *lnet_stat; - if (rep->str_status != 0) + if (rep->str_status) return 0; - sfwk_stat = (sfw_counters_t *)&ent_up->rpe_payload[0]; - srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat)); - lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat)); + sfwk_stat = (sfw_counters_t __user *)&ent_up->rpe_payload[0]; + srpc_stat = (srpc_counters_t __user *)(sfwk_stat + 1); + lnet_stat = (lnet_counters_t __user *)(srpc_stat + 1); if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) || copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) || @@ -1472,7 +1488,7 @@ lstcon_statrpc_readent(int transop, srpc_msg_t *msg, static int lstcon_ndlist_stat(struct list_head *ndlist, - int timeout, struct list_head *result_up) + int timeout, struct list_head __user *result_up) { struct list_head head; lstcon_rpc_trans_t *trans; @@ -1482,7 +1498,7 @@ lstcon_ndlist_stat(struct list_head *ndlist, rc = lstcon_rpc_trans_ndlist(ndlist, &head, LST_TRANS_STATQRY, NULL, NULL, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); return rc; } @@ -1497,13 +1513,14 @@ lstcon_ndlist_stat(struct list_head *ndlist, } int -lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up) +lstcon_group_stat(char *grp_name, int timeout, + struct list_head __user *result_up) { lstcon_group_t *grp; int rc; rc = lstcon_group_find(grp_name, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Can't find group %s\n", grp_name); return rc; } @@ -1516,17 +1533,17 @@ lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up) } int -lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, - int timeout, struct list_head *result_up) +lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up, + int timeout, struct list_head __user *result_up) { - lstcon_ndlink_t *ndl; + lstcon_ndlink_t *ndl; lstcon_group_t *tmp; lnet_process_id_t id; int i; int rc; rc = lstcon_group_alloc(NULL, &tmp); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); return -ENOMEM; } @@ -1539,7 +1556,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, /* add to tmp group */ rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2); - if (rc != 0) { + if (rc) { CDEBUG((rc == -ENOMEM) ? 
D_ERROR : D_NET, "Failed to find or create %s: %d\n", libcfs_id2str(id), rc); @@ -1547,7 +1564,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, } } - if (rc != 0) { + if (rc) { lstcon_group_decref(tmp); return rc; } @@ -1562,14 +1579,14 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, static int lstcon_debug_ndlist(struct list_head *ndlist, struct list_head *translist, - int timeout, struct list_head *result_up) + int timeout, struct list_head __user *result_up) { lstcon_rpc_trans_t *trans; - int rc; + int rc; rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY, NULL, lstcon_sesrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); return rc; } @@ -1584,7 +1601,7 @@ lstcon_debug_ndlist(struct list_head *ndlist, } int -lstcon_session_debug(int timeout, struct list_head *result_up) +lstcon_session_debug(int timeout, struct list_head __user *result_up) { return lstcon_debug_ndlist(&console_session.ses_ndl_list, NULL, timeout, result_up); @@ -1592,13 +1609,13 @@ lstcon_session_debug(int timeout, struct list_head *result_up) int lstcon_batch_debug(int timeout, char *name, - int client, struct list_head *result_up) + int client, struct list_head __user *result_up) { lstcon_batch_t *bat; int rc; rc = lstcon_batch_find(name, &bat); - if (rc != 0) + if (rc) return -ENOENT; rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list : @@ -1610,13 +1627,13 @@ lstcon_batch_debug(int timeout, char *name, int lstcon_group_debug(int timeout, char *name, - struct list_head *result_up) + struct list_head __user *result_up) { lstcon_group_t *grp; int rc; rc = lstcon_group_find(name, &grp); - if (rc != 0) + if (rc) return -ENOENT; rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL, @@ -1628,8 +1645,8 @@ lstcon_group_debug(int timeout, char *name, int lstcon_nodes_debug(int timeout, - int count, lnet_process_id_t *ids_up, - struct list_head *result_up) + int count, lnet_process_id_t __user *ids_up, + struct list_head __user *result_up) { lnet_process_id_t id; lstcon_ndlink_t *ndl; @@ -1638,7 +1655,7 @@ lstcon_nodes_debug(int timeout, int rc; rc = lstcon_group_alloc(NULL, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Out of memory\n"); return rc; } @@ -1651,13 +1668,13 @@ lstcon_nodes_debug(int timeout, /* node is added to tmp group */ rc = lstcon_group_ndlink_find(grp, id, &ndl, 1); - if (rc != 0) { + if (rc) { CERROR("Can't create node link\n"); break; } } - if (rc != 0) { + if (rc) { lstcon_group_decref(grp); return rc; } @@ -1673,8 +1690,8 @@ lstcon_nodes_debug(int timeout, int lstcon_session_match(lst_sid_t sid) { - return (console_session.ses_id.ses_nid == sid.ses_nid && - console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0; + return (console_session.ses_id.ses_nid == sid.ses_nid && + console_session.ses_id.ses_stamp == sid.ses_stamp) ? 
1 : 0; } static void @@ -1685,15 +1702,13 @@ lstcon_new_session_id(lst_sid_t *sid) LASSERT(console_session.ses_state == LST_SESSION_NONE); LNetGetId(1, &id); - sid->ses_nid = id.nid; + sid->ses_nid = id.nid; sid->ses_stamp = cfs_time_current(); } -extern srpc_service_t lstcon_acceptor_service; - int lstcon_session_new(char *name, int key, unsigned feats, - int timeout, int force, lst_sid_t *sid_up) + int timeout, int force, lst_sid_t __user *sid_up) { int rc = 0; int i; @@ -1709,11 +1724,11 @@ lstcon_session_new(char *name, int key, unsigned feats, rc = lstcon_session_end(); /* lstcon_session_end() only return local error */ - if (rc != 0) + if (rc) return rc; } - if ((feats & ~LST_FEATS_MASK) != 0) { + if (feats & ~LST_FEATS_MASK) { CNETERR("Unknown session features %x\n", (feats & ~LST_FEATS_MASK)); return -EINVAL; @@ -1731,15 +1746,18 @@ lstcon_session_new(char *name, int key, unsigned feats, console_session.ses_feats_updated = 0; console_session.ses_timeout = (timeout <= 0) ? LST_CONSOLE_TIMEOUT : timeout; - strlcpy(console_session.ses_name, name, + + if (strlen(name) > sizeof(console_session.ses_name) - 1) + return -E2BIG; + strncpy(console_session.ses_name, name, sizeof(console_session.ses_name)); rc = lstcon_batch_add(LST_DEFAULT_BATCH); - if (rc != 0) + if (rc) return rc; rc = lstcon_rpc_pinger_start(); - if (rc != 0) { + if (rc) { lstcon_batch_t *bat = NULL; lstcon_batch_find(LST_DEFAULT_BATCH, &bat); @@ -1748,8 +1766,8 @@ lstcon_session_new(char *name, int key, unsigned feats, return rc; } - if (copy_to_user(sid_up, &console_session.ses_id, - sizeof(lst_sid_t)) == 0) + if (!copy_to_user(sid_up, &console_session.ses_id, + sizeof(lst_sid_t))) return rc; lstcon_session_end(); @@ -1758,8 +1776,10 @@ lstcon_session_new(char *name, int key, unsigned feats, } int -lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp, - lstcon_ndlist_ent_t *ndinfo_up, char *name_up, int len) +lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up, + unsigned __user *featp, + lstcon_ndlist_ent_t __user *ndinfo_up, + char __user *name_up, int len) { lstcon_ndlist_ent_t *entp; lstcon_ndlink_t *ndl; @@ -1769,18 +1789,18 @@ lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp, return -ESRCH; LIBCFS_ALLOC(entp, sizeof(*entp)); - if (entp == NULL) + if (!entp) return -ENOMEM; list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) LST_NODE_STATE_COUNTER(ndl->ndl_node, entp); if (copy_to_user(sid_up, &console_session.ses_id, - sizeof(lst_sid_t)) || + sizeof(lst_sid_t)) || copy_to_user(key_up, &console_session.ses_key, - sizeof(*key_up)) || + sizeof(*key_up)) || copy_to_user(featp, &console_session.ses_features, - sizeof(*featp)) || + sizeof(*featp)) || copy_to_user(ndinfo_up, entp, sizeof(*entp)) || copy_to_user(name_up, console_session.ses_name, len)) rc = -EFAULT; @@ -1803,7 +1823,7 @@ lstcon_session_end(void) rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list, NULL, LST_TRANS_SESEND, NULL, lstcon_sesrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); return rc; } @@ -1820,16 +1840,16 @@ lstcon_session_end(void) /* waiting for orphan rpcs to die */ lstcon_rpc_cleanup_wait(); - console_session.ses_id = LST_INVALID_SID; + console_session.ses_id = LST_INVALID_SID; console_session.ses_state = LST_SESSION_NONE; - console_session.ses_key = 0; + console_session.ses_key = 0; console_session.ses_force = 0; console_session.ses_feats_updated = 0; /* destroy all batches */ while 
(!list_empty(&console_session.ses_bat_list)) { bat = list_entry(console_session.ses_bat_list.next, - lstcon_batch_t, bat_link); + lstcon_batch_t, bat_link); lstcon_batch_destroy(bat); } @@ -1837,7 +1857,7 @@ lstcon_session_end(void) /* destroy all groups */ while (!list_empty(&console_session.ses_grp_list)) { grp = list_entry(console_session.ses_grp_list.next, - lstcon_group_t, grp_link); + lstcon_group_t, grp_link); LASSERT(grp->grp_ref == 1); lstcon_group_decref(grp); @@ -1847,7 +1867,7 @@ lstcon_session_end(void) LASSERT(list_empty(&console_session.ses_ndl_list)); console_session.ses_shutdown = 0; - console_session.ses_expired = 0; + console_session.ses_expired = 0; return rc; } @@ -1857,7 +1877,7 @@ lstcon_session_feats_check(unsigned feats) { int rc = 0; - if ((feats & ~LST_FEATS_MASK) != 0) { + if (feats & ~LST_FEATS_MASK) { CERROR("Can't support these features: %x\n", (feats & ~LST_FEATS_MASK)); return -EPROTO; @@ -1875,7 +1895,7 @@ lstcon_session_feats_check(unsigned feats) spin_unlock(&console_session.ses_rpc_lock); - if (rc != 0) { + if (rc) { CERROR("remote features %x do not match with session features %x of console\n", feats, console_session.ses_features); } @@ -1886,13 +1906,13 @@ lstcon_session_feats_check(unsigned feats) static int lstcon_acceptor_handle(struct srpc_server_rpc *rpc) { - srpc_msg_t *rep = &rpc->srpc_replymsg; - srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; + srpc_msg_t *rep = &rpc->srpc_replymsg; + srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; srpc_join_reqst_t *jreq = &req->msg_body.join_reqst; srpc_join_reply_t *jrep = &rep->msg_body.join_reply; - lstcon_group_t *grp = NULL; + lstcon_group_t *grp = NULL; lstcon_ndlink_t *ndl; - int rc = 0; + int rc = 0; sfw_unpack_message(req); @@ -1905,26 +1925,26 @@ lstcon_acceptor_handle(struct srpc_server_rpc *rpc) goto out; } - if (lstcon_session_feats_check(req->msg_ses_feats) != 0) { + if (lstcon_session_feats_check(req->msg_ses_feats)) { jrep->join_status = EPROTO; goto out; } if (jreq->join_sid.ses_nid != LNET_NID_ANY && - !lstcon_session_match(jreq->join_sid)) { + !lstcon_session_match(jreq->join_sid)) { jrep->join_status = EBUSY; goto out; } - if (lstcon_group_find(jreq->join_group, &grp) != 0) { + if (lstcon_group_find(jreq->join_group, &grp)) { rc = lstcon_group_alloc(jreq->join_group, &grp); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); goto out; } list_add_tail(&grp->grp_link, - &console_session.ses_grp_list); + &console_session.ses_grp_list); lstcon_group_addref(grp); } @@ -1935,31 +1955,31 @@ lstcon_acceptor_handle(struct srpc_server_rpc *rpc) } rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0); - if (rc == 0) { + if (!rc) { jrep->join_status = EEXIST; goto out; } rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); goto out; } - ndl->ndl_node->nd_state = LST_NODE_ACTIVE; + ndl->ndl_node->nd_state = LST_NODE_ACTIVE; ndl->ndl_node->nd_timeout = console_session.ses_timeout; - if (grp->grp_userland == 0) + if (!grp->grp_userland) grp->grp_userland = 1; strlcpy(jrep->join_session, console_session.ses_name, sizeof(jrep->join_session)); jrep->join_timeout = console_session.ses_timeout; - jrep->join_status = 0; + jrep->join_status = 0; out: rep->msg_ses_feats = console_session.ses_features; - if (grp != NULL) + if (grp) lstcon_group_decref(grp); mutex_unlock(&console_session.ses_mutex); @@ -1967,17 +1987,17 @@ out: return rc; } -srpc_service_t lstcon_acceptor_service; +static srpc_service_t lstcon_acceptor_service; static void 
lstcon_init_acceptor_service(void) { /* initialize selftest console acceptor service table */ - lstcon_acceptor_service.sv_name = "join session"; - lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle; - lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN; + lstcon_acceptor_service.sv_name = "join session"; + lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle; + lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN; lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX; } -extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data); +extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr); static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry); @@ -1988,16 +2008,16 @@ lstcon_console_init(void) int i; int rc; - memset(&console_session, 0, sizeof(lstcon_session_t)); + memset(&console_session, 0, sizeof(struct lstcon_session)); - console_session.ses_id = LST_INVALID_SID; - console_session.ses_state = LST_SESSION_NONE; - console_session.ses_timeout = 0; - console_session.ses_force = 0; - console_session.ses_expired = 0; + console_session.ses_id = LST_INVALID_SID; + console_session.ses_state = LST_SESSION_NONE; + console_session.ses_timeout = 0; + console_session.ses_force = 0; + console_session.ses_expired = 0; console_session.ses_feats_updated = 0; - console_session.ses_features = LST_FEATS_MASK; - console_session.ses_laststamp = ktime_get_real_seconds(); + console_session.ses_features = LST_FEATS_MASK; + console_session.ses_laststamp = ktime_get_real_seconds(); mutex_init(&console_session.ses_mutex); @@ -2008,7 +2028,7 @@ lstcon_console_init(void) LIBCFS_ALLOC(console_session.ses_ndl_hash, sizeof(struct list_head) * LST_GLOBAL_HASHSIZE); - if (console_session.ses_ndl_hash == NULL) + if (!console_session.ses_ndl_hash) return -ENOMEM; for (i = 0; i < LST_GLOBAL_HASHSIZE; i++) @@ -2019,7 +2039,7 @@ lstcon_console_init(void) rc = srpc_add_service(&lstcon_acceptor_service); LASSERT(rc != -EBUSY); - if (rc != 0) { + if (rc) { LIBCFS_FREE(console_session.ses_ndl_hash, sizeof(struct list_head) * LST_GLOBAL_HASHSIZE); return rc; @@ -2027,14 +2047,14 @@ lstcon_console_init(void) rc = srpc_service_add_buffers(&lstcon_acceptor_service, lstcon_acceptor_service.sv_wi_total); - if (rc != 0) { + if (rc) { rc = -ENOMEM; goto out; } rc = libcfs_register_ioctl(&lstcon_ioctl_handler); - if (rc == 0) { + if (!rc) { lstcon_rpc_module_init(); return 0; } @@ -2075,9 +2095,8 @@ lstcon_console_fini(void) LASSERT(list_empty(&console_session.ses_bat_list)); LASSERT(list_empty(&console_session.ses_trans_list)); - for (i = 0; i < LST_NODE_HASHSIZE; i++) { + for (i = 0; i < LST_NODE_HASHSIZE; i++) LASSERT(list_empty(&console_session.ses_ndl_hash[i])); - } LIBCFS_FREE(console_session.ses_ndl_hash, sizeof(struct list_head) * LST_GLOBAL_HASHSIZE); diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h index 3f3286c0c7bf..554f582441f1 100644 --- a/drivers/staging/lustre/lnet/selftest/console.h +++ b/drivers/staging/lustre/lnet/selftest/console.h @@ -52,79 +52,79 @@ typedef struct lstcon_node { lnet_process_id_t nd_id; /* id of the node */ - int nd_ref; /* reference count */ - int nd_state; /* state of the node */ - int nd_timeout; /* session timeout */ - unsigned long nd_stamp; /* timestamp of last replied RPC */ + int nd_ref; /* reference count */ + int nd_state; /* state of the node */ + int nd_timeout; /* session timeout */ + unsigned long nd_stamp; /* timestamp of last replied RPC */ struct lstcon_rpc 
nd_ping; /* ping rpc */ } lstcon_node_t; /* node descriptor */ typedef struct { struct list_head ndl_link; /* chain on list */ struct list_head ndl_hlink; /* chain on hash */ - lstcon_node_t *ndl_node; /* pointer to node */ + lstcon_node_t *ndl_node; /* pointer to node */ } lstcon_ndlink_t; /* node link descriptor */ typedef struct { - struct list_head grp_link; /* chain on global group list + struct list_head grp_link; /* chain on global group list */ - int grp_ref; /* reference count */ - int grp_userland; /* has userland nodes */ - int grp_nnode; /* # of nodes */ - char grp_name[LST_NAME_SIZE]; /* group name */ - - struct list_head grp_trans_list; /* transaction list */ - struct list_head grp_ndl_list; /* nodes list */ - struct list_head grp_ndl_hash[0]; /* hash table for nodes */ + int grp_ref; /* reference count */ + int grp_userland; /* has userland nodes */ + int grp_nnode; /* # of nodes */ + char grp_name[LST_NAME_SIZE]; /* group name */ + + struct list_head grp_trans_list; /* transaction list */ + struct list_head grp_ndl_list; /* nodes list */ + struct list_head grp_ndl_hash[0]; /* hash table for nodes */ } lstcon_group_t; /* (alias of nodes) group descriptor */ -#define LST_BATCH_IDLE 0xB0 /* idle batch */ +#define LST_BATCH_IDLE 0xB0 /* idle batch */ #define LST_BATCH_RUNNING 0xB1 /* running batch */ typedef struct lstcon_tsb_hdr { - lst_bid_t tsb_id; /* batch ID */ - int tsb_index; /* test index */ + lst_bid_t tsb_id; /* batch ID */ + int tsb_index; /* test index */ } lstcon_tsb_hdr_t; typedef struct { - lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ - struct list_head bat_link; /* chain on session's batches list */ - int bat_ntest; /* # of test */ - int bat_state; /* state of the batch */ - int bat_arg; /* parameter for run|stop, timeout + lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ + struct list_head bat_link; /* chain on session's batches list */ + int bat_ntest; /* # of test */ + int bat_state; /* state of the batch */ + int bat_arg; /* parameter for run|stop, timeout * for run, force for stop */ - char bat_name[LST_NAME_SIZE];/* name of batch */ + char bat_name[LST_NAME_SIZE];/* name of batch */ struct list_head bat_test_list; /* list head of tests (lstcon_test_t) */ struct list_head bat_trans_list; /* list head of transaction */ - struct list_head bat_cli_list; /* list head of client nodes + struct list_head bat_cli_list; /* list head of client nodes * (lstcon_node_t) */ struct list_head *bat_cli_hash; /* hash table of client nodes */ - struct list_head bat_srv_list; /* list head of server nodes */ + struct list_head bat_srv_list; /* list head of server nodes */ struct list_head *bat_srv_hash; /* hash table of server nodes */ } lstcon_batch_t; /* (tests ) batch descriptor */ typedef struct lstcon_test { - lstcon_tsb_hdr_t tes_hdr; /* test batch header */ - struct list_head tes_link; /* chain on batch's tests list */ - lstcon_batch_t *tes_batch; /* pointer to batch */ - - int tes_type; /* type of the test, i.e: bulk, ping */ - int tes_stop_onerr; /* stop on error */ - int tes_oneside; /* one-sided test */ - int tes_concur; /* concurrency */ - int tes_loop; /* loop count */ - int tes_dist; /* nodes distribution of target group */ - int tes_span; /* nodes span of target group */ - int tes_cliidx; /* client index, used for RPC creating */ + lstcon_tsb_hdr_t tes_hdr; /* test batch header */ + struct list_head tes_link; /* chain on batch's tests list */ + lstcon_batch_t *tes_batch; /* pointer to batch */ + + int tes_type; /* type of the test, i.e: bulk, ping */ + int 
tes_stop_onerr; /* stop on error */ + int tes_oneside; /* one-sided test */ + int tes_concur; /* concurrency */ + int tes_loop; /* loop count */ + int tes_dist; /* nodes distribution of target group */ + int tes_span; /* nodes span of target group */ + int tes_cliidx; /* client index, used for RPC creating */ struct list_head tes_trans_list; /* transaction list */ - lstcon_group_t *tes_src_grp; /* group run the test */ - lstcon_group_t *tes_dst_grp; /* target group */ + lstcon_group_t *tes_src_grp; /* group run the test */ + lstcon_group_t *tes_dst_grp; /* target group */ - int tes_paramlen; /* test parameter length */ - char tes_param[0]; /* test parameter */ + int tes_paramlen; /* test parameter length */ + char tes_param[0]; /* test parameter */ } lstcon_test_t; /* a single test descriptor */ #define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */ @@ -135,25 +135,25 @@ typedef struct lstcon_test { #define LST_CONSOLE_TIMEOUT 300 /* default console timeout */ -typedef struct { - struct mutex ses_mutex; /* only 1 thread in session */ - lst_sid_t ses_id; /* global session id */ - int ses_key; /* local session key */ - int ses_state; /* state of session */ - int ses_timeout; /* timeout in seconds */ - time64_t ses_laststamp; /* last operation stamp (seconds) +struct lstcon_session { + struct mutex ses_mutex; /* only 1 thread in session */ + lst_sid_t ses_id; /* global session id */ + int ses_key; /* local session key */ + int ses_state; /* state of session */ + int ses_timeout; /* timeout in seconds */ + time64_t ses_laststamp; /* last operation stamp (seconds) */ - unsigned ses_features; /* tests features of the session + unsigned ses_features; /* tests features of the session */ - unsigned ses_feats_updated:1; /* features are synced with + unsigned ses_feats_updated:1; /* features are synced with * remote test nodes */ - unsigned ses_force:1; /* force creating */ - unsigned ses_shutdown:1; /* session is shutting down */ - unsigned ses_expired:1; /* console is timedout */ - __u64 ses_id_cookie; /* batch id cookie */ - char ses_name[LST_NAME_SIZE];/* session name */ - lstcon_rpc_trans_t *ses_ping; /* session pinger */ - stt_timer_t ses_ping_timer; /* timer for pinger */ + unsigned ses_force:1; /* force creating */ + unsigned ses_shutdown:1; /* session is shutting down */ + unsigned ses_expired:1; /* console is timedout */ + __u64 ses_id_cookie; /* batch id cookie */ + char ses_name[LST_NAME_SIZE];/* session name */ + lstcon_rpc_trans_t *ses_ping; /* session pinger */ + struct stt_timer ses_ping_timer; /* timer for pinger */ lstcon_trans_stat_t ses_trans_stat; /* transaction stats */ struct list_head ses_trans_list; /* global list of transaction */ @@ -162,12 +162,12 @@ typedef struct { struct list_head ses_ndl_list; /* global list of nodes */ struct list_head *ses_ndl_hash; /* hash table of nodes */ - spinlock_t ses_rpc_lock; /* serialize */ - atomic_t ses_rpc_counter; /* # of initialized RPCs */ + spinlock_t ses_rpc_lock; /* serialize */ + atomic_t ses_rpc_counter; /* # of initialized RPCs */ struct list_head ses_rpc_freelist; /* idle console rpc */ -} lstcon_session_t; /* session descriptor */ +}; /* session descriptor */ -extern lstcon_session_t console_session; +extern struct lstcon_session console_session; static inline lstcon_trans_stat_t * lstcon_trans_stat(void) @@ -176,7 +176,7 @@ lstcon_trans_stat(void) } static inline struct list_head * -lstcon_id2hash (lnet_process_id_t id, struct list_head *hash) +lstcon_id2hash(lnet_process_id_t id, struct list_head *hash) { 
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; @@ -184,51 +184,54 @@ lstcon_id2hash (lnet_process_id_t id, struct list_head *hash) } int lstcon_console_init(void); -int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data); int lstcon_console_fini(void); int lstcon_session_match(lst_sid_t sid); int lstcon_session_new(char *name, int key, unsigned version, - int timeout, int flags, lst_sid_t *sid_up); -int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp, - lstcon_ndlist_ent_t *entp, char *name_up, int len); + int timeout, int flags, lst_sid_t __user *sid_up); +int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key, + unsigned __user *verp, lstcon_ndlist_ent_t __user *entp, + char __user *name_up, int len); int lstcon_session_end(void); -int lstcon_session_debug(int timeout, struct list_head *result_up); +int lstcon_session_debug(int timeout, struct list_head __user *result_up); int lstcon_session_feats_check(unsigned feats); int lstcon_batch_debug(int timeout, char *name, - int client, struct list_head *result_up); + int client, struct list_head __user *result_up); int lstcon_group_debug(int timeout, char *name, - struct list_head *result_up); -int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up, - struct list_head *result_up); + struct list_head __user *result_up); +int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t __user *nds_up, + struct list_head __user *result_up); int lstcon_group_add(char *name); int lstcon_group_del(char *name); int lstcon_group_clean(char *name, int args); -int lstcon_group_refresh(char *name, struct list_head *result_up); -int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up, - unsigned *featp, struct list_head *result_up); -int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up, - struct list_head *result_up); -int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up, - int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up); -int lstcon_group_list(int idx, int len, char *name_up); +int lstcon_group_refresh(char *name, struct list_head __user *result_up); +int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up, + unsigned *featp, struct list_head __user *result_up); +int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up, + struct list_head __user *result_up); +int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up, + int *index_p, int *ndent_p, + lstcon_node_ent_t __user *ndents_up); +int lstcon_group_list(int idx, int len, char __user *name_up); int lstcon_batch_add(char *name); -int lstcon_batch_run(char *name, int timeout, struct list_head *result_up); -int lstcon_batch_stop(char *name, int force, struct list_head *result_up); +int lstcon_batch_run(char *name, int timeout, + struct list_head __user *result_up); +int lstcon_batch_stop(char *name, int force, + struct list_head __user *result_up); int lstcon_test_batch_query(char *name, int testidx, int client, int timeout, - struct list_head *result_up); + struct list_head __user *result_up); int lstcon_batch_del(char *name); -int lstcon_batch_list(int idx, int namelen, char *name_up); -int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, +int lstcon_batch_list(int idx, int namelen, char __user *name_up); +int lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up, int server, int testidx, int *index_p, - int *ndent_p, lstcon_node_ent_t *dents_up); + int *ndent_p, lstcon_node_ent_t __user *dents_up); 
int lstcon_group_stat(char *grp_name, int timeout, - struct list_head *result_up); -int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, - int timeout, struct list_head *result_up); + struct list_head __user *result_up); +int lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up, + int timeout, struct list_head __user *result_up); int lstcon_test_add(char *batch_name, int type, int loop, int concur, int dist, int span, char *src_name, char *dst_name, void *param, int paramlen, int *retp, - struct list_head *result_up); + struct list_head __user *result_up); #endif diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c index 1a2da7430190..926c3970c498 100644 --- a/drivers/staging/lustre/lnet/selftest/framework.c +++ b/drivers/staging/lustre/lnet/selftest/framework.c @@ -53,64 +53,64 @@ static int rpc_timeout = 64; module_param(rpc_timeout, int, 0644); MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)"); -#define sfw_unpack_id(id) \ -do { \ - __swab64s(&(id).nid); \ - __swab32s(&(id).pid); \ +#define sfw_unpack_id(id) \ +do { \ + __swab64s(&(id).nid); \ + __swab32s(&(id).pid); \ } while (0) -#define sfw_unpack_sid(sid) \ -do { \ - __swab64s(&(sid).ses_nid); \ - __swab64s(&(sid).ses_stamp); \ +#define sfw_unpack_sid(sid) \ +do { \ + __swab64s(&(sid).ses_nid); \ + __swab64s(&(sid).ses_stamp); \ } while (0) -#define sfw_unpack_fw_counters(fc) \ -do { \ - __swab32s(&(fc).running_ms); \ +#define sfw_unpack_fw_counters(fc) \ +do { \ + __swab32s(&(fc).running_ms); \ __swab32s(&(fc).active_batches); \ __swab32s(&(fc).zombie_sessions); \ - __swab32s(&(fc).brw_errors); \ - __swab32s(&(fc).ping_errors); \ + __swab32s(&(fc).brw_errors); \ + __swab32s(&(fc).ping_errors); \ } while (0) -#define sfw_unpack_rpc_counters(rc) \ -do { \ +#define sfw_unpack_rpc_counters(rc) \ +do { \ __swab32s(&(rc).errors); \ - __swab32s(&(rc).rpcs_sent); \ - __swab32s(&(rc).rpcs_rcvd); \ - __swab32s(&(rc).rpcs_dropped); \ - __swab32s(&(rc).rpcs_expired); \ - __swab64s(&(rc).bulk_get); \ - __swab64s(&(rc).bulk_put); \ + __swab32s(&(rc).rpcs_sent); \ + __swab32s(&(rc).rpcs_rcvd); \ + __swab32s(&(rc).rpcs_dropped); \ + __swab32s(&(rc).rpcs_expired); \ + __swab64s(&(rc).bulk_get); \ + __swab64s(&(rc).bulk_put); \ } while (0) -#define sfw_unpack_lnet_counters(lc) \ -do { \ +#define sfw_unpack_lnet_counters(lc) \ +do { \ __swab32s(&(lc).errors); \ - __swab32s(&(lc).msgs_max); \ - __swab32s(&(lc).msgs_alloc); \ - __swab32s(&(lc).send_count); \ - __swab32s(&(lc).recv_count); \ - __swab32s(&(lc).drop_count); \ - __swab32s(&(lc).route_count); \ - __swab64s(&(lc).send_length); \ - __swab64s(&(lc).recv_length); \ - __swab64s(&(lc).drop_length); \ - __swab64s(&(lc).route_length); \ + __swab32s(&(lc).msgs_max); \ + __swab32s(&(lc).msgs_alloc); \ + __swab32s(&(lc).send_count); \ + __swab32s(&(lc).recv_count); \ + __swab32s(&(lc).drop_count); \ + __swab32s(&(lc).route_count); \ + __swab64s(&(lc).send_length); \ + __swab64s(&(lc).recv_length); \ + __swab64s(&(lc).drop_length); \ + __swab64s(&(lc).route_length); \ } while (0) -#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0) -#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0) +#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive)) +#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive)) static struct smoketest_framework { struct list_head fw_zombie_rpcs; /* RPCs to be recycled */ struct list_head fw_zombie_sessions; /* stopping sessions */ - struct 
list_head fw_tests; /* registered test cases */ - atomic_t fw_nzombies; /* # zombie sessions */ - spinlock_t fw_lock; /* serialise */ - sfw_session_t *fw_session; /* _the_ session */ - int fw_shuttingdown; /* shutdown in progress */ + struct list_head fw_tests; /* registered test cases */ + atomic_t fw_nzombies; /* # zombie sessions */ + spinlock_t fw_lock; /* serialise */ + sfw_session_t *fw_session; /* _the_ session */ + int fw_shuttingdown; /* shutdown in progress */ struct srpc_server_rpc *fw_active_srpc;/* running RPC */ } sfw_data; @@ -139,17 +139,17 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops) { sfw_test_case_t *tsc; - if (sfw_find_test_case(service->sv_id) != NULL) { + if (sfw_find_test_case(service->sv_id)) { CERROR("Failed to register test %s (%d)\n", - service->sv_name, service->sv_id); + service->sv_name, service->sv_id); return -EEXIST; } LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t)); - if (tsc == NULL) + if (!tsc) return -ENOMEM; - tsc->tsc_cli_ops = cliops; + tsc->tsc_cli_ops = cliops; tsc->tsc_srv_service = service; list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests); @@ -160,11 +160,11 @@ static void sfw_add_session_timer(void) { sfw_session_t *sn = sfw_data.fw_session; - stt_timer_t *timer = &sn->sn_timer; + struct stt_timer *timer = &sn->sn_timer; LASSERT(!sfw_data.fw_shuttingdown); - if (sn == NULL || sn->sn_timeout == 0) + if (!sn || !sn->sn_timeout) return; LASSERT(!sn->sn_timer_active); @@ -172,7 +172,6 @@ sfw_add_session_timer(void) sn->sn_timer_active = 1; timer->stt_expires = ktime_get_real_seconds() + sn->sn_timeout; stt_add_timer(timer); - return; } static int @@ -180,10 +179,10 @@ sfw_del_session_timer(void) { sfw_session_t *sn = sfw_data.fw_session; - if (sn == NULL || !sn->sn_timer_active) + if (!sn || !sn->sn_timer_active) return 0; - LASSERT(sn->sn_timeout != 0); + LASSERT(sn->sn_timeout); if (stt_del_timer(&sn->sn_timer)) { /* timer defused */ sn->sn_timer_active = 0; @@ -195,14 +194,14 @@ sfw_del_session_timer(void) static void sfw_deactivate_session(void) - __must_hold(&sfw_data.fw_lock) +__must_hold(&sfw_data.fw_lock) { sfw_session_t *sn = sfw_data.fw_session; int nactive = 0; sfw_batch_t *tsb; sfw_test_case_t *tsc; - if (sn == NULL) + if (!sn) return; LASSERT(!sn->sn_timer_active); @@ -226,7 +225,7 @@ sfw_deactivate_session(void) } } - if (nactive != 0) + if (nactive) return; /* wait for active batches to stop */ list_del_init(&sn->sn_list); @@ -248,8 +247,8 @@ sfw_session_expired(void *data) LASSERT(sn == sfw_data.fw_session); CWARN("Session expired! 
sid: %s-%llu, name: %s\n", - libcfs_nid2str(sn->sn_id.ses_nid), - sn->sn_id.ses_stamp, &sn->sn_name[0]); + libcfs_nid2str(sn->sn_id.ses_nid), + sn->sn_id.ses_stamp, &sn->sn_name[0]); sn->sn_timer_active = 0; sfw_deactivate_session(); @@ -261,7 +260,7 @@ static inline void sfw_init_session(sfw_session_t *sn, lst_sid_t sid, unsigned features, const char *name) { - stt_timer_t *timer = &sn->sn_timer; + struct stt_timer *timer = &sn->sn_timer; memset(sn, 0, sizeof(sfw_session_t)); INIT_LIST_HEAD(&sn->sn_list); @@ -272,10 +271,10 @@ sfw_init_session(sfw_session_t *sn, lst_sid_t sid, strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name)); sn->sn_timer_active = 0; - sn->sn_id = sid; - sn->sn_features = features; - sn->sn_timeout = session_timeout; - sn->sn_started = cfs_time_current(); + sn->sn_id = sid; + sn->sn_features = features; + sn->sn_timeout = session_timeout; + sn->sn_started = cfs_time_current(); timer->stt_data = sn; timer->stt_func = sfw_session_expired; @@ -289,29 +288,26 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc) struct srpc_service *sv = rpc->srpc_scd->scd_svc; int status = rpc->srpc_status; - CDEBUG(D_NET, - "Incoming framework RPC done: service %s, peer %s, status %s:%d\n", - sv->sv_name, libcfs_id2str(rpc->srpc_peer), - swi_state2str(rpc->srpc_wi.swi_state), - status); + CDEBUG(D_NET, "Incoming framework RPC done: service %s, peer %s, status %s:%d\n", + sv->sv_name, libcfs_id2str(rpc->srpc_peer), + swi_state2str(rpc->srpc_wi.swi_state), + status); - if (rpc->srpc_bulk != NULL) + if (rpc->srpc_bulk) sfw_free_pages(rpc); - return; } static void sfw_client_rpc_fini(srpc_client_rpc_t *rpc) { - LASSERT(rpc->crpc_bulk.bk_niov == 0); + LASSERT(!rpc->crpc_bulk.bk_niov); LASSERT(list_empty(&rpc->crpc_list)); - LASSERT(atomic_read(&rpc->crpc_refcount) == 0); + LASSERT(!atomic_read(&rpc->crpc_refcount)); - CDEBUG(D_NET, - "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(rpc->crpc_wi.swi_state), - rpc->crpc_aborted, rpc->crpc_status); + CDEBUG(D_NET, "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n", + rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), + swi_state2str(rpc->crpc_wi.swi_state), + rpc->crpc_aborted, rpc->crpc_status); spin_lock(&sfw_data.fw_lock); @@ -328,7 +324,7 @@ sfw_find_batch(lst_bid_t bid) sfw_session_t *sn = sfw_data.fw_session; sfw_batch_t *bat; - LASSERT(sn != NULL); + LASSERT(sn); list_for_each_entry(bat, &sn->sn_batches, bat_list) { if (bat->bat_id.bat_id == bid.bat_id) @@ -344,19 +340,19 @@ sfw_bid2batch(lst_bid_t bid) sfw_session_t *sn = sfw_data.fw_session; sfw_batch_t *bat; - LASSERT(sn != NULL); + LASSERT(sn); bat = sfw_find_batch(bid); - if (bat != NULL) + if (bat) return bat; LIBCFS_ALLOC(bat, sizeof(sfw_batch_t)); - if (bat == NULL) + if (!bat) return NULL; - bat->bat_error = 0; - bat->bat_session = sn; - bat->bat_id = bid; + bat->bat_error = 0; + bat->bat_session = sn; + bat->bat_id = bid; atomic_set(&bat->bat_nactive, 0); INIT_LIST_HEAD(&bat->bat_tests); @@ -371,14 +367,14 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) sfw_counters_t *cnt = &reply->str_fw; sfw_batch_t *bat; - reply->str_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->str_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; if (request->str_sid.ses_nid == LNET_NID_ANY) { reply->str_status = EINVAL; return 0; } - if (sn == NULL || !sfw_sid_equal(request->str_sid, sn->sn_id)) { + if (!sn || !sfw_sid_equal(request->str_sid, sn->sn_id)) { reply->str_status = ESRCH; return 0; } @@ -386,11 +382,13 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) lnet_counters_get(&reply->str_lnet); srpc_get_counters(&reply->str_rpc); - /* send over the msecs since the session was started - - with 32 bits to send, this is ~49 days */ - cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started); - cnt->brw_errors = atomic_read(&sn->sn_brw_errors); - cnt->ping_errors = atomic_read(&sn->sn_ping_errors); + /* + * send over the msecs since the session was started + * with 32 bits to send, this is ~49 days + */ + cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started); + cnt->brw_errors = atomic_read(&sn->sn_brw_errors); + cnt->ping_errors = atomic_read(&sn->sn_ping_errors); cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies); cnt->active_batches = 0; @@ -408,18 +406,18 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) { sfw_session_t *sn = sfw_data.fw_session; srpc_msg_t *msg = container_of(request, srpc_msg_t, - msg_body.mksn_reqst); + msg_body.mksn_reqst); int cplen = 0; if (request->mksn_sid.ses_nid == LNET_NID_ANY) { - reply->mksn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->mksn_sid = !sn ? LST_INVALID_SID : sn->sn_id; reply->mksn_status = EINVAL; return 0; } - if (sn != NULL) { - reply->mksn_status = 0; - reply->mksn_sid = sn->sn_id; + if (sn) { + reply->mksn_status = 0; + reply->mksn_sid = sn->sn_id; reply->mksn_timeout = sn->sn_timeout; if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) { @@ -437,21 +435,23 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) } } - /* reject the request if it requires unknown features + /* + * reject the request if it requires unknown features * NB: old version will always accept all features because it's not * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also * harmless because it will return zero feature to console, and it's * console's responsibility to make sure all nodes in a session have - * same feature mask. */ - if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) { + * same feature mask. + */ + if (msg->msg_ses_feats & ~LST_FEATS_MASK) { reply->mksn_status = EPROTO; return 0; } /* brand new or create by force */ LIBCFS_ALLOC(sn, sizeof(sfw_session_t)); - if (sn == NULL) { - CERROR("Dropping RPC (mksn) under memory pressure.\n"); + if (!sn) { + CERROR("dropping RPC mksn under memory pressure\n"); return -ENOMEM; } @@ -461,13 +461,13 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) spin_lock(&sfw_data.fw_lock); sfw_deactivate_session(); - LASSERT(sfw_data.fw_session == NULL); + LASSERT(!sfw_data.fw_session); sfw_data.fw_session = sn; spin_unlock(&sfw_data.fw_lock); - reply->mksn_status = 0; - reply->mksn_sid = sn->sn_id; + reply->mksn_status = 0; + reply->mksn_sid = sn->sn_id; reply->mksn_timeout = sn->sn_timeout; return 0; } @@ -477,15 +477,15 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) { sfw_session_t *sn = sfw_data.fw_session; - reply->rmsn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->rmsn_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; if (request->rmsn_sid.ses_nid == LNET_NID_ANY) { reply->rmsn_status = EINVAL; return 0; } - if (sn == NULL || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) { - reply->rmsn_status = (sn == NULL) ? ESRCH : EBUSY; + if (!sn || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) { + reply->rmsn_status = !sn ? ESRCH : EBUSY; return 0; } @@ -499,8 +499,8 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) spin_unlock(&sfw_data.fw_lock); reply->rmsn_status = 0; - reply->rmsn_sid = LST_INVALID_SID; - LASSERT(sfw_data.fw_session == NULL); + reply->rmsn_sid = LST_INVALID_SID; + LASSERT(!sfw_data.fw_session); return 0; } @@ -509,14 +509,14 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply) { sfw_session_t *sn = sfw_data.fw_session; - if (sn == NULL) { + if (!sn) { reply->dbg_status = ESRCH; - reply->dbg_sid = LST_INVALID_SID; + reply->dbg_sid = LST_INVALID_SID; return 0; } - reply->dbg_status = 0; - reply->dbg_sid = sn->sn_id; + reply->dbg_status = 0; + reply->dbg_sid = sn->sn_id; reply->dbg_timeout = sn->sn_timeout; if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name)) >= sizeof(reply->dbg_name)) @@ -539,10 +539,16 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc) static inline int sfw_test_buffers(sfw_test_instance_t *tsi) { - struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service); - struct srpc_service *svc = tsc->tsc_srv_service; + struct sfw_test_case *tsc; + struct srpc_service *svc; int nbuf; + LASSERT(tsi); + tsc = sfw_find_test_case(tsi->tsi_service); + LASSERT(tsc); + svc = tsc->tsc_srv_service; + LASSERT(svc); + nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts; return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA); } @@ -555,10 +561,10 @@ sfw_load_test(struct sfw_test_instance *tsi) int nbuf; int rc; - LASSERT(tsi != NULL); + LASSERT(tsi); tsc = sfw_find_test_case(tsi->tsi_service); nbuf = sfw_test_buffers(tsi); - LASSERT(tsc != NULL); + LASSERT(tsc); svc = tsc->tsc_srv_service; if (tsi->tsi_is_client) { @@ -567,39 +573,44 @@ sfw_load_test(struct sfw_test_instance *tsi) } rc = srpc_service_add_buffers(svc, nbuf); - if (rc != 0) { + if (rc) { CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n", svc->sv_name, nbuf, rc); - /* NB: this error handler is not strictly correct, because + /* + * NB: this error handler is not strictly correct, because * it may release more buffers than already allocated, * but it doesn't matter because request portal should - * be lazy portal and will grow buffers if necessary. */ + * be lazy portal and will grow buffers if necessary. + */ srpc_service_remove_buffers(svc, nbuf); return -ENOMEM; } CDEBUG(D_NET, "Reserved %d buffers for test %s\n", nbuf * (srpc_serv_is_framework(svc) ? - 1 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name); + 2 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name); return 0; } static void sfw_unload_test(struct sfw_test_instance *tsi) { - struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service); + struct sfw_test_case *tsc; - LASSERT(tsc != NULL); + LASSERT(tsi); + tsc = sfw_find_test_case(tsi->tsi_service); + LASSERT(tsc); if (tsi->tsi_is_client) return; - /* shrink buffers, because request portal is lazy portal + /* + * shrink buffers, because request portal is lazy portal * which can grow buffers at runtime so we may leave - * some buffers behind, but never mind... */ + * some buffers behind, but never mind... 
+ */ srpc_service_remove_buffers(tsc->tsc_srv_service, sfw_test_buffers(tsi)); - return; } static void @@ -619,14 +630,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi) while (!list_empty(&tsi->tsi_units)) { tsu = list_entry(tsi->tsi_units.next, - sfw_test_unit_t, tsu_list); + sfw_test_unit_t, tsu_list); list_del(&tsu->tsu_list); LIBCFS_FREE(tsu, sizeof(*tsu)); } while (!list_empty(&tsi->tsi_free_rpcs)) { rpc = list_entry(tsi->tsi_free_rpcs.next, - srpc_client_rpc_t, crpc_list); + srpc_client_rpc_t, crpc_list); list_del(&rpc->crpc_list); LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); } @@ -634,7 +645,6 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi) clean: sfw_unload_test(tsi); LIBCFS_FREE(tsi, sizeof(*tsi)); - return; } static void @@ -647,13 +657,12 @@ sfw_destroy_batch(sfw_batch_t *tsb) while (!list_empty(&tsb->bat_tests)) { tsi = list_entry(tsb->bat_tests.next, - sfw_test_instance_t, tsi_list); + sfw_test_instance_t, tsi_list); list_del_init(&tsi->tsi_list); sfw_destroy_test_instance(tsi); } LIBCFS_FREE(tsb, sizeof(sfw_batch_t)); - return; } void @@ -666,14 +675,13 @@ sfw_destroy_session(sfw_session_t *sn) while (!list_empty(&sn->sn_batches)) { batch = list_entry(sn->sn_batches.next, - sfw_batch_t, bat_list); + sfw_batch_t, bat_list); list_del_init(&batch->bat_list); sfw_destroy_batch(batch); } LIBCFS_FREE(sn, sizeof(*sn)); atomic_dec(&sfw_data.fw_nzombies); - return; } static void @@ -690,7 +698,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg) LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC)); if (req->tsr_service == SRPC_SERVICE_BRW) { - if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) { + if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) { test_bulk_req_t *bulk = &req->tsr_u.bulk_v0; __swab32s(&bulk->blk_opc); @@ -718,7 +726,6 @@ sfw_unpack_addtest_req(srpc_msg_t *msg) } LBUG(); - return; } static int @@ -734,9 +741,9 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) int rc; LIBCFS_ALLOC(tsi, sizeof(*tsi)); - if (tsi == NULL) { + if (!tsi) { CERROR("Can't allocate test instance for batch: %llu\n", - tsb->bat_id.bat_id); + tsb->bat_id.bat_id); return -ENOMEM; } @@ -746,16 +753,16 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) INIT_LIST_HEAD(&tsi->tsi_free_rpcs); INIT_LIST_HEAD(&tsi->tsi_active_rpcs); - tsi->tsi_stopping = 0; - tsi->tsi_batch = tsb; - tsi->tsi_loop = req->tsr_loop; - tsi->tsi_concur = req->tsr_concur; - tsi->tsi_service = req->tsr_service; - tsi->tsi_is_client = !!(req->tsr_is_client); + tsi->tsi_stopping = 0; + tsi->tsi_batch = tsb; + tsi->tsi_loop = req->tsr_loop; + tsi->tsi_concur = req->tsr_concur; + tsi->tsi_service = req->tsr_service; + tsi->tsi_is_client = !!(req->tsr_is_client); tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr); rc = sfw_load_test(tsi); - if (rc != 0) { + if (rc) { LIBCFS_FREE(tsi, sizeof(*tsi)); return rc; } @@ -768,7 +775,7 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) return 0; } - LASSERT(bk != NULL); + LASSERT(bk); LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest); LASSERT((unsigned int)bk->bk_len >= sizeof(lnet_process_id_packed_t) * ndest); @@ -782,36 +789,36 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) int j; dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page); - LASSERT(dests != NULL); /* my pages are within KVM always */ + LASSERT(dests); /* my pages are within KVM always */ id = dests[i % SFW_ID_PER_PAGE]; if (msg->msg_magic != SRPC_MSG_MAGIC) sfw_unpack_id(id); for (j = 0; j < tsi->tsi_concur; j++) { 
LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t)); - if (tsu == NULL) { + if (!tsu) { rc = -ENOMEM; CERROR("Can't allocate tsu for %d\n", - tsi->tsi_service); + tsi->tsi_service); goto error; } tsu->tsu_dest.nid = id.nid; tsu->tsu_dest.pid = id.pid; tsu->tsu_instance = tsi; - tsu->tsu_private = NULL; + tsu->tsu_private = NULL; list_add_tail(&tsu->tsu_list, &tsi->tsi_units); } } rc = tsi->tsi_ops->tso_init(tsi); - if (rc == 0) { + if (!rc) { list_add_tail(&tsi->tsi_list, &tsb->bat_tests); return 0; } error: - LASSERT(rc != 0); + LASSERT(rc); sfw_destroy_test_instance(tsi); return rc; } @@ -856,7 +863,6 @@ sfw_test_unit_done(sfw_test_unit_t *tsu) spin_unlock(&sfw_data.fw_lock); sfw_destroy_session(sn); - return; } static void @@ -876,9 +882,8 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc) list_del_init(&rpc->crpc_list); /* batch is stopping or loop is done or get error */ - if (tsi->tsi_stopping || - tsu->tsu_loop == 0 || - (rpc->crpc_status != 0 && tsi->tsi_stoptsu_onerr)) + if (tsi->tsi_stopping || !tsu->tsu_loop || + (rpc->crpc_status && tsi->tsi_stoptsu_onerr)) done = 1; /* dec ref for poster */ @@ -892,7 +897,6 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc) } sfw_test_unit_done(tsu); - return; } int @@ -906,18 +910,17 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, spin_lock(&tsi->tsi_lock); LASSERT(sfw_test_active(tsi)); - - if (!list_empty(&tsi->tsi_free_rpcs)) { /* pick request from buffer */ - rpc = list_entry(tsi->tsi_free_rpcs.next, - srpc_client_rpc_t, crpc_list); + rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs, + srpc_client_rpc_t, crpc_list); + if (rpc) { LASSERT(nblk == rpc->crpc_bulk.bk_niov); list_del_init(&rpc->crpc_list); } spin_unlock(&tsi->tsi_lock); - if (rpc == NULL) { + if (!rpc) { rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk, blklen, sfw_test_rpc_done, sfw_test_rpc_fini, tsu); @@ -927,7 +930,7 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, sfw_test_rpc_fini, tsu); } - if (rpc == NULL) { + if (!rpc) { CERROR("Can't create rpc for test %d\n", tsi->tsi_service); return -ENOMEM; } @@ -947,12 +950,12 @@ sfw_run_test(swi_workitem_t *wi) LASSERT(wi == &tsu->tsu_worker); - if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc) != 0) { - LASSERT(rpc == NULL); + if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) { + LASSERT(!rpc); goto test_done; } - LASSERT(rpc != NULL); + LASSERT(rpc); spin_lock(&tsi->tsi_lock); @@ -968,9 +971,8 @@ sfw_run_test(swi_workitem_t *wi) list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs); spin_unlock(&tsi->tsi_lock); - rpc->crpc_timeout = rpc_timeout; - spin_lock(&rpc->crpc_lock); + rpc->crpc_timeout = rpc_timeout; srpc_post_rpc(rpc); spin_unlock(&rpc->crpc_lock); return 0; @@ -1015,8 +1017,7 @@ sfw_run_batch(sfw_batch_t *tsb) tsu->tsu_loop = tsi->tsi_loop; wi = &tsu->tsu_worker; swi_init_workitem(wi, tsu, sfw_run_test, - lst_sched_test[\ - lnet_cpt_of_nid(tsu->tsu_dest.nid)]); + lst_sched_test[lnet_cpt_of_nid(tsu->tsu_dest.nid)]); swi_schedule_workitem(wi); } } @@ -1074,7 +1075,7 @@ sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply) if (testidx < 0) return -EINVAL; - if (testidx == 0) { + if (!testidx) { reply->bar_active = atomic_read(&tsb->bat_nactive); return 0; } @@ -1101,11 +1102,11 @@ int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, int sink) { - LASSERT(rpc->srpc_bulk == NULL); + LASSERT(!rpc->srpc_bulk); LASSERT(npages > 0 && npages <= LNET_MAX_IOV); rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink); - if 
(rpc->srpc_bulk == NULL) + if (!rpc->srpc_bulk) return -ENOMEM; return 0; @@ -1121,13 +1122,13 @@ sfw_add_test(struct srpc_server_rpc *rpc) sfw_batch_t *bat; request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst; - reply->tsr_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id; - if (request->tsr_loop == 0 || - request->tsr_concur == 0 || + if (!request->tsr_loop || + !request->tsr_concur || request->tsr_sid.ses_nid == LNET_NID_ANY || request->tsr_ndest > SFW_MAX_NDESTS || - (request->tsr_is_client && request->tsr_ndest == 0) || + (request->tsr_is_client && !request->tsr_ndest) || request->tsr_concur > SFW_MAX_CONCUR || request->tsr_service > SRPC_SERVICE_MAX_ID || request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) { @@ -1135,17 +1136,17 @@ sfw_add_test(struct srpc_server_rpc *rpc) return 0; } - if (sn == NULL || !sfw_sid_equal(request->tsr_sid, sn->sn_id) || - sfw_find_test_case(request->tsr_service) == NULL) { + if (!sn || !sfw_sid_equal(request->tsr_sid, sn->sn_id) || + !sfw_find_test_case(request->tsr_service)) { reply->tsr_status = ENOENT; return 0; } bat = sfw_bid2batch(request->tsr_bid); - if (bat == NULL) { - CERROR("Dropping RPC (%s) from %s under memory pressure.\n", - rpc->srpc_scd->scd_svc->sv_name, - libcfs_id2str(rpc->srpc_peer)); + if (!bat) { + CERROR("dropping RPC %s from %s under memory pressure\n", + rpc->srpc_scd->scd_svc->sv_name, + libcfs_id2str(rpc->srpc_peer)); return -ENOMEM; } @@ -1154,15 +1155,15 @@ sfw_add_test(struct srpc_server_rpc *rpc) return 0; } - if (request->tsr_is_client && rpc->srpc_bulk == NULL) { + if (request->tsr_is_client && !rpc->srpc_bulk) { /* rpc will be resumed later in sfw_bulk_ready */ int npg = sfw_id_pages(request->tsr_ndest); int len; - if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { + if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { len = npg * PAGE_CACHE_SIZE; - } else { + } else { len = sizeof(lnet_process_id_packed_t) * request->tsr_ndest; } @@ -1171,11 +1172,11 @@ sfw_add_test(struct srpc_server_rpc *rpc) } rc = sfw_add_test_instance(bat, rpc); - CDEBUG(rc == 0 ? D_NET : D_WARNING, - "%s test: sv %d %s, loop %d, concur %d, ndest %d\n", - rc == 0 ? "Added" : "Failed to add", request->tsr_service, - request->tsr_is_client ? "client" : "server", - request->tsr_loop, request->tsr_concur, request->tsr_ndest); + CDEBUG(!rc ? D_NET : D_WARNING, + "%s test: sv %d %s, loop %d, concur %d, ndest %d\n", + !rc ? "Added" : "Failed to add", request->tsr_service, + request->tsr_is_client ? "client" : "server", + request->tsr_loop, request->tsr_concur, request->tsr_ndest); reply->tsr_status = (rc < 0) ? -rc : rc; return 0; @@ -1188,15 +1189,15 @@ sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply) int rc = 0; sfw_batch_t *bat; - reply->bar_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->bar_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; - if (sn == NULL || !sfw_sid_equal(request->bar_sid, sn->sn_id)) { + if (!sn || !sfw_sid_equal(request->bar_sid, sn->sn_id)) { reply->bar_status = ESRCH; return 0; } bat = sfw_find_batch(request->bar_bid); - if (bat == NULL) { + if (!bat) { reply->bar_status = ENOENT; return 0; } @@ -1231,7 +1232,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) unsigned features = LST_FEATS_MASK; int rc = 0; - LASSERT(sfw_data.fw_active_srpc == NULL); + LASSERT(!sfw_data.fw_active_srpc); LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID); spin_lock(&sfw_data.fw_lock); @@ -1242,7 +1243,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) } /* Remove timer to avoid racing with it or expiring active session */ - if (sfw_del_session_timer() != 0) { + if (sfw_del_session_timer()) { CERROR("Dropping RPC (%s) from %s: racing with expiry timer.", sv->sv_name, libcfs_id2str(rpc->srpc_peer)); spin_unlock(&sfw_data.fw_lock); @@ -1262,19 +1263,21 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) sv->sv_id != SRPC_SERVICE_DEBUG) { sfw_session_t *sn = sfw_data.fw_session; - if (sn != NULL && + if (sn && sn->sn_features != request->msg_ses_feats) { CNETERR("Features of framework RPC don't match features of current session: %x/%x\n", request->msg_ses_feats, sn->sn_features); reply->msg_body.reply.status = EPROTO; - reply->msg_body.reply.sid = sn->sn_id; + reply->msg_body.reply.sid = sn->sn_id; goto out; } - } else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) { - /* NB: at this point, old version will ignore features and + } else if (request->msg_ses_feats & ~LST_FEATS_MASK) { + /** + * NB: at this point, old version will ignore features and * create new session anyway, so console should be able - * to handle this */ + * to handle this + */ reply->msg_body.reply.status = EPROTO; goto out; } @@ -1312,7 +1315,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) break; } - if (sfw_data.fw_session != NULL) + if (sfw_data.fw_session) features = sfw_data.fw_session->sn_features; out: reply->msg_ses_feats = features; @@ -1333,14 +1336,14 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status) struct srpc_service *sv = rpc->srpc_scd->scd_svc; int rc; - LASSERT(rpc->srpc_bulk != NULL); + LASSERT(rpc->srpc_bulk); LASSERT(sv->sv_id == SRPC_SERVICE_TEST); - LASSERT(sfw_data.fw_active_srpc == NULL); + LASSERT(!sfw_data.fw_active_srpc); LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client); spin_lock(&sfw_data.fw_lock); - if (status != 0) { + if (status) { CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n", sv->sv_name, libcfs_id2str(rpc->srpc_peer), status); spin_unlock(&sfw_data.fw_lock); @@ -1352,8 +1355,8 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status) return -ESHUTDOWN; } - if (sfw_del_session_timer() != 0) { - CERROR("Dropping RPC (%s) from %s: racing with expiry timer", + if (sfw_del_session_timer()) { + CERROR("dropping RPC %s from %s: racing with expiry timer\n", sv->sv_name, libcfs_id2str(rpc->srpc_peer)); spin_unlock(&sfw_data.fw_lock); return -EAGAIN; @@ -1386,9 +1389,9 @@ sfw_create_rpc(lnet_process_id_t peer, int service, LASSERT(!sfw_data.fw_shuttingdown); LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID); - if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) { + if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) { rpc = list_entry(sfw_data.fw_zombie_rpcs.next, - srpc_client_rpc_t, crpc_list); + srpc_client_rpc_t, crpc_list); list_del(&rpc->crpc_list); srpc_init_client_rpc(rpc, peer, service, 0, 0, @@ 
-1397,15 +1400,15 @@ sfw_create_rpc(lnet_process_id_t peer, int service, spin_unlock(&sfw_data.fw_lock); - if (rpc == NULL) { + if (!rpc) { rpc = srpc_create_client_rpc(peer, service, nbulkiov, bulklen, done, - nbulkiov != 0 ? NULL : + nbulkiov ? NULL : sfw_client_rpc_fini, priv); } - if (rpc != NULL) /* "session" is concept in framework */ + if (rpc) /* "session" is concept in framework */ rpc->crpc_reqstmsg.msg_ses_feats = features; return rpc; @@ -1552,7 +1555,6 @@ sfw_unpack_message(srpc_msg_t *msg) } LBUG(); - return; } void @@ -1564,7 +1566,6 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc) spin_lock(&rpc->crpc_lock); srpc_abort_rpc(rpc, -EINTR); spin_unlock(&rpc->crpc_lock); - return; } void @@ -1581,7 +1582,6 @@ sfw_post_rpc(srpc_client_rpc_t *rpc) srpc_post_rpc(rpc); spin_unlock(&rpc->crpc_lock); - return; } static srpc_service_t sfw_services[] = { @@ -1622,16 +1622,6 @@ static srpc_service_t sfw_services[] = { } }; -extern sfw_test_client_ops_t ping_test_client; -extern srpc_service_t ping_test_service; -extern void ping_init_test_client(void); -extern void ping_init_test_service(void); - -extern sfw_test_client_ops_t brw_test_client; -extern srpc_service_t brw_test_service; -extern void brw_init_test_client(void); -extern void brw_init_test_service(void); - int sfw_startup(void) { @@ -1643,25 +1633,25 @@ sfw_startup(void) if (session_timeout < 0) { CERROR("Session timeout must be non-negative: %d\n", - session_timeout); + session_timeout); return -EINVAL; } if (rpc_timeout < 0) { CERROR("RPC timeout must be non-negative: %d\n", - rpc_timeout); + rpc_timeout); return -EINVAL; } - if (session_timeout == 0) + if (!session_timeout) CWARN("Zero session_timeout specified - test sessions never expire.\n"); - if (rpc_timeout == 0) + if (!rpc_timeout) CWARN("Zero rpc_timeout specified - test RPC never expire.\n"); memset(&sfw_data, 0, sizeof(struct smoketest_framework)); - sfw_data.fw_session = NULL; + sfw_data.fw_session = NULL; sfw_data.fw_active_srpc = NULL; spin_lock_init(&sfw_data.fw_lock); atomic_set(&sfw_data.fw_nzombies, 0); @@ -1672,12 +1662,12 @@ sfw_startup(void) brw_init_test_client(); brw_init_test_service(); rc = sfw_register_test(&brw_test_service, &brw_test_client); - LASSERT(rc == 0); + LASSERT(!rc); ping_init_test_client(); ping_init_test_service(); rc = sfw_register_test(&ping_test_service, &ping_test_client); - LASSERT(rc == 0); + LASSERT(!rc); error = 0; list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) { @@ -1685,29 +1675,29 @@ sfw_startup(void) rc = srpc_add_service(sv); LASSERT(rc != -EBUSY); - if (rc != 0) { + if (rc) { CWARN("Failed to add %s service: %d\n", - sv->sv_name, rc); + sv->sv_name, rc); error = rc; } } for (i = 0; ; i++) { sv = &sfw_services[i]; - if (sv->sv_name == NULL) + if (!sv->sv_name) break; sv->sv_bulk_ready = NULL; - sv->sv_handler = sfw_handle_server_rpc; - sv->sv_wi_total = SFW_FRWK_WI_MAX; + sv->sv_handler = sfw_handle_server_rpc; + sv->sv_wi_total = SFW_FRWK_WI_MAX; if (sv->sv_id == SRPC_SERVICE_TEST) sv->sv_bulk_ready = sfw_bulk_ready; rc = srpc_add_service(sv); LASSERT(rc != -EBUSY); - if (rc != 0) { + if (rc) { CWARN("Failed to add %s service: %d\n", - sv->sv_name, rc); + sv->sv_name, rc); error = rc; } @@ -1716,14 +1706,14 @@ sfw_startup(void) continue; rc = srpc_service_add_buffers(sv, sv->sv_wi_total); - if (rc != 0) { + if (rc) { CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n", sv->sv_name, sv->sv_wi_total, rc); error = -ENOMEM; } } - if (error != 0) + if (error) sfw_shutdown(); return error; } @@ -1738,15 
+1728,15 @@ sfw_shutdown(void) spin_lock(&sfw_data.fw_lock); sfw_data.fw_shuttingdown = 1; - lst_wait_until(sfw_data.fw_active_srpc == NULL, sfw_data.fw_lock, + lst_wait_until(!sfw_data.fw_active_srpc, sfw_data.fw_lock, "waiting for active RPC to finish.\n"); - if (sfw_del_session_timer() != 0) - lst_wait_until(sfw_data.fw_session == NULL, sfw_data.fw_lock, + if (sfw_del_session_timer()) + lst_wait_until(!sfw_data.fw_session, sfw_data.fw_lock, "waiting for session timer to explode.\n"); sfw_deactivate_session(); - lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0, + lst_wait_until(!atomic_read(&sfw_data.fw_nzombies), sfw_data.fw_lock, "waiting for %d zombie sessions to die.\n", atomic_read(&sfw_data.fw_nzombies)); @@ -1755,7 +1745,7 @@ sfw_shutdown(void) for (i = 0; ; i++) { sv = &sfw_services[i]; - if (sv->sv_name == NULL) + if (!sv->sv_name) break; srpc_shutdown_service(sv); @@ -1772,7 +1762,7 @@ sfw_shutdown(void) srpc_client_rpc_t *rpc; rpc = list_entry(sfw_data.fw_zombie_rpcs.next, - srpc_client_rpc_t, crpc_list); + srpc_client_rpc_t, crpc_list); list_del(&rpc->crpc_list); LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); @@ -1780,7 +1770,7 @@ sfw_shutdown(void) for (i = 0; ; i++) { sv = &sfw_services[i]; - if (sv->sv_name == NULL) + if (!sv->sv_name) break; srpc_wait_service_shutdown(sv); @@ -1788,13 +1778,11 @@ sfw_shutdown(void) while (!list_empty(&sfw_data.fw_tests)) { tsc = list_entry(sfw_data.fw_tests.next, - sfw_test_case_t, tsc_list); + sfw_test_case_t, tsc_list); srpc_wait_service_shutdown(tsc->tsc_srv_service); list_del(&tsc->tsc_list); LIBCFS_FREE(tsc, sizeof(*tsc)); } - - return; } diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c index 46cbdf0456cc..cc046b1d4d0a 100644 --- a/drivers/staging/lustre/lnet/selftest/module.c +++ b/drivers/staging/lustre/lnet/selftest/module.c @@ -37,9 +37,10 @@ #define DEBUG_SUBSYSTEM S_LNET #include "selftest.h" +#include "console.h" enum { - LST_INIT_NONE = 0, + LST_INIT_NONE = 0, LST_INIT_WI_SERIAL, LST_INIT_WI_TEST, LST_INIT_RPC, @@ -47,16 +48,13 @@ enum { LST_INIT_CONSOLE }; -extern int lstcon_console_init(void); -extern int lstcon_console_fini(void); - static int lst_init_step = LST_INIT_NONE; struct cfs_wi_sched *lst_sched_serial; struct cfs_wi_sched **lst_sched_test; static void -lnet_selftest_fini(void) +lnet_selftest_exit(void) { int i; @@ -70,7 +68,7 @@ lnet_selftest_fini(void) case LST_INIT_WI_TEST: for (i = 0; i < cfs_cpt_number(lnet_cpt_table()); i++) { - if (lst_sched_test[i] == NULL) + if (!lst_sched_test[i]) continue; cfs_wi_sched_destroy(lst_sched_test[i]); } @@ -98,7 +96,7 @@ lnet_selftest_init(void) rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY, 1, &lst_sched_serial); - if (rc != 0) { + if (rc) { CERROR("Failed to create serial WI scheduler for LST\n"); return rc; } @@ -106,7 +104,7 @@ lnet_selftest_init(void) nscheds = cfs_cpt_number(lnet_cpt_table()); LIBCFS_ALLOC(lst_sched_test, sizeof(lst_sched_test[0]) * nscheds); - if (lst_sched_test == NULL) + if (!lst_sched_test) goto error; lst_init_step = LST_INIT_WI_TEST; @@ -117,42 +115,42 @@ lnet_selftest_init(void) nthrs = max(nthrs - 1, 1); rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i, nthrs, &lst_sched_test[i]); - if (rc != 0) { - CERROR("Failed to create CPT affinity WI scheduler %d for LST\n", - i); + if (rc) { + CERROR("Failed to create CPT affinity WI scheduler %d for LST\n", i); goto error; } } rc = srpc_startup(); - if (rc != 0) { + if (rc) { CERROR("LST can't startup rpc\n"); goto 
error; } lst_init_step = LST_INIT_RPC; rc = sfw_startup(); - if (rc != 0) { + if (rc) { CERROR("LST can't startup framework\n"); goto error; } lst_init_step = LST_INIT_FW; rc = lstcon_console_init(); - if (rc != 0) { + if (rc) { CERROR("LST can't startup console\n"); goto error; } lst_init_step = LST_INIT_CONSOLE; return 0; error: - lnet_selftest_fini(); + lnet_selftest_exit(); return rc; } +MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); MODULE_DESCRIPTION("LNet Selftest"); +MODULE_VERSION("2.7.0"); MODULE_LICENSE("GPL"); -MODULE_VERSION("0.9.0"); module_init(lnet_selftest_init); -module_exit(lnet_selftest_fini); +module_exit(lnet_selftest_exit); diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c index d42653654fa8..81a45045e186 100644 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c @@ -42,18 +42,18 @@ #include "selftest.h" -#define LST_PING_TEST_MAGIC 0xbabeface +#define LST_PING_TEST_MAGIC 0xbabeface static int ping_srv_workitems = SFW_TEST_WI_MAX; module_param(ping_srv_workitems, int, 0644); MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems"); -typedef struct { +struct lst_ping_data { spinlock_t pnd_lock; /* serialize */ int pnd_counter; /* sequence counter */ -} lst_ping_data_t; +}; -static lst_ping_data_t lst_ping_data; +static struct lst_ping_data lst_ping_data; static int ping_client_init(sfw_test_instance_t *tsi) @@ -61,7 +61,7 @@ ping_client_init(sfw_test_instance_t *tsi) sfw_session_t *sn = tsi->tsi_batch->bat_session; LASSERT(tsi->tsi_is_client); - LASSERT(sn != NULL && (sn->sn_features & ~LST_FEATS_MASK) == 0); + LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK)); spin_lock_init(&lst_ping_data.pnd_lock); lst_ping_data.pnd_counter = 0; @@ -75,7 +75,7 @@ ping_client_fini(sfw_test_instance_t *tsi) sfw_session_t *sn = tsi->tsi_batch->bat_session; int errors; - LASSERT(sn != NULL); + LASSERT(sn); LASSERT(tsi->tsi_is_client); errors = atomic_read(&sn->sn_ping_errors); @@ -95,11 +95,11 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu, struct timespec64 ts; int rc; - LASSERT(sn != NULL); - LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0); + LASSERT(sn); + LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc); - if (rc != 0) + if (rc) return rc; req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst; @@ -111,7 +111,7 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu, spin_unlock(&lst_ping_data.pnd_lock); ktime_get_real_ts64(&ts); - req->pnr_time_sec = ts.tv_sec; + req->pnr_time_sec = ts.tv_sec; req->pnr_time_usec = ts.tv_nsec / NSEC_PER_USEC; return rc; @@ -126,14 +126,14 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply; struct timespec64 ts; - LASSERT(sn != NULL); + LASSERT(sn); - if (rpc->crpc_status != 0) { + if (rpc->crpc_status) { if (!tsi->tsi_stopping) /* rpc could have been aborted */ atomic_inc(&sn->sn_ping_errors); CERROR("Unable to ping %s (%d): %d\n", - libcfs_id2str(rpc->crpc_dest), - reqst->pnr_seq, rpc->crpc_status); + libcfs_id2str(rpc->crpc_dest), + reqst->pnr_seq, rpc->crpc_status); return; } @@ -147,8 +147,8 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) rpc->crpc_status = -EBADMSG; atomic_inc(&sn->sn_ping_errors); CERROR("Bad magic %u from %s, %u expected.\n", - reply->pnr_magic, libcfs_id2str(rpc->crpc_dest), - LST_PING_TEST_MAGIC); + reply->pnr_magic, 
libcfs_id2str(rpc->crpc_dest), + LST_PING_TEST_MAGIC); return; } @@ -156,8 +156,8 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) rpc->crpc_status = -EBADMSG; atomic_inc(&sn->sn_ping_errors); CERROR("Bad seq %u from %s, %u expected.\n", - reply->pnr_seq, libcfs_id2str(rpc->crpc_dest), - reqst->pnr_seq); + reply->pnr_seq, libcfs_id2str(rpc->crpc_dest), + reqst->pnr_seq); return; } @@ -165,13 +165,12 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq, (unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 + (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec))); - return; } static int ping_server_handle(struct srpc_server_rpc *rpc) { - struct srpc_service *sv = rpc->srpc_scd->scd_svc; + struct srpc_service *sv = rpc->srpc_scd->scd_svc; srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; srpc_msg_t *replymsg = &rpc->srpc_replymsg; srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst; @@ -191,14 +190,14 @@ ping_server_handle(struct srpc_server_rpc *rpc) if (req->pnr_magic != LST_PING_TEST_MAGIC) { CERROR("Unexpected magic %08x from %s\n", - req->pnr_magic, libcfs_id2str(rpc->srpc_peer)); + req->pnr_magic, libcfs_id2str(rpc->srpc_peer)); return -EINVAL; } - rep->pnr_seq = req->pnr_seq; + rep->pnr_seq = req->pnr_seq; rep->pnr_magic = LST_PING_TEST_MAGIC; - if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) { + if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) { replymsg->msg_ses_feats = LST_FEATS_MASK; rep->pnr_status = EPROTO; return 0; @@ -214,8 +213,8 @@ ping_server_handle(struct srpc_server_rpc *rpc) sfw_test_client_ops_t ping_test_client; void ping_init_test_client(void) { - ping_test_client.tso_init = ping_client_init; - ping_test_client.tso_fini = ping_client_fini; + ping_test_client.tso_init = ping_client_init; + ping_test_client.tso_fini = ping_client_fini; ping_test_client.tso_prep_rpc = ping_client_prep_rpc; ping_test_client.tso_done_rpc = ping_client_done_rpc; } @@ -223,8 +222,8 @@ void ping_init_test_client(void) srpc_service_t ping_test_service; void ping_init_test_service(void) { - ping_test_service.sv_id = SRPC_SERVICE_PING; - ping_test_service.sv_name = "ping_test"; - ping_test_service.sv_handler = ping_server_handle; + ping_test_service.sv_id = SRPC_SERVICE_PING; + ping_test_service.sv_name = "ping_test"; + ping_test_service.sv_handler = ping_server_handle; ping_test_service.sv_wi_total = ping_srv_workitems; } diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c index 2acf6ec717be..69be7d6f48fa 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c @@ -96,8 +96,8 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) LASSERT(i >= 0 && i < bk->bk_niov); bk->bk_iovs[i].kiov_offset = 0; - bk->bk_iovs[i].kiov_page = pg; - bk->bk_iovs[i].kiov_len = nob; + bk->bk_iovs[i].kiov_page = pg; + bk->bk_iovs[i].kiov_len = nob; return nob; } @@ -107,18 +107,17 @@ srpc_free_bulk(srpc_bulk_t *bk) int i; struct page *pg; - LASSERT(bk != NULL); + LASSERT(bk); for (i = 0; i < bk->bk_niov; i++) { pg = bk->bk_iovs[i].kiov_page; - if (pg == NULL) + if (!pg) break; __free_page(pg); } LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov])); - return; } srpc_bulk_t * @@ -131,15 +130,15 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt, offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); - if (bk == NULL) { + if (!bk) { 
CERROR("Can't allocate descriptor for %d pages\n", bulk_npg); return NULL; } memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); - bk->bk_sink = sink; - bk->bk_len = bulk_len; - bk->bk_niov = bulk_npg; + bk->bk_sink = sink; + bk->bk_len = bulk_len; + bk->bk_niov = bulk_npg; for (i = 0; i < bulk_npg; i++) { struct page *pg; @@ -147,7 +146,7 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt), GFP_KERNEL, 0); - if (pg == NULL) { + if (!pg) { CERROR("Can't allocate page %d of %d\n", i, bulk_npg); srpc_free_bulk(bk); return NULL; @@ -183,10 +182,10 @@ srpc_init_server_rpc(struct srpc_server_rpc *rpc, rpc->srpc_ev.ev_fired = 1; /* no event expected now */ - rpc->srpc_scd = scd; + rpc->srpc_scd = scd; rpc->srpc_reqstbuf = buffer; - rpc->srpc_peer = buffer->buf_peer; - rpc->srpc_self = buffer->buf_self; + rpc->srpc_peer = buffer->buf_peer; + rpc->srpc_self = buffer->buf_self; LNetInvalidateHandle(&rpc->srpc_replymdh); } @@ -199,7 +198,7 @@ srpc_service_fini(struct srpc_service *svc) struct list_head *q; int i; - if (svc->sv_cpt_data == NULL) + if (!svc->sv_cpt_data) return; cfs_percpt_for_each(scd, i, svc->sv_cpt_data) { @@ -212,9 +211,8 @@ srpc_service_fini(struct srpc_service *svc) break; while (!list_empty(q)) { - buf = list_entry(q->next, - struct srpc_buffer, - buf_list); + buf = list_entry(q->next, struct srpc_buffer, + buf_list); list_del(&buf->buf_list); LIBCFS_FREE(buf, sizeof(*buf)); } @@ -224,8 +222,8 @@ srpc_service_fini(struct srpc_service *svc) while (!list_empty(&scd->scd_rpc_free)) { rpc = list_entry(scd->scd_rpc_free.next, - struct srpc_server_rpc, - srpc_list); + struct srpc_server_rpc, + srpc_list); list_del(&rpc->srpc_list); LIBCFS_FREE(rpc, sizeof(*rpc)); } @@ -259,7 +257,7 @@ srpc_service_init(struct srpc_service *svc) svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(), sizeof(struct srpc_service_cd)); - if (svc->sv_cpt_data == NULL) + if (!svc->sv_cpt_data) return -ENOMEM; svc->sv_ncpts = srpc_serv_is_framework(svc) ? 
@@ -278,23 +276,27 @@ srpc_service_init(struct srpc_service *svc) scd->scd_ev.ev_data = scd; scd->scd_ev.ev_type = SRPC_REQUEST_RCVD; - /* NB: don't use lst_sched_serial for adding buffer, - * see details in srpc_service_add_buffers() */ + /* + * NB: don't use lst_sched_serial for adding buffer, + * see details in srpc_service_add_buffers() + */ swi_init_workitem(&scd->scd_buf_wi, scd, srpc_add_buffer, lst_sched_test[i]); - if (i != 0 && srpc_serv_is_framework(svc)) { - /* NB: framework service only needs srpc_service_cd for + if (i && srpc_serv_is_framework(svc)) { + /* + * NB: framework service only needs srpc_service_cd for * one partition, but we allocate for all to make * it easier to implement, it will waste a little - * memory but nobody should care about this */ + * memory but nobody should care about this + */ continue; } for (j = 0; j < nrpcs; j++) { LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(), i, sizeof(*rpc)); - if (rpc == NULL) { + if (!rpc) { srpc_service_fini(svc); return -ENOMEM; } @@ -312,14 +314,14 @@ srpc_add_service(struct srpc_service *sv) LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID); - if (srpc_service_init(sv) != 0) + if (srpc_service_init(sv)) return -ENOMEM; spin_lock(&srpc_data.rpc_glock); LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); - if (srpc_data.rpc_services[id] != NULL) { + if (srpc_data.rpc_services[id]) { spin_unlock(&srpc_data.rpc_glock); goto failed; } @@ -363,32 +365,31 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf, rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK, local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh); - if (rc != 0) { + if (rc) { CERROR("LNetMEAttach failed: %d\n", rc); LASSERT(rc == -ENOMEM); return -ENOMEM; } md.threshold = 1; - md.user_ptr = ev; - md.start = buf; - md.length = len; - md.options = options; + md.user_ptr = ev; + md.start = buf; + md.length = len; + md.options = options; md.eq_handle = srpc_data.rpc_lnet_eq; rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh); - if (rc != 0) { + if (rc) { CERROR("LNetMDAttach failed: %d\n", rc); LASSERT(rc == -ENOMEM); rc = LNetMEUnlink(meh); - LASSERT(rc == 0); + LASSERT(!rc); return -ENOMEM; } - CDEBUG(D_NET, - "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n", - libcfs_id2str(peer), portal, matchbits); + CDEBUG(D_NET, "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n", + libcfs_id2str(peer), portal, matchbits); return 0; } @@ -400,46 +401,48 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, int rc; lnet_md_t md; - md.user_ptr = ev; - md.start = buf; - md.length = len; + md.user_ptr = ev; + md.start = buf; + md.length = len; md.eq_handle = srpc_data.rpc_lnet_eq; - md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1; - md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET); + md.threshold = options & LNET_MD_OP_GET ? 2 : 1; + md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET); rc = LNetMDBind(md, LNET_UNLINK, mdh); - if (rc != 0) { + if (rc) { CERROR("LNetMDBind failed: %d\n", rc); LASSERT(rc == -ENOMEM); return -ENOMEM; } - /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options. + /* + * this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options. * they're only meaningful for MDs attached to an ME (i.e. passive - * buffers... */ - if ((options & LNET_MD_OP_PUT) != 0) { + * buffers... 
+ */ + if (options & LNET_MD_OP_PUT) { rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer, portal, matchbits, 0, 0); } else { - LASSERT((options & LNET_MD_OP_GET) != 0); + LASSERT(options & LNET_MD_OP_GET); rc = LNetGet(self, *mdh, peer, portal, matchbits, 0); } - if (rc != 0) { + if (rc) { CERROR("LNet%s(%s, %d, %lld) failed: %d\n", - ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get", - libcfs_id2str(peer), portal, matchbits, rc); + options & LNET_MD_OP_PUT ? "Put" : "Get", + libcfs_id2str(peer), portal, matchbits, rc); - /* The forthcoming unlink event will complete this operation + /* + * The forthcoming unlink event will complete this operation * with failure, so fall through and return success here. */ rc = LNetMDUnlink(*mdh); - LASSERT(rc == 0); + LASSERT(!rc); } else { - CDEBUG(D_NET, - "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n", - libcfs_id2str(peer), portal, matchbits); + CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n", + libcfs_id2str(peer), portal, matchbits); } return 0; } @@ -448,7 +451,7 @@ static int srpc_post_passive_rqtbuf(int service, int local, void *buf, int len, lnet_handle_md_t *mdh, srpc_event_t *ev) { - lnet_process_id_t any = {0}; + lnet_process_id_t any = { 0 }; any.nid = LNET_NID_ANY; any.pid = LNET_PID_ANY; @@ -460,10 +463,10 @@ srpc_post_passive_rqtbuf(int service, int local, void *buf, int len, static int srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) - __must_hold(&scd->scd_lock) +__must_hold(&scd->scd_lock) { struct srpc_service *sv = scd->scd_svc; - struct srpc_msg *msg = &buf->buf_msg; + struct srpc_msg *msg = &buf->buf_msg; int rc; LNetInvalidateHandle(&buf->buf_mdh); @@ -476,19 +479,22 @@ srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) msg, sizeof(*msg), &buf->buf_mdh, &scd->scd_ev); - /* At this point, a RPC (new or delayed) may have arrived in + /* + * At this point, a RPC (new or delayed) may have arrived in * msg and its event handler has been called. 
So we must add - * buf to scd_buf_posted _before_ dropping scd_lock */ - + * buf to scd_buf_posted _before_ dropping scd_lock + */ spin_lock(&scd->scd_lock); - if (rc == 0) { + if (!rc) { if (!sv->sv_shuttingdown) return 0; spin_unlock(&scd->scd_lock); - /* srpc_shutdown_service might have tried to unlink me - * when my buf_mdh was still invalid */ + /* + * srpc_shutdown_service might have tried to unlink me + * when my buf_mdh was still invalid + */ LNetMDUnlink(buf->buf_mdh); spin_lock(&scd->scd_lock); return 0; @@ -514,9 +520,11 @@ srpc_add_buffer(struct swi_workitem *wi) struct srpc_buffer *buf; int rc = 0; - /* it's called by workitem scheduler threads, these threads + /* + * it's called by workitem scheduler threads, these threads * should have been set CPT affinity, so buffers will be posted - * on CPT local list of Portal */ + * on CPT local list of Portal + */ spin_lock(&scd->scd_lock); while (scd->scd_buf_adjust > 0 && @@ -527,7 +535,7 @@ srpc_add_buffer(struct swi_workitem *wi) spin_unlock(&scd->scd_lock); LIBCFS_ALLOC(buf, sizeof(*buf)); - if (buf == NULL) { + if (!buf) { CERROR("Failed to add new buf to service: %s\n", scd->scd_svc->sv_name); spin_lock(&scd->scd_lock); @@ -546,7 +554,7 @@ srpc_add_buffer(struct swi_workitem *wi) } rc = srpc_service_post_buffer(scd, buf); - if (rc != 0) + if (rc) break; /* buf has been freed inside */ LASSERT(scd->scd_buf_posting > 0); @@ -555,7 +563,7 @@ srpc_add_buffer(struct swi_workitem *wi) scd->scd_buf_low = max(2, scd->scd_buf_total / 4); } - if (rc != 0) { + if (rc) { scd->scd_buf_err_stamp = ktime_get_real_seconds(); scd->scd_buf_err = rc; @@ -607,12 +615,12 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer) * block all WIs pending on lst_sched_serial for a moment * which is not good but not fatal. 
*/ - lst_wait_until(scd->scd_buf_err != 0 || - (scd->scd_buf_adjust == 0 && - scd->scd_buf_posting == 0), + lst_wait_until(scd->scd_buf_err || + (!scd->scd_buf_adjust && + !scd->scd_buf_posting), scd->scd_lock, "waiting for adding buffer\n"); - if (scd->scd_buf_err != 0 && rc == 0) + if (scd->scd_buf_err && !rc) rc = scd->scd_buf_err; spin_unlock(&scd->scd_lock); @@ -658,7 +666,7 @@ srpc_finish_service(struct srpc_service *sv) } if (scd->scd_buf_nposted > 0) { - CDEBUG(D_NET, "waiting for %d posted buffers to unlink", + CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n", scd->scd_buf_nposted); spin_unlock(&scd->scd_lock); return 0; @@ -670,7 +678,7 @@ srpc_finish_service(struct srpc_service *sv) } rpc = list_entry(scd->scd_rpc_active.next, - struct srpc_server_rpc, srpc_list); + struct srpc_server_rpc, srpc_list); CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n", rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer), swi_state2str(rpc->srpc_wi.swi_state), @@ -690,10 +698,10 @@ srpc_finish_service(struct srpc_service *sv) /* called with sv->sv_lock held */ static void srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf) - __must_hold(&scd->scd_lock) +__must_hold(&scd->scd_lock) { if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) { - if (srpc_service_post_buffer(scd, buf) != 0) { + if (srpc_service_post_buffer(scd, buf)) { CWARN("Failed to post %s buffer\n", scd->scd_svc->sv_name); } @@ -706,7 +714,7 @@ srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf) if (scd->scd_buf_adjust < 0) { scd->scd_buf_adjust++; if (scd->scd_buf_adjust < 0 && - scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) { + !scd->scd_buf_total && !scd->scd_buf_posting) { CDEBUG(D_INFO, "Try to recycle %d buffers but nothing left\n", scd->scd_buf_adjust); @@ -732,9 +740,11 @@ srpc_abort_service(struct srpc_service *sv) cfs_percpt_for_each(scd, i, sv->sv_cpt_data) { spin_lock(&scd->scd_lock); - /* schedule in-flight RPCs to notice the abort, NB: + /* + * schedule in-flight RPCs to notice the abort, NB: * racing with incoming RPCs; complete fix should make test - * RPCs carry session ID in its headers */ + * RPCs carry session ID in its headers + */ list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) { rpc->srpc_aborted = 1; swi_schedule_workitem(&rpc->srpc_wi); @@ -772,8 +782,10 @@ srpc_shutdown_service(srpc_service_t *sv) spin_unlock(&scd->scd_lock); - /* OK to traverse scd_buf_posted without lock, since no one - * touches scd_buf_posted now */ + /* + * OK to traverse scd_buf_posted without lock, since no one + * touches scd_buf_posted now + */ list_for_each_entry(buf, &scd->scd_buf_posted, buf_list) LNetMDUnlink(buf->buf_mdh); } @@ -786,15 +798,15 @@ srpc_send_request(srpc_client_rpc_t *rpc) int rc; ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REQUEST_SENT; + ev->ev_data = rpc; + ev->ev_type = SRPC_REQUEST_SENT; rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service), rpc->crpc_service, &rpc->crpc_reqstmsg, sizeof(srpc_msg_t), LNET_MD_OP_PUT, rpc->crpc_dest, LNET_NID_ANY, &rpc->crpc_reqstmdh, ev); - if (rc != 0) { + if (rc) { LASSERT(rc == -ENOMEM); ev->ev_fired = 1; /* no more event expected */ } @@ -809,8 +821,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc) int rc; ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REPLY_RCVD; + ev->ev_data = rpc; + ev->ev_type = SRPC_REPLY_RCVD; *id = srpc_next_id(); @@ -818,7 +830,7 @@ 
srpc_prepare_reply(srpc_client_rpc_t *rpc) &rpc->crpc_replymsg, sizeof(srpc_msg_t), LNET_MD_OP_PUT, rpc->crpc_dest, &rpc->crpc_replymdh, ev); - if (rc != 0) { + if (rc) { LASSERT(rc == -ENOMEM); ev->ev_fired = 1; /* no more event expected */ } @@ -830,28 +842,28 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc) { srpc_bulk_t *bk = &rpc->crpc_bulk; srpc_event_t *ev = &rpc->crpc_bulkev; - __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; + __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; int rc; int opt; LASSERT(bk->bk_niov <= LNET_MAX_IOV); - if (bk->bk_niov == 0) + if (!bk->bk_niov) return 0; /* nothing to do */ opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET; opt |= LNET_MD_KIOV; ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_BULK_REQ_RCVD; + ev->ev_data = rpc; + ev->ev_type = SRPC_BULK_REQ_RCVD; *id = srpc_next_id(); rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id, &bk->bk_iovs[0], bk->bk_niov, opt, rpc->crpc_dest, &bk->bk_mdh, ev); - if (rc != 0) { + if (rc) { LASSERT(rc == -ENOMEM); ev->ev_fired = 1; /* no more event expected */ } @@ -867,20 +879,20 @@ srpc_do_bulk(struct srpc_server_rpc *rpc) int rc; int opt; - LASSERT(bk != NULL); + LASSERT(bk); opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT; opt |= LNET_MD_KIOV; ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT; + ev->ev_data = rpc; + ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT; rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id, &bk->bk_iovs[0], bk->bk_niov, opt, rpc->srpc_peer, rpc->srpc_self, &bk->bk_mdh, ev); - if (rc != 0) + if (rc) ev->ev_fired = 1; /* no more event expected */ return rc; } @@ -890,33 +902,35 @@ static void srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) { struct srpc_service_cd *scd = rpc->srpc_scd; - struct srpc_service *sv = scd->scd_svc; + struct srpc_service *sv = scd->scd_svc; srpc_buffer_t *buffer; - LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE); + LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE); rpc->srpc_status = status; - CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR, - "Server RPC %p done: service %s, peer %s, status %s:%d\n", - rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer), - swi_state2str(rpc->srpc_wi.swi_state), status); + CDEBUG_LIMIT(!status ? 
D_NET : D_NETERROR, + "Server RPC %p done: service %s, peer %s, status %s:%d\n", + rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer), + swi_state2str(rpc->srpc_wi.swi_state), status); - if (status != 0) { + if (status) { spin_lock(&srpc_data.rpc_glock); srpc_data.rpc_counters.rpcs_dropped++; spin_unlock(&srpc_data.rpc_glock); } - if (rpc->srpc_done != NULL) + if (rpc->srpc_done) (*rpc->srpc_done) (rpc); - LASSERT(rpc->srpc_bulk == NULL); + LASSERT(!rpc->srpc_bulk); spin_lock(&scd->scd_lock); - if (rpc->srpc_reqstbuf != NULL) { - /* NB might drop sv_lock in srpc_service_recycle_buffer, but - * sv won't go away for scd_rpc_active must not be empty */ + if (rpc->srpc_reqstbuf) { + /* + * NB might drop sv_lock in srpc_service_recycle_buffer, but + * sv won't go away for scd_rpc_active must not be empty + */ srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf); rpc->srpc_reqstbuf = NULL; } @@ -934,7 +948,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) { buffer = list_entry(scd->scd_buf_blocked.next, - srpc_buffer_t, buf_list); + srpc_buffer_t, buf_list); list_del(&buffer->buf_list); srpc_init_server_rpc(rpc, scd, buffer); @@ -945,7 +959,6 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) } spin_unlock(&scd->scd_lock); - return; } /* handles an incoming RPC */ @@ -965,7 +978,7 @@ srpc_handle_rpc(swi_workitem_t *wi) if (sv->sv_shuttingdown || rpc->srpc_aborted) { spin_unlock(&scd->scd_lock); - if (rpc->srpc_bulk != NULL) + if (rpc->srpc_bulk) LNetMDUnlink(rpc->srpc_bulk->bk_mdh); LNetMDUnlink(rpc->srpc_replymdh); @@ -988,7 +1001,7 @@ srpc_handle_rpc(swi_workitem_t *wi) msg = &rpc->srpc_reqstbuf->buf_msg; reply = &rpc->srpc_replymsg.msg_body.reply; - if (msg->msg_magic == 0) { + if (!msg->msg_magic) { /* moaned already in srpc_lnet_ev_handler */ srpc_server_rpc_done(rpc, EBADMSG); return 1; @@ -1004,8 +1017,8 @@ srpc_handle_rpc(swi_workitem_t *wi) } else { reply->status = 0; rc = (*sv->sv_handler)(rpc); - LASSERT(reply->status == 0 || !rpc->srpc_bulk); - if (rc != 0) { + LASSERT(!reply->status || !rpc->srpc_bulk); + if (rc) { srpc_server_rpc_done(rpc, rc); return 1; } @@ -1013,9 +1026,9 @@ srpc_handle_rpc(swi_workitem_t *wi) wi->swi_state = SWI_STATE_BULK_STARTED; - if (rpc->srpc_bulk != NULL) { + if (rpc->srpc_bulk) { rc = srpc_do_bulk(rpc); - if (rc == 0) + if (!rc) return 0; /* wait for bulk */ LASSERT(ev->ev_fired); @@ -1023,15 +1036,15 @@ srpc_handle_rpc(swi_workitem_t *wi) } } case SWI_STATE_BULK_STARTED: - LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired); + LASSERT(!rpc->srpc_bulk || ev->ev_fired); - if (rpc->srpc_bulk != NULL) { + if (rpc->srpc_bulk) { rc = ev->ev_status; - if (sv->sv_bulk_ready != NULL) + if (sv->sv_bulk_ready) rc = (*sv->sv_bulk_ready) (rpc, rc); - if (rc != 0) { + if (rc) { srpc_server_rpc_done(rpc, rc); return 1; } @@ -1039,7 +1052,7 @@ srpc_handle_rpc(swi_workitem_t *wi) wi->swi_state = SWI_STATE_REPLY_SUBMITTED; rc = srpc_send_reply(rpc); - if (rc == 0) + if (!rc) return 0; /* wait for reply */ srpc_server_rpc_done(rpc, rc); return 1; @@ -1067,8 +1080,8 @@ srpc_client_rpc_expired(void *data) srpc_client_rpc_t *rpc = data; CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - rpc->crpc_timeout); + rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), + rpc->crpc_timeout); spin_lock(&rpc->crpc_lock); @@ -1082,32 +1095,32 @@ srpc_client_rpc_expired(void *data) spin_unlock(&srpc_data.rpc_glock); } -inline void +static 
void srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc) { - stt_timer_t *timer = &rpc->crpc_timer; + struct stt_timer *timer = &rpc->crpc_timer; - if (rpc->crpc_timeout == 0) + if (!rpc->crpc_timeout) return; INIT_LIST_HEAD(&timer->stt_list); - timer->stt_data = rpc; - timer->stt_func = srpc_client_rpc_expired; + timer->stt_data = rpc; + timer->stt_func = srpc_client_rpc_expired; timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout; stt_add_timer(timer); - return; } /* * Called with rpc->crpc_lock held. * * Upon exit the RPC expiry timer is not queued and the handler is not - * running on any CPU. */ + * running on any CPU. + */ static void srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc) { /* timer not planted or already exploded */ - if (rpc->crpc_timeout == 0) + if (!rpc->crpc_timeout) return; /* timer successfully defused */ @@ -1115,7 +1128,7 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc) return; /* timer detonated, wait for it to explode */ - while (rpc->crpc_timeout != 0) { + while (rpc->crpc_timeout) { spin_unlock(&rpc->crpc_lock); schedule(); @@ -1129,20 +1142,20 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) { swi_workitem_t *wi = &rpc->crpc_wi; - LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE); + LASSERT(status || wi->swi_state == SWI_STATE_DONE); spin_lock(&rpc->crpc_lock); rpc->crpc_closed = 1; - if (rpc->crpc_status == 0) + if (!rpc->crpc_status) rpc->crpc_status = status; srpc_del_client_rpc_timer(rpc); - CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR, - "Client RPC done: service %d, peer %s, status %s:%d:%d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(wi->swi_state), rpc->crpc_aborted, status); + CDEBUG_LIMIT(!status ? D_NET : D_NETERROR, + "Client RPC done: service %d, peer %s, status %s:%d:%d\n", + rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), + swi_state2str(wi->swi_state), rpc->crpc_aborted, status); /* * No one can schedule me now since: @@ -1158,7 +1171,6 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) spin_unlock(&rpc->crpc_lock); (*rpc->crpc_done)(rpc); - return; } /* sends an outgoing RPC */ @@ -1170,11 +1182,11 @@ srpc_send_rpc(swi_workitem_t *wi) srpc_msg_t *reply; int do_bulk; - LASSERT(wi != NULL); + LASSERT(wi); rpc = wi->swi_workitem.wi_data; - LASSERT(rpc != NULL); + LASSERT(rpc); LASSERT(wi == &rpc->crpc_wi); reply = &rpc->crpc_replymsg; @@ -1196,13 +1208,13 @@ srpc_send_rpc(swi_workitem_t *wi) LASSERT(!srpc_event_pending(rpc)); rc = srpc_prepare_reply(rpc); - if (rc != 0) { + if (rc) { srpc_client_rpc_done(rpc, rc); return 1; } rc = srpc_prepare_bulk(rpc); - if (rc != 0) + if (rc) break; wi->swi_state = SWI_STATE_REQUEST_SUBMITTED; @@ -1210,14 +1222,16 @@ srpc_send_rpc(swi_workitem_t *wi) break; case SWI_STATE_REQUEST_SUBMITTED: - /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any + /* + * CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any * order; however, they're processed in a strict order: - * rqt, rpy, and bulk. */ + * rqt, rpy, and bulk. 
+ */ if (!rpc->crpc_reqstev.ev_fired) break; rc = rpc->crpc_reqstev.ev_status; - if (rc != 0) + if (rc) break; wi->swi_state = SWI_STATE_REQUEST_SENT; @@ -1229,7 +1243,7 @@ srpc_send_rpc(swi_workitem_t *wi) break; rc = rpc->crpc_replyev.ev_status; - if (rc != 0) + if (rc) break; srpc_unpack_msg_hdr(reply); @@ -1244,7 +1258,7 @@ srpc_send_rpc(swi_workitem_t *wi) break; } - if (do_bulk && reply->msg_body.reply.status != 0) { + if (do_bulk && reply->msg_body.reply.status) { CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n", reply->msg_body.reply.status, libcfs_id2str(rpc->crpc_dest)); @@ -1259,12 +1273,14 @@ srpc_send_rpc(swi_workitem_t *wi) rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0; - /* Bulk buffer was unlinked due to remote error. Clear error + /* + * Bulk buffer was unlinked due to remote error. Clear error * since reply buffer still contains valid data. * NB rpc->crpc_done shouldn't look into bulk data in case of - * remote error. */ + * remote error. + */ if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK && - rpc->crpc_status == 0 && reply->msg_body.reply.status != 0) + !rpc->crpc_status && reply->msg_body.reply.status) rc = 0; wi->swi_state = SWI_STATE_DONE; @@ -1272,7 +1288,7 @@ srpc_send_rpc(swi_workitem_t *wi) return 1; } - if (rc != 0) { + if (rc) { spin_lock(&rpc->crpc_lock); srpc_abort_rpc(rpc, rc); spin_unlock(&rpc->crpc_lock); @@ -1294,15 +1310,15 @@ abort: srpc_client_rpc_t * srpc_create_client_rpc(lnet_process_id_t peer, int service, - int nbulkiov, int bulklen, - void (*rpc_done)(srpc_client_rpc_t *), - void (*rpc_fini)(srpc_client_rpc_t *), void *priv) + int nbulkiov, int bulklen, + void (*rpc_done)(srpc_client_rpc_t *), + void (*rpc_fini)(srpc_client_rpc_t *), void *priv) { srpc_client_rpc_t *rpc; LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[nbulkiov])); - if (rpc == NULL) + if (!rpc) return NULL; srpc_init_client_rpc(rpc, peer, service, nbulkiov, @@ -1314,21 +1330,19 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service, void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why) { - LASSERT(why != 0); + LASSERT(why); if (rpc->crpc_aborted || /* already aborted */ - rpc->crpc_closed) /* callback imminent */ + rpc->crpc_closed) /* callback imminent */ return; - CDEBUG(D_NET, - "Aborting RPC: service %d, peer %s, state %s, why %d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(rpc->crpc_wi.swi_state), why); + CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n", + rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), + swi_state2str(rpc->crpc_wi.swi_state), why); rpc->crpc_aborted = 1; - rpc->crpc_status = why; + rpc->crpc_status = why; swi_schedule_workitem(&rpc->crpc_wi); - return; } /* called with rpc->crpc_lock held */ @@ -1339,12 +1353,11 @@ srpc_post_rpc(srpc_client_rpc_t *rpc) LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n", - libcfs_id2str(rpc->crpc_dest), rpc->crpc_service, - rpc->crpc_timeout); + libcfs_id2str(rpc->crpc_dest), rpc->crpc_service, + rpc->crpc_timeout); srpc_add_client_rpc_timer(rpc); swi_schedule_workitem(&rpc->crpc_wi); - return; } int @@ -1358,15 +1371,17 @@ srpc_send_reply(struct srpc_server_rpc *rpc) __u64 rpyid; int rc; - LASSERT(buffer != NULL); + LASSERT(buffer); rpyid = buffer->buf_msg.msg_body.reqst.rpyid; spin_lock(&scd->scd_lock); if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) { - /* Repost buffer before replying since test client - * might send me 
another RPC once it gets the reply */ - if (srpc_service_post_buffer(scd, buffer) != 0) + /* + * Repost buffer before replying since test client + * might send me another RPC once it gets the reply + */ + if (srpc_service_post_buffer(scd, buffer)) CWARN("Failed to repost %s buffer\n", sv->sv_name); rpc->srpc_reqstbuf = NULL; } @@ -1374,18 +1389,18 @@ srpc_send_reply(struct srpc_server_rpc *rpc) spin_unlock(&scd->scd_lock); ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REPLY_SENT; + ev->ev_data = rpc; + ev->ev_type = SRPC_REPLY_SENT; - msg->msg_magic = SRPC_MSG_MAGIC; + msg->msg_magic = SRPC_MSG_MAGIC; msg->msg_version = SRPC_MSG_VERSION; - msg->msg_type = srpc_service2reply(sv->sv_id); + msg->msg_type = srpc_service2reply(sv->sv_id); rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg, sizeof(*msg), LNET_MD_OP_PUT, rpc->srpc_peer, rpc->srpc_self, &rpc->srpc_replymdh, ev); - if (rc != 0) + if (rc) ev->ev_fired = 1; /* no more event expected */ return rc; } @@ -1405,10 +1420,17 @@ srpc_lnet_ev_handler(lnet_event_t *ev) LASSERT(!in_interrupt()); - if (ev->status != 0) { + if (ev->status) { + __u32 errors; + spin_lock(&srpc_data.rpc_glock); - srpc_data.rpc_counters.errors++; + if (ev->status != -ECANCELED) /* cancellation is not error */ + srpc_data.rpc_counters.errors++; + errors = srpc_data.rpc_counters.errors; spin_unlock(&srpc_data.rpc_glock); + + CNETERR("LNet event status %d type %d, RPC errors %u\n", + ev->status, ev->type, errors); } rpcev->ev_lnet = ev->type; @@ -1419,7 +1441,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev) rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet); LBUG(); case SRPC_REQUEST_SENT: - if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) { + if (!ev->status && ev->type != LNET_EVENT_UNLINK) { spin_lock(&srpc_data.rpc_glock); srpc_data.rpc_counters.rpcs_sent++; spin_unlock(&srpc_data.rpc_glock); @@ -1441,8 +1463,8 @@ srpc_lnet_ev_handler(lnet_event_t *ev) spin_lock(&crpc->crpc_lock); - LASSERT(rpcev->ev_fired == 0); - rpcev->ev_fired = 1; + LASSERT(!rpcev->ev_fired); + rpcev->ev_fired = 1; rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ? -EINTR : ev->status; swi_schedule_workitem(&crpc->crpc_wi); @@ -1460,9 +1482,9 @@ srpc_lnet_ev_handler(lnet_event_t *ev) LASSERT(ev->unlinked); LASSERT(ev->type == LNET_EVENT_PUT || - ev->type == LNET_EVENT_UNLINK); + ev->type == LNET_EVENT_UNLINK); LASSERT(ev->type != LNET_EVENT_UNLINK || - sv->sv_shuttingdown); + sv->sv_shuttingdown); buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg); buffer->buf_peer = ev->initiator; @@ -1472,21 +1494,23 @@ srpc_lnet_ev_handler(lnet_event_t *ev) scd->scd_buf_nposted--; if (sv->sv_shuttingdown) { - /* Leave buffer on scd->scd_buf_nposted since - * srpc_finish_service needs to traverse it. */ + /* + * Leave buffer on scd->scd_buf_nposted since + * srpc_finish_service needs to traverse it. 
+ */ spin_unlock(&scd->scd_lock); break; } - if (scd->scd_buf_err_stamp != 0 && + if (scd->scd_buf_err_stamp && scd->scd_buf_err_stamp < ktime_get_real_seconds()) { /* re-enable adding buffer */ scd->scd_buf_err_stamp = 0; scd->scd_buf_err = 0; } - if (scd->scd_buf_err == 0 && /* adding buffer is enabled */ - scd->scd_buf_adjust == 0 && + if (!scd->scd_buf_err && /* adding buffer is enabled */ + !scd->scd_buf_adjust && scd->scd_buf_nposted < scd->scd_buf_low) { scd->scd_buf_adjust = max(scd->scd_buf_total / 2, SFW_TEST_WI_MIN); @@ -1497,7 +1521,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev) msg = &buffer->buf_msg; type = srpc_service2request(sv->sv_id); - if (ev->status != 0 || ev->mlength != sizeof(*msg) || + if (ev->status || ev->mlength != sizeof(*msg) || (msg->msg_type != type && msg->msg_type != __swab32(type)) || (msg->msg_magic != SRPC_MSG_MAGIC && @@ -1507,25 +1531,27 @@ srpc_lnet_ev_handler(lnet_event_t *ev) ev->status, ev->mlength, msg->msg_type, msg->msg_magic); - /* NB can't call srpc_service_recycle_buffer here since + /* + * NB can't call srpc_service_recycle_buffer here since * it may call LNetM[DE]Attach. The invalid magic tells - * srpc_handle_rpc to drop this RPC */ + * srpc_handle_rpc to drop this RPC + */ msg->msg_magic = 0; } if (!list_empty(&scd->scd_rpc_free)) { srpc = list_entry(scd->scd_rpc_free.next, - struct srpc_server_rpc, - srpc_list); + struct srpc_server_rpc, + srpc_list); list_del(&srpc->srpc_list); srpc_init_server_rpc(srpc, scd, buffer); list_add_tail(&srpc->srpc_list, - &scd->scd_rpc_active); + &scd->scd_rpc_active); swi_schedule_workitem(&srpc->srpc_wi); } else { list_add_tail(&buffer->buf_list, - &scd->scd_buf_blocked); + &scd->scd_buf_blocked); } spin_unlock(&scd->scd_lock); @@ -1537,14 +1563,14 @@ srpc_lnet_ev_handler(lnet_event_t *ev) case SRPC_BULK_GET_RPLD: LASSERT(ev->type == LNET_EVENT_SEND || - ev->type == LNET_EVENT_REPLY || - ev->type == LNET_EVENT_UNLINK); + ev->type == LNET_EVENT_REPLY || + ev->type == LNET_EVENT_UNLINK); if (!ev->unlinked) break; /* wait for final event */ case SRPC_BULK_PUT_SENT: - if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) { + if (!ev->status && ev->type != LNET_EVENT_UNLINK) { spin_lock(&srpc_data.rpc_glock); if (rpcev->ev_type == SRPC_BULK_GET_RPLD) @@ -1556,13 +1582,13 @@ srpc_lnet_ev_handler(lnet_event_t *ev) } case SRPC_REPLY_SENT: srpc = rpcev->ev_data; - scd = srpc->srpc_scd; + scd = srpc->srpc_scd; LASSERT(rpcev == &srpc->srpc_ev); spin_lock(&scd->scd_lock); - rpcev->ev_fired = 1; + rpcev->ev_fired = 1; rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ? 
-EINTR : ev->status; swi_schedule_workitem(&srpc->srpc_wi); @@ -1587,7 +1613,7 @@ srpc_startup(void) srpc_data.rpc_state = SRPC_STATE_NONE; - rc = LNetNIInit(LUSTRE_SRV_LNET_PID); + rc = LNetNIInit(LNET_PID_LUSTRE); if (rc < 0) { CERROR("LNetNIInit() has failed: %d\n", rc); return rc; @@ -1597,22 +1623,22 @@ srpc_startup(void) LNetInvalidateHandle(&srpc_data.rpc_lnet_eq); rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq); - if (rc != 0) { + if (rc) { CERROR("LNetEQAlloc() has failed: %d\n", rc); goto bail; } rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL); - LASSERT(rc == 0); + LASSERT(!rc); rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL); - LASSERT(rc == 0); + LASSERT(!rc); srpc_data.rpc_state = SRPC_STATE_EQ_INIT; rc = stt_startup(); bail: - if (rc != 0) + if (rc) srpc_shutdown(); else srpc_data.rpc_state = SRPC_STATE_RUNNING; @@ -1639,9 +1665,8 @@ srpc_shutdown(void) for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) { srpc_service_t *sv = srpc_data.rpc_services[i]; - LASSERTF(sv == NULL, - "service not empty: id %d, name %s\n", - i, sv->sv_name); + LASSERTF(!sv, "service not empty: id %d, name %s\n", + i, sv->sv_name); } spin_unlock(&srpc_data.rpc_glock); @@ -1651,13 +1676,11 @@ srpc_shutdown(void) case SRPC_STATE_EQ_INIT: rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL); rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL); - LASSERT(rc == 0); + LASSERT(!rc); rc = LNetEQFree(srpc_data.rpc_lnet_eq); - LASSERT(rc == 0); /* the EQ should have no user by now */ + LASSERT(!rc); /* the EQ should have no user by now */ case SRPC_STATE_NI_INIT: LNetNIFini(); } - - return; } diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h index 6b4a32a90857..a79c315f2ceb 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.h +++ b/drivers/staging/lustre/lnet/selftest/rpc.h @@ -45,24 +45,24 @@ * XXX: *REPLY == *REQST + 1 */ typedef enum { - SRPC_MSG_MKSN_REQST = 0, - SRPC_MSG_MKSN_REPLY = 1, - SRPC_MSG_RMSN_REQST = 2, - SRPC_MSG_RMSN_REPLY = 3, - SRPC_MSG_BATCH_REQST = 4, - SRPC_MSG_BATCH_REPLY = 5, - SRPC_MSG_STAT_REQST = 6, - SRPC_MSG_STAT_REPLY = 7, - SRPC_MSG_TEST_REQST = 8, - SRPC_MSG_TEST_REPLY = 9, - SRPC_MSG_DEBUG_REQST = 10, - SRPC_MSG_DEBUG_REPLY = 11, - SRPC_MSG_BRW_REQST = 12, - SRPC_MSG_BRW_REPLY = 13, - SRPC_MSG_PING_REQST = 14, - SRPC_MSG_PING_REPLY = 15, - SRPC_MSG_JOIN_REQST = 16, - SRPC_MSG_JOIN_REPLY = 17, + SRPC_MSG_MKSN_REQST = 0, + SRPC_MSG_MKSN_REPLY = 1, + SRPC_MSG_RMSN_REQST = 2, + SRPC_MSG_RMSN_REPLY = 3, + SRPC_MSG_BATCH_REQST = 4, + SRPC_MSG_BATCH_REPLY = 5, + SRPC_MSG_STAT_REQST = 6, + SRPC_MSG_STAT_REPLY = 7, + SRPC_MSG_TEST_REQST = 8, + SRPC_MSG_TEST_REPLY = 9, + SRPC_MSG_DEBUG_REQST = 10, + SRPC_MSG_DEBUG_REPLY = 11, + SRPC_MSG_BRW_REQST = 12, + SRPC_MSG_BRW_REPLY = 13, + SRPC_MSG_PING_REQST = 14, + SRPC_MSG_PING_REPLY = 15, + SRPC_MSG_JOIN_REQST = 16, + SRPC_MSG_JOIN_REPLY = 17, } srpc_msg_type_t; /* CAVEAT EMPTOR: @@ -78,127 +78,127 @@ typedef struct { } WIRE_ATTR srpc_generic_reqst_t; typedef struct { - __u32 status; - lst_sid_t sid; + __u32 status; + lst_sid_t sid; } WIRE_ATTR srpc_generic_reply_t; /* FRAMEWORK RPCs */ typedef struct { - __u64 mksn_rpyid; /* reply buffer matchbits */ - lst_sid_t mksn_sid; /* session id */ - __u32 mksn_force; /* use brute force */ + __u64 mksn_rpyid; /* reply buffer matchbits */ + lst_sid_t mksn_sid; /* session id */ + __u32 mksn_force; /* use brute force */ char mksn_name[LST_NAME_SIZE]; } WIRE_ATTR srpc_mksn_reqst_t; /* make session request */ typedef struct { - __u32 
mksn_status; /* session status */ - lst_sid_t mksn_sid; /* session id */ - __u32 mksn_timeout; /* session timeout */ - char mksn_name[LST_NAME_SIZE]; + __u32 mksn_status; /* session status */ + lst_sid_t mksn_sid; /* session id */ + __u32 mksn_timeout; /* session timeout */ + char mksn_name[LST_NAME_SIZE]; } WIRE_ATTR srpc_mksn_reply_t; /* make session reply */ typedef struct { - __u64 rmsn_rpyid; /* reply buffer matchbits */ - lst_sid_t rmsn_sid; /* session id */ + __u64 rmsn_rpyid; /* reply buffer matchbits */ + lst_sid_t rmsn_sid; /* session id */ } WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */ typedef struct { - __u32 rmsn_status; - lst_sid_t rmsn_sid; /* session id */ + __u32 rmsn_status; + lst_sid_t rmsn_sid; /* session id */ } WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */ typedef struct { - __u64 join_rpyid; /* reply buffer matchbits */ - lst_sid_t join_sid; /* session id to join */ - char join_group[LST_NAME_SIZE]; /* group name */ + __u64 join_rpyid; /* reply buffer matchbits */ + lst_sid_t join_sid; /* session id to join */ + char join_group[LST_NAME_SIZE]; /* group name */ } WIRE_ATTR srpc_join_reqst_t; typedef struct { - __u32 join_status; /* returned status */ - lst_sid_t join_sid; /* session id */ - __u32 join_timeout; /* # seconds' inactivity to + __u32 join_status; /* returned status */ + lst_sid_t join_sid; /* session id */ + __u32 join_timeout; /* # seconds' inactivity to * expire */ - char join_session[LST_NAME_SIZE]; /* session name */ + char join_session[LST_NAME_SIZE]; /* session name */ } WIRE_ATTR srpc_join_reply_t; typedef struct { - __u64 dbg_rpyid; /* reply buffer matchbits */ - lst_sid_t dbg_sid; /* session id */ - __u32 dbg_flags; /* bitmap of debug */ + __u64 dbg_rpyid; /* reply buffer matchbits */ + lst_sid_t dbg_sid; /* session id */ + __u32 dbg_flags; /* bitmap of debug */ } WIRE_ATTR srpc_debug_reqst_t; typedef struct { - __u32 dbg_status; /* returned code */ - lst_sid_t dbg_sid; /* session id */ - __u32 dbg_timeout; /* session timeout */ - __u32 dbg_nbatch; /* # of batches in the node */ - char dbg_name[LST_NAME_SIZE]; /* session name */ + __u32 dbg_status; /* returned code */ + lst_sid_t dbg_sid; /* session id */ + __u32 dbg_timeout; /* session timeout */ + __u32 dbg_nbatch; /* # of batches in the node */ + char dbg_name[LST_NAME_SIZE]; /* session name */ } WIRE_ATTR srpc_debug_reply_t; -#define SRPC_BATCH_OPC_RUN 1 -#define SRPC_BATCH_OPC_STOP 2 -#define SRPC_BATCH_OPC_QUERY 3 +#define SRPC_BATCH_OPC_RUN 1 +#define SRPC_BATCH_OPC_STOP 2 +#define SRPC_BATCH_OPC_QUERY 3 typedef struct { - __u64 bar_rpyid; /* reply buffer matchbits */ - lst_sid_t bar_sid; /* session id */ - lst_bid_t bar_bid; /* batch id */ - __u32 bar_opc; /* create/start/stop batch */ - __u32 bar_testidx; /* index of test */ - __u32 bar_arg; /* parameters */ + __u64 bar_rpyid; /* reply buffer matchbits */ + lst_sid_t bar_sid; /* session id */ + lst_bid_t bar_bid; /* batch id */ + __u32 bar_opc; /* create/start/stop batch */ + __u32 bar_testidx; /* index of test */ + __u32 bar_arg; /* parameters */ } WIRE_ATTR srpc_batch_reqst_t; typedef struct { - __u32 bar_status; /* status of request */ - lst_sid_t bar_sid; /* session id */ - __u32 bar_active; /* # of active tests in batch/test */ - __u32 bar_time; /* remained time */ + __u32 bar_status; /* status of request */ + lst_sid_t bar_sid; /* session id */ + __u32 bar_active; /* # of active tests in batch/test */ + __u32 bar_time; /* remained time */ } WIRE_ATTR srpc_batch_reply_t; typedef struct { - __u64 str_rpyid; /* 
reply buffer matchbits */ - lst_sid_t str_sid; /* session id */ - __u32 str_type; /* type of stat */ + __u64 str_rpyid; /* reply buffer matchbits */ + lst_sid_t str_sid; /* session id */ + __u32 str_type; /* type of stat */ } WIRE_ATTR srpc_stat_reqst_t; typedef struct { - __u32 str_status; - lst_sid_t str_sid; - sfw_counters_t str_fw; + __u32 str_status; + lst_sid_t str_sid; + sfw_counters_t str_fw; srpc_counters_t str_rpc; lnet_counters_t str_lnet; } WIRE_ATTR srpc_stat_reply_t; typedef struct { - __u32 blk_opc; /* bulk operation code */ - __u32 blk_npg; /* # of pages */ - __u32 blk_flags; /* reserved flags */ + __u32 blk_opc; /* bulk operation code */ + __u32 blk_npg; /* # of pages */ + __u32 blk_flags; /* reserved flags */ } WIRE_ATTR test_bulk_req_t; typedef struct { - __u16 blk_opc; /* bulk operation code */ - __u16 blk_flags; /* data check flags */ - __u32 blk_len; /* data length */ - __u32 blk_offset; /* reserved: offset */ + __u16 blk_opc; /* bulk operation code */ + __u16 blk_flags; /* data check flags */ + __u32 blk_len; /* data length */ + __u32 blk_offset; /* reserved: offset */ } WIRE_ATTR test_bulk_req_v1_t; typedef struct { - __u32 png_size; /* size of ping message */ - __u32 png_flags; /* reserved flags */ + __u32 png_size; /* size of ping message */ + __u32 png_flags; /* reserved flags */ } WIRE_ATTR test_ping_req_t; typedef struct { - __u64 tsr_rpyid; /* reply buffer matchbits */ - __u64 tsr_bulkid; /* bulk buffer matchbits */ + __u64 tsr_rpyid; /* reply buffer matchbits */ + __u64 tsr_bulkid; /* bulk buffer matchbits */ lst_sid_t tsr_sid; /* session id */ lst_bid_t tsr_bid; /* batch id */ - __u32 tsr_service; /* test type: bulk|ping|... */ - __u32 tsr_loop; /* test client loop count or + __u32 tsr_service; /* test type: bulk|ping|... */ + __u32 tsr_loop; /* test client loop count or * # server buffers needed */ - __u32 tsr_concur; /* concurrency of test */ - __u8 tsr_is_client; /* is test client or not */ + __u32 tsr_concur; /* concurrency of test */ + __u8 tsr_is_client; /* is test client or not */ __u8 tsr_stop_onerr; /* stop on error */ - __u32 tsr_ndest; /* # of dest nodes */ + __u32 tsr_ndest; /* # of dest nodes */ union { test_ping_req_t ping; @@ -208,7 +208,7 @@ typedef struct { } WIRE_ATTR srpc_test_reqst_t; typedef struct { - __u32 tsr_status; /* returned code */ + __u32 tsr_status; /* returned code */ lst_sid_t tsr_sid; } WIRE_ATTR srpc_test_reply_t; @@ -228,19 +228,19 @@ typedef struct { } WIRE_ATTR srpc_ping_reply_t; typedef struct { - __u64 brw_rpyid; /* reply buffer matchbits */ - __u64 brw_bulkid; /* bulk buffer matchbits */ - __u32 brw_rw; /* read or write */ - __u32 brw_len; /* bulk data len */ - __u32 brw_flags; /* bulk data patterns */ + __u64 brw_rpyid; /* reply buffer matchbits */ + __u64 brw_bulkid; /* bulk buffer matchbits */ + __u32 brw_rw; /* read or write */ + __u32 brw_len; /* bulk data len */ + __u32 brw_flags; /* bulk data patterns */ } WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */ typedef struct { __u32 brw_status; } WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */ -#define SRPC_MSG_MAGIC 0xeeb0f00d -#define SRPC_MSG_VERSION 1 +#define SRPC_MSG_MAGIC 0xeeb0f00d +#define SRPC_MSG_VERSION 1 typedef struct srpc_msg { __u32 msg_magic; /* magic number */ @@ -281,8 +281,10 @@ srpc_unpack_msg_hdr(srpc_msg_t *msg) if (msg->msg_magic == SRPC_MSG_MAGIC) return; /* no flipping needed */ - /* We do not swap the magic number here as it is needed to - determine whether the body needs to be swapped. 
*/ + /* + * We do not swap the magic number here as it is needed to + * determine whether the body needs to be swapped. + */ /* __swab32s(&msg->msg_magic); */ __swab32s(&msg->msg_type); __swab32s(&msg->msg_version); diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h index 870498339538..288522d4d7b9 100644 --- a/drivers/staging/lustre/lnet/selftest/selftest.h +++ b/drivers/staging/lustre/lnet/selftest/selftest.h @@ -56,14 +56,14 @@ #define MADE_WITHOUT_COMPROMISE #endif -#define SWI_STATE_NEWBORN 0 -#define SWI_STATE_REPLY_SUBMITTED 1 -#define SWI_STATE_REPLY_SENT 2 -#define SWI_STATE_REQUEST_SUBMITTED 3 -#define SWI_STATE_REQUEST_SENT 4 -#define SWI_STATE_REPLY_RECEIVED 5 -#define SWI_STATE_BULK_STARTED 6 -#define SWI_STATE_DONE 10 +#define SWI_STATE_NEWBORN 0 +#define SWI_STATE_REPLY_SUBMITTED 1 +#define SWI_STATE_REPLY_SENT 2 +#define SWI_STATE_REQUEST_SUBMITTED 3 +#define SWI_STATE_REQUEST_SENT 4 +#define SWI_STATE_REPLY_RECEIVED 5 +#define SWI_STATE_BULK_STARTED 6 +#define SWI_STATE_DONE 10 /* forward refs */ struct srpc_service; @@ -74,31 +74,31 @@ struct sfw_test_instance; /* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework * services, e.g. create/modify session. */ -#define SRPC_SERVICE_DEBUG 0 -#define SRPC_SERVICE_MAKE_SESSION 1 -#define SRPC_SERVICE_REMOVE_SESSION 2 -#define SRPC_SERVICE_BATCH 3 -#define SRPC_SERVICE_TEST 4 -#define SRPC_SERVICE_QUERY_STAT 5 -#define SRPC_SERVICE_JOIN 6 -#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10 +#define SRPC_SERVICE_DEBUG 0 +#define SRPC_SERVICE_MAKE_SESSION 1 +#define SRPC_SERVICE_REMOVE_SESSION 2 +#define SRPC_SERVICE_BATCH 3 +#define SRPC_SERVICE_TEST 4 +#define SRPC_SERVICE_QUERY_STAT 5 +#define SRPC_SERVICE_JOIN 6 +#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10 /* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */ -#define SRPC_SERVICE_BRW 11 -#define SRPC_SERVICE_PING 12 -#define SRPC_SERVICE_MAX_ID 12 +#define SRPC_SERVICE_BRW 11 +#define SRPC_SERVICE_PING 12 +#define SRPC_SERVICE_MAX_ID 12 -#define SRPC_REQUEST_PORTAL 50 +#define SRPC_REQUEST_PORTAL 50 /* a lazy portal for framework RPC requests */ -#define SRPC_FRAMEWORK_REQUEST_PORTAL 51 +#define SRPC_FRAMEWORK_REQUEST_PORTAL 51 /* all reply/bulk RDMAs go to this portal */ -#define SRPC_RDMA_PORTAL 52 +#define SRPC_RDMA_PORTAL 52 static inline srpc_msg_type_t -srpc_service2request (int service) +srpc_service2request(int service) { switch (service) { default: - LBUG (); + LBUG(); case SRPC_SERVICE_DEBUG: return SRPC_MSG_DEBUG_REQST; @@ -129,7 +129,7 @@ srpc_service2request (int service) } static inline srpc_msg_type_t -srpc_service2reply (int service) +srpc_service2reply(int service) { return srpc_service2request(service) + 1; } @@ -149,25 +149,25 @@ typedef enum { typedef struct { srpc_event_type_t ev_type; /* what's up */ lnet_event_kind_t ev_lnet; /* LNet event type */ - int ev_fired; /* LNet event fired? */ - int ev_status; /* LNet event status */ - void *ev_data; /* owning server/client RPC */ + int ev_fired; /* LNet event fired? 
*/ + int ev_status; /* LNet event status */ + void *ev_data; /* owning server/client RPC */ } srpc_event_t; typedef struct { - int bk_len; /* len of bulk data */ + int bk_len; /* len of bulk data */ lnet_handle_md_t bk_mdh; - int bk_sink; /* sink/source */ - int bk_niov; /* # iov in bk_iovs */ - lnet_kiov_t bk_iovs[0]; + int bk_sink; /* sink/source */ + int bk_niov; /* # iov in bk_iovs */ + lnet_kiov_t bk_iovs[0]; } srpc_bulk_t; /* bulk descriptor */ /* message buffer descriptor */ typedef struct srpc_buffer { struct list_head buf_list; /* chain on srpc_service::*_msgq */ - srpc_msg_t buf_msg; + srpc_msg_t buf_msg; lnet_handle_md_t buf_mdh; - lnet_nid_t buf_self; + lnet_nid_t buf_self; lnet_process_id_t buf_peer; } srpc_buffer_t; @@ -176,9 +176,9 @@ typedef int (*swi_action_t) (struct swi_workitem *); typedef struct swi_workitem { struct cfs_wi_sched *swi_sched; - cfs_workitem_t swi_workitem; - swi_action_t swi_action; - int swi_state; + cfs_workitem_t swi_workitem; + swi_action_t swi_action; + int swi_state; } swi_workitem_t; /* server-side state of a RPC */ @@ -186,78 +186,78 @@ struct srpc_server_rpc { /* chain on srpc_service::*_rpcq */ struct list_head srpc_list; struct srpc_service_cd *srpc_scd; - swi_workitem_t srpc_wi; - srpc_event_t srpc_ev; /* bulk/reply event */ - lnet_nid_t srpc_self; + swi_workitem_t srpc_wi; + srpc_event_t srpc_ev; /* bulk/reply event */ + lnet_nid_t srpc_self; lnet_process_id_t srpc_peer; - srpc_msg_t srpc_replymsg; + srpc_msg_t srpc_replymsg; lnet_handle_md_t srpc_replymdh; - srpc_buffer_t *srpc_reqstbuf; - srpc_bulk_t *srpc_bulk; + srpc_buffer_t *srpc_reqstbuf; + srpc_bulk_t *srpc_bulk; - unsigned int srpc_aborted; /* being given up */ - int srpc_status; - void (*srpc_done)(struct srpc_server_rpc *); + unsigned int srpc_aborted; /* being given up */ + int srpc_status; + void (*srpc_done)(struct srpc_server_rpc *); }; /* client-side state of a RPC */ typedef struct srpc_client_rpc { - struct list_head crpc_list; /* chain on user's lists */ - spinlock_t crpc_lock; /* serialize */ - int crpc_service; - atomic_t crpc_refcount; - int crpc_timeout; /* # seconds to wait for reply */ - stt_timer_t crpc_timer; - swi_workitem_t crpc_wi; + struct list_head crpc_list; /* chain on user's lists */ + spinlock_t crpc_lock; /* serialize */ + int crpc_service; + atomic_t crpc_refcount; + int crpc_timeout; /* # seconds to wait for reply */ + struct stt_timer crpc_timer; + swi_workitem_t crpc_wi; lnet_process_id_t crpc_dest; - void (*crpc_done)(struct srpc_client_rpc *); - void (*crpc_fini)(struct srpc_client_rpc *); - int crpc_status; /* completion status */ - void *crpc_priv; /* caller data */ + void (*crpc_done)(struct srpc_client_rpc *); + void (*crpc_fini)(struct srpc_client_rpc *); + int crpc_status; /* completion status */ + void *crpc_priv; /* caller data */ /* state flags */ - unsigned int crpc_aborted:1; /* being given up */ - unsigned int crpc_closed:1; /* completed */ + unsigned int crpc_aborted:1; /* being given up */ + unsigned int crpc_closed:1; /* completed */ /* RPC events */ - srpc_event_t crpc_bulkev; /* bulk event */ - srpc_event_t crpc_reqstev; /* request event */ - srpc_event_t crpc_replyev; /* reply event */ + srpc_event_t crpc_bulkev; /* bulk event */ + srpc_event_t crpc_reqstev; /* request event */ + srpc_event_t crpc_replyev; /* reply event */ /* bulk, request(reqst), and reply exchanged on wire */ - srpc_msg_t crpc_reqstmsg; - srpc_msg_t crpc_replymsg; + srpc_msg_t crpc_reqstmsg; + srpc_msg_t crpc_replymsg; lnet_handle_md_t crpc_reqstmdh; 
lnet_handle_md_t crpc_replymdh; - srpc_bulk_t crpc_bulk; + srpc_bulk_t crpc_bulk; } srpc_client_rpc_t; -#define srpc_client_rpc_size(rpc) \ +#define srpc_client_rpc_size(rpc) \ offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov]) -#define srpc_client_rpc_addref(rpc) \ -do { \ - CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \ - (rpc), libcfs_id2str((rpc)->crpc_dest), \ - atomic_read(&(rpc)->crpc_refcount)); \ - LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ - atomic_inc(&(rpc)->crpc_refcount); \ +#define srpc_client_rpc_addref(rpc) \ +do { \ + CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \ + (rpc), libcfs_id2str((rpc)->crpc_dest), \ + atomic_read(&(rpc)->crpc_refcount)); \ + LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ + atomic_inc(&(rpc)->crpc_refcount); \ } while (0) -#define srpc_client_rpc_decref(rpc) \ -do { \ - CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \ - (rpc), libcfs_id2str((rpc)->crpc_dest), \ - atomic_read(&(rpc)->crpc_refcount)); \ - LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ - if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \ - srpc_destroy_client_rpc(rpc); \ +#define srpc_client_rpc_decref(rpc) \ +do { \ + CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \ + (rpc), libcfs_id2str((rpc)->crpc_dest), \ + atomic_read(&(rpc)->crpc_refcount)); \ + LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ + if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \ + srpc_destroy_client_rpc(rpc); \ } while (0) -#define srpc_event_pending(rpc) ((rpc)->crpc_bulkev.ev_fired == 0 || \ - (rpc)->crpc_reqstev.ev_fired == 0 || \ - (rpc)->crpc_replyev.ev_fired == 0) +#define srpc_event_pending(rpc) (!(rpc)->crpc_bulkev.ev_fired || \ + !(rpc)->crpc_reqstev.ev_fired || \ + !(rpc)->crpc_replyev.ev_fired) /* CPU partition data of srpc service */ struct srpc_service_cd { @@ -268,9 +268,9 @@ struct srpc_service_cd { /** event buffer */ srpc_event_t scd_ev; /** free RPC descriptors */ - struct list_head scd_rpc_free; + struct list_head scd_rpc_free; /** in-flight RPCs */ - struct list_head scd_rpc_active; + struct list_head scd_rpc_active; /** workitem for posting buffer */ swi_workitem_t scd_buf_wi; /** CPT id */ @@ -278,7 +278,7 @@ struct srpc_service_cd { /** error code for scd_buf_wi */ int scd_buf_err; /** timestamp for scd_buf_err */ - time64_t scd_buf_err_stamp; + time64_t scd_buf_err_stamp; /** total # request buffers */ int scd_buf_total; /** # posted request buffers */ @@ -290,16 +290,16 @@ struct srpc_service_cd { /** increase/decrease some buffers */ int scd_buf_adjust; /** posted message buffers */ - struct list_head scd_buf_posted; + struct list_head scd_buf_posted; /** blocked for RPC descriptor */ - struct list_head scd_buf_blocked; + struct list_head scd_buf_blocked; }; /* number of server workitems (mini-thread) for testing service */ #define SFW_TEST_WI_MIN 256 #define SFW_TEST_WI_MAX 2048 /* extra buffers for tolerating buggy peers, or unbalanced number - * of peers between partitions */ + * of peers between partitions */ #define SFW_TEST_WI_EXTRA 64 /* number of server workitems (mini-thread) for framework service */ @@ -324,29 +324,29 @@ typedef struct srpc_service { typedef struct { struct list_head sn_list; /* chain on fw_zombie_sessions */ - lst_sid_t sn_id; /* unique identifier */ - unsigned int sn_timeout; /* # seconds' inactivity to expire */ - int sn_timer_active; - unsigned int sn_features; - stt_timer_t sn_timer; + lst_sid_t sn_id; /* unique identifier */ + unsigned int sn_timeout; /* # seconds' inactivity to expire */ + int sn_timer_active; + unsigned int sn_features; 
+ struct stt_timer sn_timer; struct list_head sn_batches; /* list of batches */ - char sn_name[LST_NAME_SIZE]; - atomic_t sn_refcount; - atomic_t sn_brw_errors; - atomic_t sn_ping_errors; - unsigned long sn_started; + char sn_name[LST_NAME_SIZE]; + atomic_t sn_refcount; + atomic_t sn_brw_errors; + atomic_t sn_ping_errors; + unsigned long sn_started; } sfw_session_t; #define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \ (sid0).ses_stamp == (sid1).ses_stamp) typedef struct { - struct list_head bat_list; /* chain on sn_batches */ - lst_bid_t bat_id; /* batch id */ - int bat_error; /* error code of batch */ - sfw_session_t *bat_session; /* batch's session */ - atomic_t bat_nactive; /* # of active tests */ - struct list_head bat_tests; /* test instances */ + struct list_head bat_list; /* chain on sn_batches */ + lst_bid_t bat_id; /* batch id */ + int bat_error; /* error code of batch */ + sfw_session_t *bat_session; /* batch's session */ + atomic_t bat_nactive; /* # of active tests */ + struct list_head bat_tests; /* test instances */ } sfw_batch_t; typedef struct { @@ -356,32 +356,32 @@ typedef struct { * client */ int (*tso_prep_rpc)(struct sfw_test_unit *tsu, lnet_process_id_t dest, - srpc_client_rpc_t **rpc); /* prep a tests rpc */ + srpc_client_rpc_t **rpc); /* prep a tests rpc */ void (*tso_done_rpc)(struct sfw_test_unit *tsu, - srpc_client_rpc_t *rpc); /* done a test rpc */ + srpc_client_rpc_t *rpc); /* done a test rpc */ } sfw_test_client_ops_t; typedef struct sfw_test_instance { - struct list_head tsi_list; /* chain on batch */ - int tsi_service; /* test type */ - sfw_batch_t *tsi_batch; /* batch */ - sfw_test_client_ops_t *tsi_ops; /* test client operation + struct list_head tsi_list; /* chain on batch */ + int tsi_service; /* test type */ + sfw_batch_t *tsi_batch; /* batch */ + sfw_test_client_ops_t *tsi_ops; /* test client operation */ /* public parameter for all test units */ - unsigned int tsi_is_client:1; /* is test client */ - unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */ - int tsi_concur; /* concurrency */ - int tsi_loop; /* loop count */ + unsigned int tsi_is_client:1; /* is test client */ + unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */ + int tsi_concur; /* concurrency */ + int tsi_loop; /* loop count */ /* status of test instance */ - spinlock_t tsi_lock; /* serialize */ - unsigned int tsi_stopping:1; /* test is stopping */ - atomic_t tsi_nactive; /* # of active test + spinlock_t tsi_lock; /* serialize */ + unsigned int tsi_stopping:1; /* test is stopping */ + atomic_t tsi_nactive; /* # of active test * unit */ - struct list_head tsi_units; /* test units */ - struct list_head tsi_free_rpcs; /* free rpcs */ - struct list_head tsi_active_rpcs; /* active rpcs */ + struct list_head tsi_units; /* test units */ + struct list_head tsi_free_rpcs; /* free rpcs */ + struct list_head tsi_active_rpcs; /* active rpcs */ union { test_ping_req_t ping; /* ping parameter */ @@ -392,30 +392,30 @@ typedef struct sfw_test_instance { /* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at * the end of pages are not used */ -#define SFW_MAX_CONCUR LST_MAX_CONCUR +#define SFW_MAX_CONCUR LST_MAX_CONCUR #define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t)) -#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) +#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) #define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) typedef struct sfw_test_unit { - struct list_head tsu_list; /* chain on 
lst_test_instance */ - lnet_process_id_t tsu_dest; /* id of dest node */ - int tsu_loop; /* loop count of the test */ + struct list_head tsu_list; /* chain on lst_test_instance */ + lnet_process_id_t tsu_dest; /* id of dest node */ + int tsu_loop; /* loop count of the test */ sfw_test_instance_t *tsu_instance; /* pointer to test instance */ - void *tsu_private; /* private data */ - swi_workitem_t tsu_worker; /* workitem of the test unit */ + void *tsu_private; /* private data */ + swi_workitem_t tsu_worker; /* workitem of the test unit */ } sfw_test_unit_t; typedef struct sfw_test_case { - struct list_head tsc_list; /* chain on fw_tests */ - srpc_service_t *tsc_srv_service; /* test service */ - sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */ + struct list_head tsc_list; /* chain on fw_tests */ + srpc_service_t *tsc_srv_service; /* test service */ + sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */ } sfw_test_case_t; srpc_client_rpc_t * sfw_create_rpc(lnet_process_id_t peer, int service, unsigned features, int nbulkiov, int bulklen, - void (*done) (srpc_client_rpc_t *), void *priv); + void (*done)(srpc_client_rpc_t *), void *priv); int sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, unsigned features, int nblk, int blklen, srpc_client_rpc_t **rpc); @@ -427,7 +427,7 @@ void sfw_free_pages(struct srpc_server_rpc *rpc); void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i); int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, int sink); -int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply); +int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply); srpc_client_rpc_t * srpc_create_client_rpc(lnet_process_id_t peer, int service, @@ -472,9 +472,9 @@ static inline void swi_init_workitem(swi_workitem_t *swi, void *data, swi_action_t action, struct cfs_wi_sched *sched) { - swi->swi_sched = sched; + swi->swi_sched = sched; swi->swi_action = action; - swi->swi_state = SWI_STATE_NEWBORN; + swi->swi_state = SWI_STATE_NEWBORN; cfs_wi_init(&swi->swi_workitem, data, swi_wi_action); } @@ -502,26 +502,23 @@ void sfw_shutdown(void); void srpc_shutdown(void); static inline void -srpc_destroy_client_rpc (srpc_client_rpc_t *rpc) +srpc_destroy_client_rpc(srpc_client_rpc_t *rpc) { - LASSERT(rpc != NULL); + LASSERT(rpc); LASSERT(!srpc_event_pending(rpc)); - LASSERT(atomic_read(&rpc->crpc_refcount) == 0); + LASSERT(!atomic_read(&rpc->crpc_refcount)); - if (rpc->crpc_fini == NULL) { + if (!rpc->crpc_fini) LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); - } else { - (*rpc->crpc_fini) (rpc); - } - - return; + else + (*rpc->crpc_fini)(rpc); } static inline void -srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer, - int service, int nbulkiov, int bulklen, - void (*rpc_done)(srpc_client_rpc_t *), - void (*rpc_fini)(srpc_client_rpc_t *), void *priv) +srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer, + int service, int nbulkiov, int bulklen, + void (*rpc_done)(srpc_client_rpc_t *), + void (*rpc_fini)(srpc_client_rpc_t *), void *priv) { LASSERT(nbulkiov <= LNET_MAX_IOV); @@ -534,30 +531,29 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer, spin_lock_init(&rpc->crpc_lock); atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */ - rpc->crpc_dest = peer; - rpc->crpc_priv = priv; - rpc->crpc_service = service; - rpc->crpc_bulk.bk_len = bulklen; + rpc->crpc_dest = peer; + rpc->crpc_priv = priv; + rpc->crpc_service = service; + rpc->crpc_bulk.bk_len = 
bulklen; rpc->crpc_bulk.bk_niov = nbulkiov; - rpc->crpc_done = rpc_done; - rpc->crpc_fini = rpc_fini; + rpc->crpc_done = rpc_done; + rpc->crpc_fini = rpc_fini; LNetInvalidateHandle(&rpc->crpc_reqstmdh); LNetInvalidateHandle(&rpc->crpc_replymdh); LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh); /* no event is expected at this point */ - rpc->crpc_bulkev.ev_fired = - rpc->crpc_reqstev.ev_fired = + rpc->crpc_bulkev.ev_fired = 1; + rpc->crpc_reqstev.ev_fired = 1; rpc->crpc_replyev.ev_fired = 1; - rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC; + rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC; rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION; - rpc->crpc_reqstmsg.msg_type = srpc_service2request(service); - return; + rpc->crpc_reqstmsg.msg_type = srpc_service2request(service); } static inline const char * -swi_state2str (int state) +swi_state2str(int state) { #define STATE2STR(x) case x: return #x switch (state) { @@ -602,11 +598,11 @@ srpc_wait_service_shutdown(srpc_service_t *sv) LASSERT(sv->sv_shuttingdown); - while (srpc_finish_service(sv) == 0) { + while (!srpc_finish_service(sv)) { i++; - CDEBUG (((i & -i) == i) ? D_WARNING : D_NET, - "Waiting for %s service to shutdown...\n", - sv->sv_name); + CDEBUG(((i & -i) == i) ? D_WARNING : D_NET, + "Waiting for %s service to shutdown...\n", + sv->sv_name); selftest_wait_events(); } } diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c index b98c08a10606..8be52526ae5a 100644 --- a/drivers/staging/lustre/lnet/selftest/timer.c +++ b/drivers/staging/lustre/lnet/selftest/timer.c @@ -57,17 +57,17 @@ (STTIMER_NSLOTS - 1))]) static struct st_timer_data { - spinlock_t stt_lock; - unsigned long stt_prev_slot; /* start time of the slot processed + spinlock_t stt_lock; + unsigned long stt_prev_slot; /* start time of the slot processed * previously */ struct list_head stt_hash[STTIMER_NSLOTS]; - int stt_shuttingdown; + int stt_shuttingdown; wait_queue_head_t stt_waitq; - int stt_nthreads; + int stt_nthreads; } stt_data; void -stt_add_timer(stt_timer_t *timer) +stt_add_timer(struct stt_timer *timer) { struct list_head *pos; @@ -75,13 +75,14 @@ stt_add_timer(stt_timer_t *timer) LASSERT(stt_data.stt_nthreads > 0); LASSERT(!stt_data.stt_shuttingdown); - LASSERT(timer->stt_func != NULL); + LASSERT(timer->stt_func); LASSERT(list_empty(&timer->stt_list)); LASSERT(timer->stt_expires > ktime_get_real_seconds()); /* a simple insertion sort */ list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) { - stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list); + struct stt_timer *old = list_entry(pos, struct stt_timer, + stt_list); if (timer->stt_expires >= old->stt_expires) break; @@ -101,7 +102,7 @@ stt_add_timer(stt_timer_t *timer) * another CPU. 
*/ int -stt_del_timer(stt_timer_t *timer) +stt_del_timer(struct stt_timer *timer) { int ret = 0; @@ -124,10 +125,10 @@ static int stt_expire_list(struct list_head *slot, time64_t now) { int expired = 0; - stt_timer_t *timer; + struct stt_timer *timer; while (!list_empty(slot)) { - timer = list_entry(slot->next, stt_timer_t, stt_list); + timer = list_entry(slot->next, struct stt_timer, stt_list); if (timer->stt_expires > now) break; @@ -218,7 +219,7 @@ stt_startup(void) stt_data.stt_nthreads = 0; init_waitqueue_head(&stt_data.stt_waitq); rc = stt_start_timer_thread(); - if (rc != 0) + if (rc) CERROR("Can't spawn timer thread: %d\n", rc); return rc; @@ -237,7 +238,7 @@ stt_shutdown(void) stt_data.stt_shuttingdown = 1; wake_up(&stt_data.stt_waitq); - lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock, + lst_wait_until(!stt_data.stt_nthreads, stt_data.stt_lock, "waiting for %d threads to terminate\n", stt_data.stt_nthreads); diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h index 03e2ee294c1c..f1fbebd8a67c 100644 --- a/drivers/staging/lustre/lnet/selftest/timer.h +++ b/drivers/staging/lustre/lnet/selftest/timer.h @@ -38,15 +38,15 @@ #ifndef __SELFTEST_TIMER_H__ #define __SELFTEST_TIMER_H__ -typedef struct { +struct stt_timer { struct list_head stt_list; - time64_t stt_expires; - void (*stt_func) (void *); - void *stt_data; -} stt_timer_t; + time64_t stt_expires; + void (*stt_func)(void *); + void *stt_data; +}; -void stt_add_timer(stt_timer_t *timer); -int stt_del_timer(stt_timer_t *timer); +void stt_add_timer(struct stt_timer *timer); +int stt_del_timer(struct stt_timer *timer); int stt_startup(void); void stt_shutdown(void); diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig index 62c7bba75274..a09b51ce8265 100644 --- a/drivers/staging/lustre/lustre/Kconfig +++ b/drivers/staging/lustre/lustre/Kconfig @@ -1,6 +1,6 @@ config LUSTRE_FS tristate "Lustre file system client support" - depends on INET && m && !MIPS && !XTENSA && !SUPERH + depends on m && !MIPS && !XTENSA && !SUPERH select LNET select CRYPTO select CRYPTO_CRC32 diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile index 35d8b0b2dff4..331e4fcdd5a2 100644 --- a/drivers/staging/lustre/lustre/Makefile +++ b/drivers/staging/lustre/lustre/Makefile @@ -1,2 +1,2 @@ -obj-$(CONFIG_LUSTRE_FS) += libcfs/ obdclass/ ptlrpc/ fld/ osc/ mgc/ \ +obj-$(CONFIG_LUSTRE_FS) += obdclass/ ptlrpc/ fld/ osc/ mgc/ \ fid/ lov/ mdc/ lmv/ llite/ obdecho/ diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c index ff8f38dc10ce..39269c3c56a6 100644 --- a/drivers/staging/lustre/lustre/fid/fid_request.c +++ b/drivers/staging/lustre/lustre/fid/fid_request.c @@ -68,7 +68,7 @@ static int seq_client_rpc(struct lu_client_seq *seq, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY, LUSTRE_MDS_VERSION, SEQ_QUERY); - if (req == NULL) + if (!req) return -ENOMEM; /* Init operation code */ @@ -95,7 +95,8 @@ static int seq_client_rpc(struct lu_client_seq *seq, * precreating objects on this OST), and it will send the * request to MDT0 here, so we can not keep resending the * request here, otherwise if MDT0 is failed(umounted), - * it can not release the export of MDT0 */ + * it can not release the export of MDT0 + */ if (seq->lcs_type == LUSTRE_SEQ_DATA) req->rq_no_delay = req->rq_no_resend = 1; debug_mask = D_CONSOLE; @@ -152,7 +153,8 @@ static int 
seq_client_alloc_meta(const struct lu_env *env, /* If meta server return -EINPROGRESS or EAGAIN, * it means meta server might not be ready to * allocate super sequence from sequence controller - * (MDT0)yet */ + * (MDT0)yet + */ rc = seq_client_rpc(seq, &seq->lcs_space, SEQ_ALLOC_META, "meta"); } while (rc == -EINPROGRESS || rc == -EAGAIN); @@ -226,8 +228,8 @@ int seq_client_alloc_fid(const struct lu_env *env, wait_queue_t link; int rc; - LASSERT(seq != NULL); - LASSERT(fid != NULL); + LASSERT(seq); + LASSERT(fid); init_waitqueue_entry(&link, current); mutex_lock(&seq->lcs_mutex); @@ -292,7 +294,7 @@ void seq_client_flush(struct lu_client_seq *seq) { wait_queue_t link; - LASSERT(seq != NULL); + LASSERT(seq); init_waitqueue_entry(&link, current); mutex_lock(&seq->lcs_mutex); @@ -375,8 +377,8 @@ static int seq_client_init(struct lu_client_seq *seq, { int rc; - LASSERT(seq != NULL); - LASSERT(prefix != NULL); + LASSERT(seq); + LASSERT(prefix); seq->lcs_type = type; @@ -438,7 +440,7 @@ int client_fid_fini(struct obd_device *obd) { struct client_obd *cli = &obd->u.cli; - if (cli->cl_seq != NULL) { + if (cli->cl_seq) { seq_client_fini(cli->cl_seq); kfree(cli->cl_seq); cli->cl_seq = NULL; @@ -448,7 +450,7 @@ int client_fid_fini(struct obd_device *obd) } EXPORT_SYMBOL(client_fid_fini); -static int __init fid_mod_init(void) +static int __init fid_init(void) { seq_debugfs_dir = ldebugfs_register(LUSTRE_SEQ_NAME, debugfs_lustre_root, @@ -456,16 +458,16 @@ static int __init fid_mod_init(void) return PTR_ERR_OR_ZERO(seq_debugfs_dir); } -static void __exit fid_mod_exit(void) +static void __exit fid_exit(void) { if (!IS_ERR_OR_NULL(seq_debugfs_dir)) ldebugfs_remove(&seq_debugfs_dir); } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre FID Module"); +MODULE_DESCRIPTION("Lustre File IDentifier"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -MODULE_VERSION("0.1.0"); -module_init(fid_mod_init); -module_exit(fid_mod_exit); +module_init(fid_init); +module_exit(fid_exit); diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c index 39f2aa32e984..1f0e78686278 100644 --- a/drivers/staging/lustre/lustre/fid/lproc_fid.c +++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c @@ -66,7 +66,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count, int rc; char kernbuf[MAX_FID_RANGE_STRLEN]; - LASSERT(range != NULL); + LASSERT(range); if (count >= sizeof(kernbuf)) return -EINVAL; @@ -85,6 +85,8 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count, rc = sscanf(kernbuf, "[%llx - %llx]\n", (unsigned long long *)&tmp.lsr_start, (unsigned long long *)&tmp.lsr_end); + if (rc != 2) + return -EINVAL; if (!range_is_sane(&tmp) || range_is_zero(&tmp) || tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end) return -EINVAL; @@ -102,7 +104,6 @@ ldebugfs_fid_space_seq_write(struct file *file, int rc; seq = ((struct seq_file *)file->private_data)->private; - LASSERT(seq != NULL); mutex_lock(&seq->lcs_mutex); rc = ldebugfs_fid_write_common(buffer, count, &seq->lcs_space); @@ -122,8 +123,6 @@ ldebugfs_fid_space_seq_show(struct seq_file *m, void *unused) { struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - LASSERT(seq != NULL); - mutex_lock(&seq->lcs_mutex); seq_printf(m, "[%#llx - %#llx]:%x:%s\n", PRANGE(&seq->lcs_space)); mutex_unlock(&seq->lcs_mutex); @@ -141,7 +140,6 @@ ldebugfs_fid_width_seq_write(struct file *file, int rc, val; seq = ((struct seq_file 
*)file->private_data)->private; - LASSERT(seq != NULL); rc = lprocfs_write_helper(buffer, count, &val); if (rc) @@ -170,8 +168,6 @@ ldebugfs_fid_width_seq_show(struct seq_file *m, void *unused) { struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - LASSERT(seq != NULL); - mutex_lock(&seq->lcs_mutex); seq_printf(m, "%llu\n", seq->lcs_width); mutex_unlock(&seq->lcs_mutex); @@ -184,8 +180,6 @@ ldebugfs_fid_fid_seq_show(struct seq_file *m, void *unused) { struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - LASSERT(seq != NULL); - mutex_lock(&seq->lcs_mutex); seq_printf(m, DFID "\n", PFID(&seq->lcs_fid)); mutex_unlock(&seq->lcs_mutex); @@ -199,9 +193,7 @@ ldebugfs_fid_server_seq_show(struct seq_file *m, void *unused) struct lu_client_seq *seq = (struct lu_client_seq *)m->private; struct client_obd *cli; - LASSERT(seq != NULL); - - if (seq->lcs_exp != NULL) { + if (seq->lcs_exp) { cli = &seq->lcs_exp->exp_obd->u.cli; seq_printf(m, "%s\n", cli->cl_target_uuid.uuid); } diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c index d9459e58e2ce..062f388cf38a 100644 --- a/drivers/staging/lustre/lustre/fld/fld_cache.c +++ b/drivers/staging/lustre/lustre/fld/fld_cache.c @@ -65,7 +65,7 @@ struct fld_cache *fld_cache_init(const char *name, { struct fld_cache *cache; - LASSERT(name != NULL); + LASSERT(name); LASSERT(cache_threshold < cache_size); cache = kzalloc(sizeof(*cache), GFP_NOFS); @@ -100,7 +100,7 @@ void fld_cache_fini(struct fld_cache *cache) { __u64 pct; - LASSERT(cache != NULL); + LASSERT(cache); fld_cache_flush(cache); if (cache->fci_stat.fst_count > 0) { @@ -183,7 +183,8 @@ restart_fixup: } /* we could have overlap over next - * range too. better restart. */ + * range too. better restart. + */ goto restart_fixup; } @@ -218,8 +219,6 @@ static int fld_cache_shrink(struct fld_cache *cache) struct list_head *curr; int num = 0; - LASSERT(cache != NULL); - if (cache->fci_cache_count < cache->fci_cache_size) return 0; @@ -234,7 +233,7 @@ static int fld_cache_shrink(struct fld_cache *cache) } CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n", - cache->fci_name, num); + cache->fci_name, num); return 0; } @@ -295,8 +294,8 @@ static void fld_cache_punch_hole(struct fld_cache *cache, * handle range overlap in fld cache. */ static void fld_cache_overlap_handle(struct fld_cache *cache, - struct fld_cache_entry *f_curr, - struct fld_cache_entry *f_new) + struct fld_cache_entry *f_curr, + struct fld_cache_entry *f_new) { const struct lu_seq_range *range = &f_new->fce_range; const u64 new_start = range->lsr_start; @@ -304,7 +303,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache, const u32 mdt = range->lsr_index; /* this is overlap case, these case are checking overlapping with - * prev range only. fixup will handle overlapping with next range. */ + * prev range only. fixup will handle overlapping with next range. + */ if (f_curr->fce_range.lsr_index == mdt) { f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start, @@ -319,7 +319,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache, } else if (new_start <= f_curr->fce_range.lsr_start && f_curr->fce_range.lsr_end <= new_end) { /* case 1: new range completely overshadowed existing range. - * e.g. whole range migrated. update fld cache entry */ + * e.g. whole range migrated. 
update fld cache entry + */ f_curr->fce_range = *range; kfree(f_new); @@ -401,8 +402,8 @@ static int fld_cache_insert_nolock(struct fld_cache *cache, list_for_each_entry_safe(f_curr, n, head, fce_list) { /* add list if next is end of list */ if (new_end < f_curr->fce_range.lsr_start || - (new_end == f_curr->fce_range.lsr_start && - new_flags != f_curr->fce_range.lsr_flags)) + (new_end == f_curr->fce_range.lsr_start && + new_flags != f_curr->fce_range.lsr_flags)) break; prev = &f_curr->fce_list; @@ -414,7 +415,7 @@ static int fld_cache_insert_nolock(struct fld_cache *cache, } } - if (prev == NULL) + if (!prev) prev = head; CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range)); @@ -459,8 +460,8 @@ struct fld_cache_entry head = &cache->fci_entries_head; list_for_each_entry(flde, head, fce_list) { if (range->lsr_start == flde->fce_range.lsr_start || - (range->lsr_end == flde->fce_range.lsr_end && - range->lsr_flags == flde->fce_range.lsr_flags)) { + (range->lsr_end == flde->fce_range.lsr_end && + range->lsr_flags == flde->fce_range.lsr_flags)) { got = flde; break; } @@ -499,7 +500,7 @@ int fld_cache_lookup(struct fld_cache *cache, cache->fci_stat.fst_count++; list_for_each_entry(flde, head, fce_list) { if (flde->fce_range.lsr_start > seq) { - if (prev != NULL) + if (prev) *range = prev->fce_range; break; } diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h index 12eb1647b4bf..e8a3caf20c9b 100644 --- a/drivers/staging/lustre/lustre/fld/fld_internal.h +++ b/drivers/staging/lustre/lustre/fld/fld_internal.h @@ -58,22 +58,16 @@ struct fld_stats { __u64 fst_inflight; }; -typedef int (*fld_hash_func_t) (struct lu_client_fld *, __u64); - -typedef struct lu_fld_target * -(*fld_scan_func_t) (struct lu_client_fld *, __u64); - struct lu_fld_hash { const char *fh_name; - fld_hash_func_t fh_hash_func; - fld_scan_func_t fh_scan_func; + int (*fh_hash_func)(struct lu_client_fld *, __u64); + struct lu_fld_target *(*fh_scan_func)(struct lu_client_fld *, __u64); }; struct fld_cache_entry { struct list_head fce_lru; struct list_head fce_list; - /** - * fld cache entries are sorted on range->lsr_start field. */ + /** fld cache entries are sorted on range->lsr_start field. */ struct lu_seq_range fce_range; }; @@ -84,32 +78,25 @@ struct fld_cache { */ rwlock_t fci_lock; - /** - * Cache shrink threshold */ + /** Cache shrink threshold */ int fci_threshold; - /** - * Preferred number of cached entries */ + /** Preferred number of cached entries */ int fci_cache_size; - /** - * Current number of cached entries. Protected by \a fci_lock */ + /** Current number of cached entries. Protected by \a fci_lock */ int fci_cache_count; - /** - * LRU list fld entries. */ + /** LRU list fld entries. */ struct list_head fci_lru; - /** - * sorted fld entries. */ + /** sorted fld entries. */ struct list_head fci_entries_head; - /** - * Cache statistics. */ + /** Cache statistics. */ struct fld_stats fci_stat; - /** - * Cache name used for debug and messages. */ + /** Cache name used for debug and messages. 
*/ char fci_name[LUSTRE_MDT_MAXNAMELEN]; unsigned int fci_no_shrink:1; }; @@ -169,7 +156,7 @@ struct fld_cache_entry static inline const char * fld_target_name(struct lu_fld_target *tar) { - if (tar->ft_srv != NULL) + if (tar->ft_srv) return tar->ft_srv->lsf_name; return (const char *)tar->ft_exp->exp_obd->obd_name; diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c index d92c01b74865..a3d122d85c8d 100644 --- a/drivers/staging/lustre/lustre/fld/fld_request.c +++ b/drivers/staging/lustre/lustre/fld/fld_request.c @@ -58,7 +58,8 @@ #include "fld_internal.h" /* TODO: these 3 functions are copies of flow-control code from mdc_lib.c - * It should be common thing. The same about mdc RPC lock */ + * It should be common thing. The same about mdc RPC lock + */ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw) { int rc; @@ -124,7 +125,8 @@ fld_rrb_scan(struct lu_client_fld *fld, u64 seq) * it should go to index 0 directly, instead of calculating * hash again, and also if other MDTs is not being connected, * the fld lookup requests(for seq on MDT0) should not be - * blocked because of other MDTs */ + * blocked because of other MDTs + */ if (fid_seq_is_norm(seq)) hash = fld_rrb_hash(fld, seq); else @@ -139,18 +141,19 @@ again: if (hash != 0) { /* It is possible the remote target(MDT) are not connected to * with client yet, so we will refer this to MDT0, which should - * be connected during mount */ + * be connected during mount + */ hash = 0; goto again; } CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n", - fld->lcf_name, hash, seq, fld->lcf_count); + fld->lcf_name, hash, seq, fld->lcf_count); list_for_each_entry(target, &fld->lcf_targets, ft_chain) { - const char *srv_name = target->ft_srv != NULL ? + const char *srv_name = target->ft_srv ? target->ft_srv->lsf_name : "<null>"; - const char *exp_name = target->ft_exp != NULL ? + const char *exp_name = target->ft_exp ? 
(char *)target->ft_exp->exp_obd->obd_uuid.uuid : "<null>"; @@ -183,13 +186,13 @@ fld_client_get_target(struct lu_client_fld *fld, u64 seq) { struct lu_fld_target *target; - LASSERT(fld->lcf_hash != NULL); + LASSERT(fld->lcf_hash); spin_lock(&fld->lcf_lock); target = fld->lcf_hash->fh_scan_func(fld, seq); spin_unlock(&fld->lcf_lock); - if (target != NULL) { + if (target) { CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n", fld->lcf_name, target->ft_idx, seq); } @@ -207,18 +210,18 @@ int fld_client_add_target(struct lu_client_fld *fld, const char *name; struct lu_fld_target *target, *tmp; - LASSERT(tar != NULL); + LASSERT(tar); name = fld_target_name(tar); - LASSERT(name != NULL); - LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL); + LASSERT(name); + LASSERT(tar->ft_srv || tar->ft_exp); if (fld->lcf_flags != LUSTRE_FLD_INIT) { CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n", - fld->lcf_name, name, tar->ft_idx); + fld->lcf_name, name, tar->ft_idx); return 0; } CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n", - fld->lcf_name, name, tar->ft_idx); + fld->lcf_name, name, tar->ft_idx); target = kzalloc(sizeof(*target), GFP_NOFS); if (!target) @@ -236,13 +239,12 @@ int fld_client_add_target(struct lu_client_fld *fld, } target->ft_exp = tar->ft_exp; - if (target->ft_exp != NULL) + if (target->ft_exp) class_export_get(target->ft_exp); target->ft_srv = tar->ft_srv; target->ft_idx = tar->ft_idx; - list_add_tail(&target->ft_chain, - &fld->lcf_targets); + list_add_tail(&target->ft_chain, &fld->lcf_targets); fld->lcf_count++; spin_unlock(&fld->lcf_lock); @@ -257,14 +259,13 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx) struct lu_fld_target *target, *tmp; spin_lock(&fld->lcf_lock); - list_for_each_entry_safe(target, tmp, - &fld->lcf_targets, ft_chain) { + list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) { if (target->ft_idx == idx) { fld->lcf_count--; list_del(&target->ft_chain); spin_unlock(&fld->lcf_lock); - if (target->ft_exp != NULL) + if (target->ft_exp) class_export_put(target->ft_exp); kfree(target); @@ -326,8 +327,6 @@ int fld_client_init(struct lu_client_fld *fld, int cache_size, cache_threshold; int rc; - LASSERT(fld != NULL); - snprintf(fld->lcf_name, sizeof(fld->lcf_name), "cli-%s", prefix); @@ -375,17 +374,16 @@ void fld_client_fini(struct lu_client_fld *fld) struct lu_fld_target *target, *tmp; spin_lock(&fld->lcf_lock); - list_for_each_entry_safe(target, tmp, - &fld->lcf_targets, ft_chain) { + list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) { fld->lcf_count--; list_del(&target->ft_chain); - if (target->ft_exp != NULL) + if (target->ft_exp) class_export_put(target->ft_exp); kfree(target); } spin_unlock(&fld->lcf_lock); - if (fld->lcf_cache != NULL) { + if (fld->lcf_cache) { if (!IS_ERR(fld->lcf_cache)) fld_cache_fini(fld->lcf_cache); fld->lcf_cache = NULL; @@ -402,12 +400,12 @@ int fld_client_rpc(struct obd_export *exp, int rc; struct obd_import *imp; - LASSERT(exp != NULL); + LASSERT(exp); imp = class_exp2cliimp(exp); req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION, FLD_QUERY); - if (req == NULL) + if (!req) return -ENOMEM; op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC); @@ -436,7 +434,7 @@ int fld_client_rpc(struct obd_export *exp, goto out_req; prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD); - if (prange == NULL) { + if (!prange) { rc = -EFAULT; goto out_req; } @@ -463,10 +461,10 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds, 
/* Can not find it in the cache */ target = fld_client_get_target(fld, seq); - LASSERT(target != NULL); + LASSERT(target); CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n", - fld->lcf_name, seq, fld_target_name(target), target->ft_idx); + fld->lcf_name, seq, fld_target_name(target), target->ft_idx); res.lsr_start = seq; fld_range_set_type(&res, flags); @@ -487,7 +485,7 @@ void fld_client_flush(struct lu_client_fld *fld) } EXPORT_SYMBOL(fld_client_flush); -static int __init fld_mod_init(void) +static int __init fld_init(void) { fld_debugfs_dir = ldebugfs_register(LUSTRE_FLD_NAME, debugfs_lustre_root, @@ -495,15 +493,16 @@ static int __init fld_mod_init(void) return PTR_ERR_OR_ZERO(fld_debugfs_dir); } -static void __exit fld_mod_exit(void) +static void __exit fld_exit(void) { if (!IS_ERR_OR_NULL(fld_debugfs_dir)) ldebugfs_remove(&fld_debugfs_dir); } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre FLD"); +MODULE_DESCRIPTION("Lustre FID Location Database"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -module_init(fld_mod_init) -module_exit(fld_mod_exit) +module_init(fld_init) +module_exit(fld_exit) diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c index 41ceaa8198a7..ca898befeba6 100644 --- a/drivers/staging/lustre/lustre/fld/lproc_fld.c +++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c @@ -60,11 +60,8 @@ fld_debugfs_targets_seq_show(struct seq_file *m, void *unused) struct lu_client_fld *fld = (struct lu_client_fld *)m->private; struct lu_fld_target *target; - LASSERT(fld != NULL); - spin_lock(&fld->lcf_lock); - list_for_each_entry(target, - &fld->lcf_targets, ft_chain) + list_for_each_entry(target, &fld->lcf_targets, ft_chain) seq_printf(m, "%s\n", fld_target_name(target)); spin_unlock(&fld->lcf_lock); @@ -76,8 +73,6 @@ fld_debugfs_hash_seq_show(struct seq_file *m, void *unused) { struct lu_client_fld *fld = (struct lu_client_fld *)m->private; - LASSERT(fld != NULL); - spin_lock(&fld->lcf_lock); seq_printf(m, "%s\n", fld->lcf_hash->fh_name); spin_unlock(&fld->lcf_lock); @@ -102,9 +97,8 @@ fld_debugfs_hash_seq_write(struct file *file, return -EFAULT; fld = ((struct seq_file *)file->private_data)->private; - LASSERT(fld != NULL); - for (i = 0; fld_hash[i].fh_name != NULL; i++) { + for (i = 0; fld_hash[i].fh_name; i++) { if (count != strlen(fld_hash[i].fh_name)) continue; @@ -114,7 +108,7 @@ fld_debugfs_hash_seq_write(struct file *file, } } - if (hash != NULL) { + if (hash) { spin_lock(&fld->lcf_lock); fld->lcf_hash = hash; spin_unlock(&fld->lcf_lock); @@ -132,8 +126,6 @@ fld_debugfs_cache_flush_write(struct file *file, const char __user *buffer, { struct lu_client_fld *fld = file->private_data; - LASSERT(fld != NULL); - fld_cache_flush(fld->lcf_cache); CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name); diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h index bd7acc2a1219..fb971ded5a1b 100644 --- a/drivers/staging/lustre/lustre/include/cl_object.h +++ b/drivers/staging/lustre/lustre/include/cl_object.h @@ -157,7 +157,8 @@ struct cl_device { }; /** \addtogroup cl_object cl_object - * @{ */ + * @{ + */ /** * "Data attributes" of cl_object. 
Data attributes can be updated * independently for a sub-object, and top-object's attributes are calculated @@ -288,13 +289,14 @@ struct cl_object_conf { enum { /** configure layout, set up a new stripe, must be called while - * holding layout lock. */ + * holding layout lock. + */ OBJECT_CONF_SET = 0, /** invalidate the current stripe configuration due to losing - * layout lock. */ + * layout lock. + */ OBJECT_CONF_INVALIDATE = 1, - /** wait for old layout to go away so that new layout can be - * set up. */ + /** wait for old layout to go away so that new layout can be set up. */ OBJECT_CONF_WAIT = 2 }; @@ -320,7 +322,7 @@ struct cl_object_operations { * to be used instead of newly created. */ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage); + struct cl_page *page, struct page *vmpage); /** * Initialize lock slice for this layer. Called top-to-bottom through * every object layer when a new cl_lock is instantiated. Layer @@ -393,7 +395,8 @@ struct cl_object_operations { */ struct cl_object_header { /** Standard lu_object_header. cl_object::co_lu::lo_header points - * here. */ + * here. + */ struct lu_object_header coh_lu; /** \name locks * \todo XXX move locks below to the separate cache-lines, they are @@ -464,7 +467,8 @@ struct cl_object_header { #define CL_PAGE_EOF ((pgoff_t)~0ull) /** \addtogroup cl_page cl_page - * @{ */ + * @{ + */ /** \struct cl_page * Layered client page. @@ -687,12 +691,14 @@ enum cl_page_state { enum cl_page_type { /** Host page, the page is from the host inode which the cl_page - * belongs to. */ + * belongs to. + */ CPT_CACHEABLE = 1, /** Transient page, the transient cl_page is used to bind a cl_page * to vmpage which is not belonging to the same object of cl_page. - * it is used in DirectIO, lockless IO and liblustre. */ + * it is used in DirectIO and lockless IO. + */ CPT_TRANSIENT, }; @@ -728,7 +734,8 @@ struct cl_page { /** Parent page, NULL for top-level page. Immutable after creation. */ struct cl_page *cp_parent; /** Lower-layer page. NULL for bottommost page. Immutable after - * creation. */ + * creation. + */ struct cl_page *cp_child; /** * Page state. This field is const to avoid accidental update, it is @@ -842,7 +849,7 @@ struct cl_page_operations { * \return the underlying VM page. Optional. */ struct page *(*cpo_vmpage)(const struct lu_env *env, - const struct cl_page_slice *slice); + const struct cl_page_slice *slice); /** * Called when \a io acquires this page into the exclusive * ownership. When this method returns, it is guaranteed that the is @@ -1126,7 +1133,8 @@ static inline int __page_in_use(const struct cl_page *page, int refc) /** @} cl_page */ /** \addtogroup cl_lock cl_lock - * @{ */ + * @{ + */ /** \struct cl_lock * * Extent locking on the client. @@ -1641,7 +1649,8 @@ struct cl_lock { struct cl_lock_slice { struct cl_lock *cls_lock; /** Object slice corresponding to this lock slice. Immutable after - * creation. */ + * creation. + */ struct cl_object *cls_obj; const struct cl_lock_operations *cls_ops; /** Linkage into cl_lock::cll_layers. Immutable after creation. 
*/ @@ -1885,7 +1894,8 @@ struct cl_2queue { /** @} cl_page_list */ /** \addtogroup cl_io cl_io - * @{ */ + * @{ + */ /** \struct cl_io * I/O * @@ -2041,8 +2051,8 @@ struct cl_io_operations { * * \see cl_io_operations::cio_iter_fini() */ - int (*cio_iter_init) (const struct lu_env *env, - const struct cl_io_slice *slice); + int (*cio_iter_init)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Finalize io iteration. * @@ -2052,8 +2062,8 @@ struct cl_io_operations { * * \see cl_io_operations::cio_iter_init() */ - void (*cio_iter_fini) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_iter_fini)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Collect locks for the current iteration of io. * @@ -2063,8 +2073,8 @@ struct cl_io_operations { * cl_io_lock_add(). Once all locks are collected, they are * sorted and enqueued in the proper order. */ - int (*cio_lock) (const struct lu_env *env, - const struct cl_io_slice *slice); + int (*cio_lock)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Finalize unlocking. * @@ -2089,8 +2099,8 @@ struct cl_io_operations { * Called top-to-bottom at the end of io loop. Here layer * might wait for an unfinished asynchronous io. */ - void (*cio_end) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_end)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Called bottom-to-top to notify layers that read/write IO * iteration finished, with \a nob bytes transferred. @@ -2101,8 +2111,8 @@ struct cl_io_operations { /** * Called once per io, bottom-to-top to release io resources. */ - void (*cio_fini) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_fini)(const struct lu_env *env, + const struct cl_io_slice *slice); } op[CIT_OP_NR]; struct { /** @@ -2222,7 +2232,7 @@ struct cl_io_lock_link { struct cl_lock *cill_lock; /** optional destructor */ void (*cill_fini)(const struct lu_env *env, - struct cl_io_lock_link *link); + struct cl_io_lock_link *link); }; /** @@ -2272,7 +2282,7 @@ enum cl_io_lock_dmd { CILR_MANDATORY = 0, /** Layers are free to decide between local and global locking. */ CILR_MAYBE, - /** Never lock: there is no cache (e.g., liblustre). */ + /** Never lock: there is no cache (e.g., lockless IO). */ CILR_NEVER }; @@ -2284,7 +2294,8 @@ enum cl_fsync_mode { /** discard all of dirty pages in a specific file range */ CL_FSYNC_DISCARD = 2, /** start writeback and make sure they have reached storage before - * return. OST_SYNC RPC must be issued and finished */ + * return. OST_SYNC RPC must be issued and finished + */ CL_FSYNC_ALL = 3 }; @@ -2403,7 +2414,8 @@ struct cl_io { /** @} cl_io */ /** \addtogroup cl_req cl_req - * @{ */ + * @{ + */ /** \struct cl_req * Transfer. * @@ -2582,7 +2594,8 @@ enum cache_stats_item { /** how many entities are in the cache right now */ CS_total, /** how many entities in the cache are actively used (and cannot be - * evicted) right now */ + * evicted) right now + */ CS_busy, /** how many entities were created at all */ CS_create, @@ -2600,7 +2613,7 @@ struct cache_stats { }; /** These are not exported so far */ -void cache_stats_init (struct cache_stats *cs, const char *name); +void cache_stats_init(struct cache_stats *cs, const char *name); /** * Client-side site. This represents particular client stack. "Global" @@ -2613,7 +2626,7 @@ struct cl_site { * Statistical counters. Atomics do not scale, something better like * per-cpu counters is needed. 
* - * These are exported as /proc/fs/lustre/llite/.../site + * These are exported as /sys/kernel/debug/lustre/llite/.../site * * When interpreting keep in mind that both sub-locks (and sub-pages) * and top-locks (and top-pages) are accounted here. @@ -2624,8 +2637,8 @@ struct cl_site { atomic_t cs_locks_state[CLS_NR]; }; -int cl_site_init (struct cl_site *s, struct cl_device *top); -void cl_site_fini (struct cl_site *s); +int cl_site_init(struct cl_site *s, struct cl_device *top); +void cl_site_fini(struct cl_site *s); void cl_stack_fini(const struct lu_env *env, struct cl_device *cl); /** @@ -2653,7 +2666,7 @@ static inline int lu_device_is_cl(const struct lu_device *d) static inline struct cl_device *lu2cl_dev(const struct lu_device *d) { - LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d)); + LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d)); return container_of0(d, struct cl_device, cd_lu_dev); } @@ -2664,7 +2677,7 @@ static inline struct lu_device *cl2lu_dev(struct cl_device *d) static inline struct cl_object *lu2cl(const struct lu_object *o) { - LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); + LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); return container_of0(o, struct cl_object, co_lu); } @@ -2681,7 +2694,7 @@ static inline struct cl_object *cl_object_next(const struct cl_object *obj) static inline struct cl_device *cl_object_device(const struct cl_object *o) { - LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev)); + LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev)); return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev); } @@ -2725,27 +2738,28 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice, /** @} helpers */ /** \defgroup cl_object cl_object - * @{ */ -struct cl_object *cl_object_top (struct cl_object *o); + * @{ + */ +struct cl_object *cl_object_top(struct cl_object *o); struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd, const struct lu_fid *fid, const struct cl_object_conf *c); int cl_object_header_init(struct cl_object_header *h); -void cl_object_put (const struct lu_env *env, struct cl_object *o); -void cl_object_get (struct cl_object *o); -void cl_object_attr_lock (struct cl_object *o); +void cl_object_put(const struct lu_env *env, struct cl_object *o); +void cl_object_get(struct cl_object *o); +void cl_object_attr_lock(struct cl_object *o); void cl_object_attr_unlock(struct cl_object *o); -int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); -int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned valid); -int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj, - struct ost_lvb *lvb); -int cl_conf_set (const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf); -void cl_object_prune (const struct lu_env *env, struct cl_object *obj); -void cl_object_kill (const struct lu_env *env, struct cl_object *obj); +int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); +int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj, + const struct cl_attr *attr, unsigned valid); +int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, + struct ost_lvb *lvb); +int cl_conf_set(const struct lu_env *env, struct cl_object *obj, + const struct cl_object_conf *conf); +void cl_object_prune(const struct lu_env *env, struct cl_object *obj); +void cl_object_kill(const struct 
lu_env *env, struct cl_object *obj); /** * Returns true, iff \a o0 and \a o1 are slices of the same object. @@ -2770,7 +2784,8 @@ static inline void *cl_object_page_slice(struct cl_object *clob, /** @} cl_object */ /** \defgroup cl_page cl_page - * @{ */ + * @{ + */ enum { CLP_GANG_OKAY = 0, CLP_GANG_RESCHED, @@ -2781,34 +2796,26 @@ enum { /* callback of cl_page_gang_lookup() */ typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *, struct cl_page *, void *); -int cl_page_gang_lookup (const struct lu_env *env, - struct cl_object *obj, - struct cl_io *io, - pgoff_t start, pgoff_t end, - cl_page_gang_cb_t cb, void *cbdata); -struct cl_page *cl_page_lookup (struct cl_object_header *hdr, - pgoff_t index); -struct cl_page *cl_page_find (const struct lu_env *env, - struct cl_object *obj, - pgoff_t idx, struct page *vmpage, - enum cl_page_type type); -struct cl_page *cl_page_find_sub (const struct lu_env *env, - struct cl_object *obj, - pgoff_t idx, struct page *vmpage, +int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io, pgoff_t start, pgoff_t end, + cl_page_gang_cb_t cb, void *cbdata); +struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index); +struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj, + pgoff_t idx, struct page *vmpage, + enum cl_page_type type); +struct cl_page *cl_page_find_sub(const struct lu_env *env, + struct cl_object *obj, + pgoff_t idx, struct page *vmpage, struct cl_page *parent); -void cl_page_get (struct cl_page *page); -void cl_page_put (const struct lu_env *env, - struct cl_page *page); -void cl_page_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_page *pg); -void cl_page_header_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_page *pg); -struct page *cl_page_vmpage (const struct lu_env *env, - struct cl_page *page); -struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj); -struct cl_page *cl_page_top (struct cl_page *page); +void cl_page_get(struct cl_page *page); +void cl_page_put(const struct lu_env *env, struct cl_page *page); +void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer, + const struct cl_page *pg); +void cl_page_header_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, const struct cl_page *pg); +struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page); +struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj); +struct cl_page *cl_page_top(struct cl_page *page); const struct cl_page_slice *cl_page_at(const struct cl_page *page, const struct lu_device_type *dtype); @@ -2820,17 +2827,17 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page, */ /** @{ */ -int cl_page_own (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -int cl_page_own_try (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_assume (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_unassume (const struct lu_env *env, - struct cl_io *io, struct cl_page *pg); -void cl_page_disown (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io); +int cl_page_own(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +int cl_page_own_try(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +void 
cl_page_assume(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +void cl_page_unassume(const struct lu_env *env, + struct cl_io *io, struct cl_page *pg); +void cl_page_disown(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io); /** @} ownership */ @@ -2841,19 +2848,19 @@ int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io); * tracking transfer state. */ /** @{ */ -int cl_page_prep (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt); -void cl_page_completion (const struct lu_env *env, - struct cl_page *pg, enum cl_req_type crt, int ioret); -int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg, - enum cl_req_type crt); -int cl_page_cache_add (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt); -void cl_page_clip (const struct lu_env *env, struct cl_page *pg, - int from, int to); -int cl_page_cancel (const struct lu_env *env, struct cl_page *page); -int cl_page_flush (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); +int cl_page_prep(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg, enum cl_req_type crt); +void cl_page_completion(const struct lu_env *env, + struct cl_page *pg, enum cl_req_type crt, int ioret); +int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg, + enum cl_req_type crt); +int cl_page_cache_add(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg, enum cl_req_type crt); +void cl_page_clip(const struct lu_env *env, struct cl_page *pg, + int from, int to); +int cl_page_cancel(const struct lu_env *env, struct cl_page *page); +int cl_page_flush(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); /** @} transfer */ @@ -2862,24 +2869,22 @@ int cl_page_flush (const struct lu_env *env, struct cl_io *io, * Functions to discard, delete and export a cl_page. 
*/ /** @{ */ -void cl_page_discard (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -void cl_page_delete (const struct lu_env *env, struct cl_page *pg); -int cl_page_unmap (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -int cl_page_is_vmlocked (const struct lu_env *env, - const struct cl_page *pg); -void cl_page_export (const struct lu_env *env, - struct cl_page *pg, int uptodate); -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page); -loff_t cl_offset (const struct cl_object *obj, pgoff_t idx); -pgoff_t cl_index (const struct cl_object *obj, loff_t offset); -int cl_page_size (const struct cl_object *obj); -int cl_pages_prune (const struct lu_env *env, struct cl_object *obj); - -void cl_lock_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_lock *lock); +void cl_page_discard(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +void cl_page_delete(const struct lu_env *env, struct cl_page *pg); +int cl_page_unmap(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); +void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); +int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, + struct cl_page *page); +loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); +pgoff_t cl_index(const struct cl_object *obj, loff_t offset); +int cl_page_size(const struct cl_object *obj); +int cl_pages_prune(const struct lu_env *env, struct cl_object *obj); + +void cl_lock_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, const struct cl_lock *lock); void cl_lock_descr_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_lock_descr *descr); @@ -2888,7 +2893,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie, /** @} cl_page */ /** \defgroup cl_lock cl_lock - * @{ */ + * @{ + */ struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io, const struct cl_lock_descr *need, @@ -2917,19 +2923,19 @@ static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env, const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, const struct lu_device_type *dtype); -void cl_lock_get (struct cl_lock *lock); -void cl_lock_get_trust (struct cl_lock *lock); -void cl_lock_put (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); +void cl_lock_get(struct cl_lock *lock); +void cl_lock_get_trust(struct cl_lock *lock); +void cl_lock_put(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); -void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_release (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); +void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); +void cl_lock_release(const struct lu_env *env, struct cl_lock *lock, + const char 
*scope, const void *source); +void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock); int cl_lock_is_intransit(struct cl_lock *lock); @@ -2966,52 +2972,53 @@ int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, * * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD * - * @{ */ + * @{ + */ -int cl_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_unuse (const struct lu_env *env, struct cl_lock *lock); -int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, - struct cl_io *io, __u32 flags); -int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock); -int cl_wait_try (const struct lu_env *env, struct cl_lock *lock); -int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic); +int cl_wait(const struct lu_env *env, struct cl_lock *lock); +void cl_unuse(const struct lu_env *env, struct cl_lock *lock); +int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, + struct cl_io *io, __u32 flags); +int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock); +int cl_wait_try(const struct lu_env *env, struct cl_lock *lock); +int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic); /** @} statemachine */ -void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock, - enum cl_lock_state state); -int cl_queue_match (const struct list_head *queue, - const struct cl_lock_descr *need); - -void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_is_mutexed (struct cl_lock *lock); -int cl_lock_nr_mutexed (const struct lu_env *env); -int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); -int cl_lock_ext_match (const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_descr_match(const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need); -int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock, - const struct cl_lock_descr *desc); - -void cl_lock_closure_init (const struct lu_env *env, - struct cl_lock_closure *closure, - struct cl_lock *origin, int wait); -void cl_lock_closure_fini (struct cl_lock_closure *closure); -int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); -void cl_lock_disclosure (const struct lu_env *env, - struct cl_lock_closure *closure); -int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); +void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock, + enum cl_lock_state state); +int cl_queue_match(const struct list_head *queue, + const struct cl_lock_descr *need); + +void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_is_mutexed(struct cl_lock *lock); +int cl_lock_nr_mutexed(const struct lu_env *env); +int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_ext_match(const struct cl_lock_descr *has, + const 
struct cl_lock_descr *need); +int cl_lock_descr_match(const struct cl_lock_descr *has, + const struct cl_lock_descr *need); +int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need); +int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, + const struct cl_lock_descr *desc); + +void cl_lock_closure_init(const struct lu_env *env, + struct cl_lock_closure *closure, + struct cl_lock *origin, int wait); +void cl_lock_closure_fini(struct cl_lock_closure *closure); +int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, + struct cl_lock_closure *closure); +void cl_lock_disclosure(const struct lu_env *env, + struct cl_lock_closure *closure); +int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock, + struct cl_lock_closure *closure); void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock); -void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error); +void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error); void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait); unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); @@ -3019,39 +3026,40 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); /** @} cl_lock */ /** \defgroup cl_io cl_io - * @{ */ - -int cl_io_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_sub_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_rw_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, loff_t pos, size_t count); -int cl_io_loop (const struct lu_env *env, struct cl_io *io); - -void cl_io_fini (const struct lu_env *env, struct cl_io *io); -int cl_io_iter_init (const struct lu_env *env, struct cl_io *io); -void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io); -int cl_io_lock (const struct lu_env *env, struct cl_io *io); -void cl_io_unlock (const struct lu_env *env, struct cl_io *io); -int cl_io_start (const struct lu_env *env, struct cl_io *io); -void cl_io_end (const struct lu_env *env, struct cl_io *io); -int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, - struct cl_io_lock_link *link); -int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr); -int cl_io_read_page (const struct lu_env *env, struct cl_io *io, - struct cl_page *page); -int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_commit_write (const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue); -int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - long timeout); -int cl_io_is_going (const struct lu_env *env); + * @{ + */ + +int cl_io_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, struct cl_object *obj); +int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, struct cl_object *obj); +int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, loff_t pos, size_t count); +int cl_io_loop(const struct lu_env *env, struct cl_io *io); + +void cl_io_fini(const struct lu_env *env, struct cl_io 
*io); +int cl_io_iter_init(const struct lu_env *env, struct cl_io *io); +void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io); +int cl_io_lock(const struct lu_env *env, struct cl_io *io); +void cl_io_unlock(const struct lu_env *env, struct cl_io *io); +int cl_io_start(const struct lu_env *env, struct cl_io *io); +void cl_io_end(const struct lu_env *env, struct cl_io *io); +int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, + struct cl_io_lock_link *link); +int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, + struct cl_lock_descr *descr); +int cl_io_read_page(const struct lu_env *env, struct cl_io *io, + struct cl_page *page); +int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, unsigned from, unsigned to); +int cl_io_commit_write(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, unsigned from, unsigned to); +int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, + enum cl_req_type iot, struct cl_2queue *queue); +int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, + enum cl_req_type iot, struct cl_2queue *queue, + long timeout); +int cl_io_is_going(const struct lu_env *env); /** * True, iff \a io is an O_APPEND write(2). @@ -3094,7 +3102,8 @@ do { \ /** @} cl_io */ /** \defgroup cl_page_list cl_page_list - * @{ */ + * @{ + */ /** * Last page in the page list. @@ -3117,40 +3126,41 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) #define cl_page_list_for_each_safe(page, temp, list) \ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) -void cl_page_list_init (struct cl_page_list *plist); -void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page); -void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src, - struct cl_page *page); -void cl_page_list_splice (struct cl_page_list *list, - struct cl_page_list *head); -void cl_page_list_disown (const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); - -void cl_2queue_init (struct cl_2queue *queue); -void cl_2queue_disown (const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_discard (const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue); +void cl_page_list_init(struct cl_page_list *plist); +void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page); +void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src, + struct cl_page *page); +void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head); +void cl_page_list_disown(const struct lu_env *env, + struct cl_io *io, struct cl_page_list *plist); + +void cl_2queue_init(struct cl_2queue *queue); +void cl_2queue_disown(const struct lu_env *env, + struct cl_io *io, struct cl_2queue *queue); +void cl_2queue_discard(const struct lu_env *env, + struct cl_io *io, struct cl_2queue *queue); +void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue); void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page); /** @} cl_page_list */ /** \defgroup cl_req cl_req - * @{ */ + * @{ + */ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, enum cl_req_type crt, int nr_objects); -void cl_req_page_add (const struct lu_env *env, struct cl_req *req, - struct cl_page *page); -void cl_req_page_done (const struct lu_env *env, struct cl_page *page); -int cl_req_prep (const struct lu_env 
*env, struct cl_req *req); -void cl_req_attr_set (const struct lu_env *env, struct cl_req *req, - struct cl_req_attr *attr, u64 flags); +void cl_req_page_add(const struct lu_env *env, struct cl_req *req, + struct cl_page *page); +void cl_req_page_done(const struct lu_env *env, struct cl_page *page); +int cl_req_prep(const struct lu_env *env, struct cl_req *req); +void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, + struct cl_req_attr *attr, u64 flags); void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); /** \defgroup cl_sync_io cl_sync_io - * @{ */ + * @{ + */ /** * Anchor for synchronous transfer. This is allocated on a stack by thread @@ -3214,22 +3224,23 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret); * - cl_env_reexit(cl_env_reenter had to be called priorly) * * \see lu_env, lu_context, lu_context_key - * @{ */ + * @{ + */ struct cl_env_nest { int cen_refcheck; void *cen_cookie; }; -struct lu_env *cl_env_get (int *refcheck); -struct lu_env *cl_env_alloc (int *refcheck, __u32 tags); -struct lu_env *cl_env_nested_get (struct cl_env_nest *nest); -void cl_env_put (struct lu_env *env, int *refcheck); -void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env); -void *cl_env_reenter (void); -void cl_env_reexit (void *cookie); -void cl_env_implant (struct lu_env *env, int *refcheck); -void cl_env_unplant (struct lu_env *env, int *refcheck); +struct lu_env *cl_env_get(int *refcheck); +struct lu_env *cl_env_alloc(int *refcheck, __u32 tags); +struct lu_env *cl_env_nested_get(struct cl_env_nest *nest); +void cl_env_put(struct lu_env *env, int *refcheck); +void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env); +void *cl_env_reenter(void); +void cl_env_reexit(void *cookie); +void cl_env_implant(struct lu_env *env, int *refcheck); +void cl_env_unplant(struct lu_env *env, int *refcheck); /** @} cl_env */ diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h index 36e7a6767e71..5d839a9f789f 100644 --- a/drivers/staging/lustre/lustre/include/lclient.h +++ b/drivers/staging/lustre/lustre/include/lclient.h @@ -127,7 +127,7 @@ static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env) struct ccc_thread_info *info; info = lu_context_key_get(&env->le_ctx, &ccc_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -156,7 +156,7 @@ static inline struct ccc_session *ccc_env_session(const struct lu_env *env) struct ccc_session *ses; ses = lu_context_key_get(env->le_ses, &ccc_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -383,7 +383,8 @@ void cl_put_grouplock(struct ccc_grouplock *cg); * * NB: If you find you have to use these interfaces for your new code, please * think about it again. These interfaces may be removed in the future for - * better layering. */ + * better layering. 
+ */ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj); void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm); int lov_read_and_clear_async_rc(struct cl_object *clob); diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h index 468bc28be895..3907bf4ce07c 100644 --- a/drivers/staging/lustre/lustre/include/linux/obd.h +++ b/drivers/staging/lustre/lustre/include/linux/obd.h @@ -57,23 +57,23 @@ struct ll_iattr { #define CLIENT_OBD_LIST_LOCK_DEBUG 1 -typedef struct { +struct client_obd_lock { spinlock_t lock; unsigned long time; struct task_struct *task; const char *func; int line; -} client_obd_lock_t; +}; -static inline void __client_obd_list_lock(client_obd_lock_t *lock, +static inline void __client_obd_list_lock(struct client_obd_lock *lock, const char *func, int line) { unsigned long cur = jiffies; while (1) { if (spin_trylock(&lock->lock)) { - LASSERT(lock->task == NULL); + LASSERT(!lock->task); lock->task = current; lock->func = func; lock->line = line; @@ -85,7 +85,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock, time_before(lock->time + 5 * HZ, jiffies)) { struct task_struct *task = lock->task; - if (task == NULL) + if (!task) continue; LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n", @@ -106,20 +106,20 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock, #define client_obd_list_lock(lock) \ __client_obd_list_lock(lock, __func__, __LINE__) -static inline void client_obd_list_unlock(client_obd_lock_t *lock) +static inline void client_obd_list_unlock(struct client_obd_lock *lock) { - LASSERT(lock->task != NULL); + LASSERT(lock->task); lock->task = NULL; lock->time = jiffies; spin_unlock(&lock->lock); } -static inline void client_obd_list_lock_init(client_obd_lock_t *lock) +static inline void client_obd_list_lock_init(struct client_obd_lock *lock) { spin_lock_init(&lock->lock); } -static inline void client_obd_list_lock_done(client_obd_lock_t *lock) +static inline void client_obd_list_lock_done(struct client_obd_lock *lock) {} #endif /* __LINUX_OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h index 0ac8e0edcc48..4146c9c3999f 100644 --- a/drivers/staging/lustre/lustre/include/lprocfs_status.h +++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h @@ -54,7 +54,7 @@ struct lprocfs_vars { struct file_operations *fops; void *data; /** - * /proc file mode. + * sysfs file mode. 
*/ umode_t proc_mode; }; @@ -175,7 +175,8 @@ struct lprocfs_percpu { enum lprocfs_stats_flags { LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */ LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu - * area and need locking */ + * area and need locking + */ LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */ }; @@ -196,7 +197,8 @@ struct lprocfs_stats { unsigned short ls_biggest_alloc_num; enum lprocfs_stats_flags ls_flags; /* Lock used when there are no percpu stats areas; For percpu stats, - * it is used to protect ls_biggest_alloc_num change */ + * it is used to protect ls_biggest_alloc_num change + */ spinlock_t ls_lock; /* has ls_num of counter headers */ @@ -274,20 +276,7 @@ static inline int opcode_offset(__u32 opc) OPC_RANGE(OST)); } else if (opc < FLD_LAST_OPC) { /* FLD opcode */ - return (opc - FLD_FIRST_OPC + - OPC_RANGE(SEC) + - OPC_RANGE(SEQ) + - OPC_RANGE(QUOTA) + - OPC_RANGE(LLOG) + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < UPDATE_LAST_OPC) { - /* update opcode */ - return (opc - UPDATE_FIRST_OPC + - OPC_RANGE(FLD) + + return (opc - FLD_FIRST_OPC + OPC_RANGE(SEC) + OPC_RANGE(SEQ) + OPC_RANGE(QUOTA) + @@ -312,8 +301,7 @@ static inline int opcode_offset(__u32 opc) OPC_RANGE(SEC) + \ OPC_RANGE(SEQ) + \ OPC_RANGE(SEC) + \ - OPC_RANGE(FLD) + \ - OPC_RANGE(UPDATE)) + OPC_RANGE(FLD)) #define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \ OPC_RANGE(EXTRA)) @@ -407,7 +395,7 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc, } else { unsigned int cpuid = get_cpu(); - if (unlikely(stats->ls_percpu[cpuid] == NULL)) { + if (unlikely(!stats->ls_percpu[cpuid])) { rc = lprocfs_stats_alloc_one(stats, cpuid); if (rc < 0) { put_cpu(); @@ -438,12 +426,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, case LPROCFS_GET_SMP_ID: if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) { - spin_unlock_irqrestore(&stats->ls_lock, - *flags); - } else { + if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) + spin_unlock_irqrestore(&stats->ls_lock, *flags); + else spin_unlock(&stats->ls_lock); - } } else { put_cpu(); } @@ -451,12 +437,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, case LPROCFS_GET_NUM_CPU: if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) { - spin_unlock_irqrestore(&stats->ls_lock, - *flags); - } else { + if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) + spin_unlock_irqrestore(&stats->ls_lock, *flags); + else spin_unlock(&stats->ls_lock); - } } return; } @@ -521,11 +505,11 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats, unsigned long flags = 0; __u64 ret = 0; - LASSERT(stats != NULL); + LASSERT(stats); num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_cpu; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; ret += lprocfs_read_helper( lprocfs_stats_counter_get(stats, i, idx), @@ -608,7 +592,7 @@ int lprocfs_write_helper(const char __user *buffer, unsigned long count, int *val); int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, __u64 *val); -int lprocfs_write_frac_u64_helper(const char *buffer, +int lprocfs_write_frac_u64_helper(const char __user *buffer, unsigned long count, __u64 *val, int mult); char *lprocfs_find_named_value(const char *buffer, const 
char *name, @@ -625,9 +609,10 @@ int lprocfs_single_release(struct inode *, struct file *); int lprocfs_seq_release(struct inode *, struct file *); /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only - proc entries; otherwise, you will define name##_seq_write function also for - a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, - call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */ + * proc entries; otherwise, you will define name##_seq_write function also for + * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, + * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); + */ #define __LPROC_SEQ_FOPS(name, custom_seq_write) \ static int name##_single_open(struct inode *inode, struct file *file) \ { \ diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h index 1d79341a495d..b5088b13a305 100644 --- a/drivers/staging/lustre/lustre/include/lu_object.h +++ b/drivers/staging/lustre/lustre/include/lu_object.h @@ -164,11 +164,12 @@ struct lu_device_operations { /** * For lu_object_conf flags */ -typedef enum { +enum loc_flags { /* This is a new object to be allocated, or the file - * corresponding to the object does not exists. */ + * corresponding to the object does not exists. + */ LOC_F_NEW = 0x00000001, -} loc_flags_t; +}; /** * Object configuration, describing particulars of object being created. On @@ -179,7 +180,7 @@ struct lu_object_conf { /** * Some hints for obj find and alloc. */ - loc_flags_t loc_flags; + enum loc_flags loc_flags; }; /** @@ -392,7 +393,7 @@ struct lu_device_type_operations { static inline int lu_device_is_md(const struct lu_device *d) { - return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD); + return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD); } /** @@ -488,7 +489,7 @@ enum lu_object_header_flags { /** * Mark this object has already been taken out of cache. */ - LU_OBJECT_UNHASHED = 1 + LU_OBJECT_UNHASHED = 1, }; enum lu_object_header_attr { @@ -756,7 +757,7 @@ static inline const struct lu_fid *lu_object_fid(const struct lu_object *o) /** * return device operations vector for this object */ -static const inline struct lu_device_operations * +static inline const struct lu_device_operations * lu_object_ops(const struct lu_object *o) { return o->lo_dev->ld_ops; @@ -895,7 +896,8 @@ enum lu_xattr_flags { /** @} helpers */ /** \name lu_context - * @{ */ + * @{ + */ /** For lu_context health-checks */ enum lu_context_state { @@ -1119,7 +1121,7 @@ struct lu_context_key { CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ \ value = kzalloc(sizeof(*value), GFP_NOFS); \ - if (value == NULL) \ + if (!value) \ value = ERR_PTR(-ENOMEM); \ \ return value; \ @@ -1174,7 +1176,7 @@ void lu_context_key_revive (struct lu_context_key *key); do { \ LU_CONTEXT_KEY_INIT(key); \ key = va_arg(args, struct lu_context_key *); \ - } while (key != NULL); \ + } while (key); \ va_end(args); \ } diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h index 97cd157dd35a..f7dfd83951ee 100644 --- a/drivers/staging/lustre/lustre/include/lu_ref.h +++ b/drivers/staging/lustre/lustre/include/lu_ref.h @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #ifndef __LUSTRE_LU_REF_H diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h index 09088f40ba88..07d45de69dd9 100644 --- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h +++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h @@ -47,9 +47,11 @@ struct ll_fiemap_extent { __u64 fe_logical; /* logical offset in bytes for the start of - * the extent from the beginning of the file */ + * the extent from the beginning of the file + */ __u64 fe_physical; /* physical offset in bytes for the start - * of the extent from the beginning of the disk */ + * of the extent from the beginning of the disk + */ __u64 fe_length; /* length in bytes for this extent */ __u64 fe_reserved64[2]; __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */ @@ -59,9 +61,11 @@ struct ll_fiemap_extent { struct ll_user_fiemap { __u64 fm_start; /* logical offset (inclusive) at - * which to start mapping (in) */ + * which to start mapping (in) + */ __u64 fm_length; /* logical length of mapping which - * userspace wants (in) */ + * userspace wants (in) + */ __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ __u32 fm_extent_count; /* size of fm_extents array (in) */ @@ -71,28 +75,38 @@ struct ll_user_fiemap { #define FIEMAP_MAX_OFFSET (~0ULL) -#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */ -#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */ - -#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ -#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ -#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. - * Sets EXTENT_UNKNOWN. */ -#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read - * while fs is unmounted */ -#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. - * Sets EXTENT_NO_DIRECT. */ +#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before + * map + */ +#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute + * tree + */ +#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ +#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ +#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. + * Sets EXTENT_UNKNOWN. + */ +#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read + * while fs is unmounted + */ +#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. + * Sets EXTENT_NO_DIRECT. + */ #define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be - * block aligned. */ + * block aligned. + */ #define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata. * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. - * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but - * no data (i.e. zero). */ -#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively +#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. + * Sets EXTENT_NOT_ALIGNED. + */ +#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but + * no data (i.e. zero). 
+ */ +#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively * support extents. Result - * merged for efficiency. */ + * merged for efficiency. + */ static inline size_t fiemap_count_to_size(size_t extent_count) { @@ -114,7 +128,8 @@ static inline unsigned fiemap_size_to_count(size_t array_size) /* Lustre specific flags - use a high bit, don't conflict with upstream flag */ #define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */ -#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. - * Sets NO_DIRECT flag */ +#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. + * Sets NO_DIRECT flag + */ #endif /* _LUSTRE_FIEMAP_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h deleted file mode 100644 index 93a3d7db3010..000000000000 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h +++ /dev/null @@ -1,2 +0,0 @@ -#define BUILD_VERSION "v2_3_64_0-g6e62c21-CHANGED-3.9.0" -#define LUSTRE_RELEASE 3.9.0_g6e62c21 diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index b064b5821e3f..da8bc6eadd13 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -113,25 +113,25 @@ #define CONNMGR_REQUEST_PORTAL 1 #define CONNMGR_REPLY_PORTAL 2 -//#define OSC_REQUEST_PORTAL 3 +/*#define OSC_REQUEST_PORTAL 3 */ #define OSC_REPLY_PORTAL 4 -//#define OSC_BULK_PORTAL 5 +/*#define OSC_BULK_PORTAL 5 */ #define OST_IO_PORTAL 6 #define OST_CREATE_PORTAL 7 #define OST_BULK_PORTAL 8 -//#define MDC_REQUEST_PORTAL 9 +/*#define MDC_REQUEST_PORTAL 9 */ #define MDC_REPLY_PORTAL 10 -//#define MDC_BULK_PORTAL 11 +/*#define MDC_BULK_PORTAL 11 */ #define MDS_REQUEST_PORTAL 12 -//#define MDS_REPLY_PORTAL 13 +/*#define MDS_REPLY_PORTAL 13 */ #define MDS_BULK_PORTAL 14 #define LDLM_CB_REQUEST_PORTAL 15 #define LDLM_CB_REPLY_PORTAL 16 #define LDLM_CANCEL_REQUEST_PORTAL 17 #define LDLM_CANCEL_REPLY_PORTAL 18 -//#define PTLBD_REQUEST_PORTAL 19 -//#define PTLBD_REPLY_PORTAL 20 -//#define PTLBD_BULK_PORTAL 21 +/*#define PTLBD_REQUEST_PORTAL 19 */ +/*#define PTLBD_REPLY_PORTAL 20 */ +/*#define PTLBD_BULK_PORTAL 21 */ #define MDS_SETATTR_PORTAL 22 #define MDS_READPAGE_PORTAL 23 #define OUT_PORTAL 24 @@ -146,7 +146,9 @@ #define SEQ_CONTROLLER_PORTAL 32 #define MGS_BULK_PORTAL 33 -/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */ +/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, + * n8851@cray.com + */ /* packet types */ #define PTL_RPC_MSG_REQUEST 4711 @@ -295,7 +297,8 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, fld_range_is_mdt(range) ? "mdt" : "ost" /** \defgroup lu_fid lu_fid - * @{ */ + * @{ + */ /** * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat. @@ -307,7 +310,8 @@ enum lma_compat { LMAC_SOM = 0x00000002, LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is - * under /O/<seq>/d<x>. */ + * under /O/<seq>/d<x>. 
+ */ }; /** @@ -319,7 +323,8 @@ enum lma_incompat { LMAI_RELEASED = 0x00000001, /* file is released */ LMAI_AGENT = 0x00000002, /* agent inode */ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object - is on the remote MDT */ + * is on the remote MDT + */ }; #define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) @@ -395,12 +400,14 @@ enum fid_seq { FID_SEQ_LOCAL_FILE = 0x200000001ULL, FID_SEQ_DOT_LUSTRE = 0x200000002ULL, /* sequence is used for local named objects FIDs generated - * by local_object_storage library */ + * by local_object_storage library + */ FID_SEQ_LOCAL_NAME = 0x200000003ULL, /* Because current FLD will only cache the fid sequence, instead * of oid on the client side, if the FID needs to be exposed to * clients sides, it needs to make sure all of fids under one - * sequence will be located in one MDT. */ + * sequence will be located in one MDT. + */ FID_SEQ_SPECIAL = 0x200000004ULL, FID_SEQ_QUOTA = 0x200000005ULL, FID_SEQ_QUOTA_GLB = 0x200000006ULL, @@ -601,7 +608,8 @@ static inline void ostid_set_seq(struct ost_id *oi, __u64 seq) oi->oi_fid.f_seq = seq; /* Note: if f_oid + f_ver is zero, we need init it * to be 1, otherwise, ostid_seq will treat this - * as old ostid (oi_seq == 0) */ + * as old ostid (oi_seq == 0) + */ if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0) oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID; } @@ -630,15 +638,13 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) { if (fid_seq_is_mdt0(ostid_seq(oi))) { if (oid >= IDIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", - oid, POSTID(oi)); + CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); return; } oi->oi.oi_id = oid; } else { if (oid > OBIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", - oid, POSTID(oi)); + CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); return; } oi->oi_fid.f_oid = oid; @@ -689,11 +695,12 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, * that we map into the IDIF namespace. It allows up to 2^48 * objects per OST, as this is the object namespace that has * been in production for years. This can handle create rates - * of 1M objects/s/OST for 9 years, or combinations thereof. */ + * of 1M objects/s/OST for 9 years, or combinations thereof. + */ if (ostid_id(ostid) >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); - return -EBADF; + CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; } fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); /* truncate to 32 bits by assignment */ @@ -704,10 +711,11 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, /* This is either an IDIF object, which identifies objects across * all OSTs, or a regular FID. The IDIF namespace maps legacy * OST objects into the FID namespace. In both cases, we just - * pass the FID through, no conversion needed. */ + * pass the FID through, no conversion needed. 
+ */ if (ostid->oi_fid.f_ver != 0) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); + CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", + POSTID(ostid), ost_idx); return -EBADF; } *fid = ostid->oi_fid; @@ -807,7 +815,7 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) static inline int fid_is_sane(const struct lu_fid *fid) { - return fid != NULL && + return fid && ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || fid_is_igif(fid) || fid_is_idif(fid) || fid_seq_is_rsvd(fid_seq(fid))); @@ -868,7 +876,8 @@ static inline void ostid_le_to_cpu(const struct ost_id *src_oi, /** @} lu_fid */ /** \defgroup lu_dir lu_dir - * @{ */ + * @{ + */ /** * Enumeration of possible directory entry attributes. @@ -880,24 +889,8 @@ enum lu_dirent_attrs { LUDA_FID = 0x0001, LUDA_TYPE = 0x0002, LUDA_64BITHASH = 0x0004, - - /* The following attrs are used for MDT internal only, - * not visible to client */ - - /* Verify the dirent consistency */ - LUDA_VERIFY = 0x8000, - /* Only check but not repair the dirent inconsistency */ - LUDA_VERIFY_DRYRUN = 0x4000, - /* The dirent has been repaired, or to be repaired (dryrun). */ - LUDA_REPAIR = 0x2000, - /* The system is upgraded, has beed or to be repaired (dryrun). */ - LUDA_UPGRADE = 0x1000, - /* Ignore this record, go to next directly. */ - LUDA_IGNORE = 0x0800, }; -#define LU_DIRENT_ATTRS_MASK 0xf800 - /** * Layout of readdir pages, as transmitted on wire. */ @@ -1128,7 +1121,8 @@ struct ptlrpc_body_v2 { __u32 pb_conn_cnt; __u32 pb_timeout; /* for req, the deadline, for rep, the service est */ __u32 pb_service_time; /* for rep, actual service time, also used for - net_latency of req */ + * net_latency of req + */ __u32 pb_limit; __u64 pb_slv; /* VBR: pre-versions */ @@ -1174,7 +1168,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* #define MSG_AT_SUPPORT 0x0008 * This was used in early prototypes of adaptive timeouts, and while there * shouldn't be any users of that code there also isn't a need for using this - * bits. Defer usage until at least 1.10 to avoid potential conflict. */ + * bits. Defer usage until at least 1.10 to avoid potential conflict. 
+ */ #define MSG_DELAY_REPLAY 0x0010 #define MSG_VERSION_REPLAY 0x0020 #define MSG_REQ_REPLAY_DONE 0x0040 @@ -1187,7 +1182,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MSG_CONNECT_RECOVERING 0x00000001 #define MSG_CONNECT_RECONNECT 0x00000002 #define MSG_CONNECT_REPLAYABLE 0x00000004 -//#define MSG_CONNECT_PEER 0x8 +/*#define MSG_CONNECT_PEER 0x8 */ #define MSG_CONNECT_LIBCLIENT 0x00000010 #define MSG_CONNECT_INITIAL 0x00000020 #define MSG_CONNECT_ASYNC 0x00000040 @@ -1195,60 +1190,65 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */ /* Connect flags */ -#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ -#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ -#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ -#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ -#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ -#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ -#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ -#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ -#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ +#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ +#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ +#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ +#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ +#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ +#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ +#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ +#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ +#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ #define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/ -#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ -#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ -#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ +#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ +#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ +#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ #define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated. *We do not support JOIN FILE *anymore, reserve this flags *just for preventing such bit - *to be reused.*/ -#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ -#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ -#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ + *to be reused. + */ +#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ +#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ +#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ #define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */ -#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ -#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ -#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ -#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. 
*/ -#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ -#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ +#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ +#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ +#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ +#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ +#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */ +#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ +#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */ -#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ +#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ #define OBD_CONNECT_REAL 0x8000000ULL /*real connection */ #define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ -#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ -#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ -#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ +#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ +#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ +#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ +#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ #define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */ #define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */ #define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */ #define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */ #define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */ #define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits - * directory hash */ + * directory hash + */ #define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */ #define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */ #define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */ #define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */ #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS - * RPC error properly */ + * RPC error properly + */ #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for - * finer space reservation */ + * finer space reservation + */ #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8 - * policy and 2.x server */ + * policy and 2.x server + */ #define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */ #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */ #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */ @@ -1264,61 +1264,19 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * submit a small patch against EVERY branch that ONLY adds the new flag, * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the * flag to check_obd_connect_data(), and updates wiretests accordingly, so it - * can be approved and landed easily to reserve the flag for future use. */ + * can be approved and landed easily to reserve the flag for future use. + */ /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS * connection. 
It is a temporary bug fix for Imperative Recovery interop * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for - * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */ + * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. + */ #define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS #define OCD_HAS_FLAG(ocd, flg) \ (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg)) -#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE - -#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \ - OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \ - OBD_CONNECT_IBITS | \ - OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \ - OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \ - OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \ - OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \ - OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \ - OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \ - OBD_CONNECT_EINPROGRESS | \ - OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \ - OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\ - OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\ - OBD_CONNECT_FLOCK_DEAD | \ - OBD_CONNECT_DISP_STRIPE) - -#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ - OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ - OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \ - OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ - LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \ - OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \ - OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \ - OBD_CONNECT_MAX_EASIZE | \ - OBD_CONNECT_EINPROGRESS | \ - OBD_CONNECT_JOBSTATS | \ - OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\ - OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \ - OBD_CONNECT_PINGLESS) -#define ECHO_CONNECT_SUPPORTED (0) -#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ - OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \ - OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS) - /* Features required for this version of the client to work with server */ #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ OBD_CONNECT_FULL20) @@ -1334,7 +1292,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* This structure is used for both request and reply. * * If we eventually have separate connect data for different types, which we - * almost certainly will, then perhaps we stick a union in here. */ + * almost certainly will, then perhaps we stick a union in here. 
+ */ struct obd_connect_data_v1 { __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ __u32 ocd_version; /* lustre release version number */ @@ -1364,7 +1323,7 @@ struct obd_connect_data { __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */ - __u32 ocd_unused; /* also fix lustre_swab_connect */ + __u32 ocd_unused; /* also fix lustre_swab_connect */ __u64 ocd_transno; /* first transno from client to be replayed */ __u32 ocd_group; /* MDS group on OST */ __u32 ocd_cksum_types; /* supported checksum algorithms */ @@ -1374,7 +1333,8 @@ struct obd_connect_data { /* Fields after ocd_maxbytes are only accessible by the receiver * if the corresponding flag in ocd_connect_flags is set. Accessing * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. */ + * may result in out-of-bound memory access and kernel oops. + */ __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */ @@ -1398,7 +1358,8 @@ struct obd_connect_data { * with senior engineers before starting to use a new field. Then, submit * a small patch against EVERY branch that ONLY adds the new field along with * the matching OBD_CONNECT flag, so that can be approved and landed easily to - * reserve the flag for future use. */ + * reserve the flag for future use. + */ void lustre_swab_connect(struct obd_connect_data *ocd); @@ -1408,18 +1369,18 @@ void lustre_swab_connect(struct obd_connect_data *ocd); * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new * algorithm and also the OBD_FL_CKSUM* flags. */ -typedef enum { +enum cksum_type { OBD_CKSUM_CRC32 = 0x00000001, OBD_CKSUM_ADLER = 0x00000002, OBD_CKSUM_CRC32C = 0x00000004, -} cksum_type_t; +}; /* * OST requests: OBDO & OBD request records */ /* opcodes */ -typedef enum { +enum ost_cmd { OST_REPLY = 0, /* reply ? */ OST_GETATTR = 1, OST_SETATTR = 2, @@ -1440,14 +1401,14 @@ typedef enum { OST_QUOTACTL = 19, OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ OST_LAST_OPC -} ost_cmd_t; +}; #define OST_FIRST_OPC OST_REPLY enum obdo_flags { OBD_FL_INLINEDATA = 0x00000001, OBD_FL_OBDMDEXISTS = 0x00000002, OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */ - OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ + OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/ OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */ OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */ @@ -1461,14 +1422,16 @@ enum obdo_flags { OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */ OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */ OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */ - OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. + OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. * XXX: obsoleted - reserved for old - * clients prior than 2.2 */ + * clients prior than 2.2 + */ OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ /* Note that while these checksum values are currently separate bits, - * in 2.x we can actually allow all values from 1-31 if we wanted. 
*/ + * in 2.x we can actually allow all values from 1-31 if we wanted. + */ OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER | OBD_FL_CKSUM_CRC32C, @@ -1657,7 +1620,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) } } -#define OBD_MD_FLID (0x00000001ULL) /* object ID */ +#define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */ #define OBD_MD_FLCTIME (0x00000008ULL) /* change time */ @@ -1683,22 +1646,23 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLGROUP (0x01000000ULL) /* group */ #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ - /* ->mds if epoch opens or closes */ + /* ->mds if epoch opens or closes + */ #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */ #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */ #define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */ #define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */ #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */ -#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ +#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */ -#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ +#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ #define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */ #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */ #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */ #define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */ -#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ +#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ #define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */ #define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */ #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */ @@ -1707,7 +1671,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes * under lock; for xattr * requests means the - * client holds the lock */ + * client holds the lock + */ #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */ #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ @@ -1727,7 +1692,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS) /* don't forget obdo_fid which is way down at the bottom so it can - * come after the definition of llog_cookie */ + * come after the definition of llog_cookie + */ enum hss_valid { HSS_SETMASK = 0x01, @@ -1749,19 +1715,20 @@ void lustre_swab_obd_statfs(struct obd_statfs *os); /* ost_body.data values for OST_BRW */ -#define OBD_BRW_READ 0x01 -#define OBD_BRW_WRITE 0x02 -#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) -#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous +#define OBD_BRW_READ 0x01 +#define OBD_BRW_WRITE 0x02 +#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) +#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous * transfer and is not accounted in - * the grant. */ -#define OBD_BRW_CHECK 0x10 + * the grant. 
+ */ +#define OBD_BRW_CHECK 0x10 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */ -#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ -#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ -#define OBD_BRW_NOQUOTA 0x100 -#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ -#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ +#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ +#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ +#define OBD_BRW_NOQUOTA 0x100 +#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ +#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */ #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */ @@ -1775,7 +1742,8 @@ struct obd_ioobj { struct ost_id ioo_oid; /* object ID, if multi-obj BRW */ __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4, * now (PTLRPC_BULK_OPS_COUNT - 1) in - * high 16 bits in 2.4 and later */ + * high 16 bits in 2.4 and later + */ __u32 ioo_bufcnt; /* number of niobufs for this object */ }; @@ -1799,7 +1767,8 @@ void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); /* lock value block communicated between the filter and llite */ /* OST_LVB_ERR_INIT is needed because the return code in rc is - * negative, i.e. because ((MASK + rc) & MASK) != MASK. */ + * negative, i.e. because ((MASK + rc) & MASK) != MASK. + */ #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL #define OST_LVB_IS_ERR(blocks) \ @@ -1836,23 +1805,12 @@ void lustre_swab_ost_lvb(struct ost_lvb *lvb); * lquota data structures */ -#ifndef QUOTABLOCK_BITS -#define QUOTABLOCK_BITS 10 -#endif - -#ifndef QUOTABLOCK_SIZE -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) -#endif - -#ifndef toqb -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) -#endif - /* The lquota_id structure is an union of all the possible identifier types that * can be used with quota, this includes: * - 64-bit user ID * - 64-bit group ID - * - a FID which can be used for per-directory quota in the future */ + * - a FID which can be used for per-directory quota in the future + */ union lquota_id { struct lu_fid qid_fid; /* FID for per-directory quota */ __u64 qid_uid; /* user identifier */ @@ -1889,89 +1847,6 @@ do { \ Q_COPY(out, in, qc_dqblk); \ } while (0) -/* Body of quota request used for quota acquire/release RPCs between quota - * master (aka QMT) and slaves (ak QSD). */ -struct quota_body { - struct lu_fid qb_fid; /* FID of global index packing the pool ID - * and type (data or metadata) as well as - * the quota type (user or group). */ - union lquota_id qb_id; /* uid or gid or directory FID */ - __u32 qb_flags; /* see below */ - __u32 qb_padding; - __u64 qb_count; /* acquire/release count (kbytes/inodes) */ - __u64 qb_usage; /* current slave usage (kbytes/inodes) */ - __u64 qb_slv_ver; /* slave index file version */ - struct lustre_handle qb_lockh; /* per-ID lock handle */ - struct lustre_handle qb_glb_lockh; /* global lock handle */ - __u64 qb_padding1[4]; -}; - -/* When the quota_body is used in the reply of quota global intent - * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. 
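The OST_LVB_ERR_INIT/OST_LVB_ERR_MASK comment above explains why the error sentinel carries an extra 0x80000000: a negative rc is added to it, and the high bits must still match the mask afterwards. A userspace sketch of the arithmetic the comment describes; the helper names here are illustrative, not Lustre functions.

#include <stdio.h>

/* Constants copied from the hunk above. */
#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL

/* Fold a negative rc into the 64-bit blocks field (illustrative helper). */
static unsigned long long lvb_pack_err(int rc)
{
        return OST_LVB_ERR_INIT + rc;   /* rc < 0, result stays above the mask */
}

/* The error test the comment describes: the high bits still equal the mask. */
static int lvb_is_err(unsigned long long blocks)
{
        return (blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK;
}

int main(void)
{
        unsigned long long blocks = lvb_pack_err(-5);

        printf("is_err=%d rc=%lld\n", lvb_is_err(blocks),
               (long long)(blocks - OST_LVB_ERR_INIT));
        return 0;
}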
*/ -#define qb_slv_fid qb_fid -/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in - * quota reply */ -#define qb_qunit qb_usage - -#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */ -#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */ -#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */ -#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */ - -void lustre_swab_quota_body(struct quota_body *b); - -/* Quota types currently supported */ -enum { - LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */ - LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */ - LQUOTA_TYPE_MAX -}; - -/* There are 2 different resource types on which a quota limit can be enforced: - * - inodes on the MDTs - * - blocks on the OSTs */ -enum { - LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */ - LQUOTA_RES_DT = 0x02, - LQUOTA_LAST_RES, - LQUOTA_FIRST_RES = LQUOTA_RES_MD -}; - -#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1) - -/* - * Space accounting support - * Format of an accounting record, providing disk usage information for a given - * user or group - */ -struct lquota_acct_rec { /* 16 bytes */ - __u64 bspace; /* current space in use */ - __u64 ispace; /* current # inodes in use */ -}; - -/* - * Global quota index support - * Format of a global record, providing global quota settings for a given quota - * identifier - */ -struct lquota_glb_rec { /* 32 bytes */ - __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */ - __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */ - __u64 qbr_time; /* grace time, in seconds */ - __u64 qbr_granted; /* how much is granted to slaves, in #inodes or - * kbytes */ -}; - -/* - * Slave index support - * Format of a slave record, recording how much space is granted to a given - * slave - */ -struct lquota_slv_rec { /* 8 bytes */ - __u64 qsr_granted; /* space granted to the slave for the key=ID, - * in #inodes or kbytes */ -}; - /* Data structures associated with the quota locks */ /* Glimpse descriptor used for the index & per-ID quota locks */ @@ -1985,9 +1860,6 @@ struct ldlm_gl_lquota_desc { __u64 gl_pad2; }; -#define gl_qunit gl_hardlimit /* current qunit value used when - * glimpsing per-ID quota locks */ - /* quota glimpse flags */ #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */ @@ -2002,15 +1874,12 @@ struct lquota_lvb { void lustre_swab_lquota_lvb(struct lquota_lvb *lvb); -/* LVB used with global quota lock */ -#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */ - /* op codes */ -typedef enum { +enum quota_cmd { QUOTA_DQACQ = 601, QUOTA_DQREL = 602, QUOTA_LAST_OPC -} quota_cmd_t; +}; #define QUOTA_FIRST_OPC QUOTA_DQACQ /* @@ -2018,7 +1887,7 @@ typedef enum { */ /* opcodes */ -typedef enum { +enum mds_cmd { MDS_GETATTR = 33, MDS_GETATTR_NAME = 34, MDS_CLOSE = 35, @@ -2049,23 +1918,15 @@ typedef enum { MDS_HSM_CT_UNREGISTER = 60, MDS_SWAP_LAYOUTS = 61, MDS_LAST_OPC -} mds_cmd_t; +}; #define MDS_FIRST_OPC MDS_GETATTR -/* opcodes for object update */ -typedef enum { - UPDATE_OBJ = 1000, - UPDATE_LAST_OPC -} update_cmd_t; - -#define UPDATE_FIRST_OPC UPDATE_OBJ - /* * Do not exceed 63 */ -typedef enum { +enum mdt_reint_cmd { REINT_SETATTR = 1, REINT_CREATE = 2, REINT_LINK = 3, @@ -2074,9 +1935,9 @@ typedef enum { REINT_OPEN = 6, REINT_SETXATTR = 7, REINT_RMENTRY = 8, -// REINT_WRITE = 9, +/* REINT_WRITE = 9, */ REINT_MAX -} mds_reint_t, mdt_reint_t; +}; void lustre_swab_generic_32s(__u32 *val); @@ -2097,7 +1958,8 @@ void lustre_swab_generic_32s(__u32 *val); /* INODE LOCK 
PARTS */ #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also * was used to protect permission (mode, - * owner, group etc) before 2.4. */ + * owner, group etc) before 2.4. + */ #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ @@ -2110,7 +1972,8 @@ void lustre_swab_generic_32s(__u32 *val); * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together. * For Remote directory, the master MDT, where the remote directory is, will * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is, - * will grant LOOKUP_LOCK. */ + * will grant LOOKUP_LOCK. + */ #define MDS_INODELOCK_PERM 0x000010 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ @@ -2120,7 +1983,8 @@ void lustre_swab_generic_32s(__u32 *val); /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2], * but was moved into name[1] along with the OID to avoid consuming the - * name[2,3] fields that need to be used for the quota id (also a FID). */ + * name[2,3] fields that need to be used for the quota id (also a FID). + */ enum { LUSTRE_RES_ID_SEQ_OFF = 0, LUSTRE_RES_ID_VER_OID_OFF = 1, @@ -2156,7 +2020,8 @@ enum md_op_flags { #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1 /* these should be identical to their EXT4_*_FL counterparts, they are - * redefined here only to avoid dragging in fs/ext4/ext4.h */ + * redefined here only to avoid dragging in fs/ext4/ext4.h + */ #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */ #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */ #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */ @@ -2168,15 +2033,14 @@ enum md_op_flags { * protocol equivalents of LDISKFS_*_FL values stored on disk, while * the S_* flags are kernel-internal values that change between kernel * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS. - * See b=16526 for a full history. */ + * See b=16526 for a full history. + */ static inline int ll_ext_to_inode_flags(int flags) { return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) | ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) | ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) | -#if defined(S_DIRSYNC) ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) | -#endif ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0)); } @@ -2185,9 +2049,7 @@ static inline int ll_inode_to_ext_flags(int iflags) return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) | ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) | ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) | -#if defined(S_DIRSYNC) ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) | -#endif ((iflags & S_IMMUTABLE) ? 
LUSTRE_IMMUTABLE_FL : 0)); } @@ -2207,9 +2069,10 @@ struct mdt_body { __s64 ctime; __u64 blocks; /* XID, in the case of MDS_READPAGE */ __u64 ioepoch; - __u64 t_state; /* transient file state defined in - * enum md_transient_state - * was "ino" until 2.4.0 */ + __u64 t_state; /* transient file state defined in + * enum md_transient_state + * was "ino" until 2.4.0 + */ __u32 fsuid; __u32 fsgid; __u32 capability; @@ -2219,7 +2082,7 @@ struct mdt_body { __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */ __u32 rdev; __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 unused2; /* was "generation" until 2.4.0 */ + __u32 unused2; /* was "generation" until 2.4.0 */ __u32 suppgid; __u32 eadatasize; __u32 aclsize; @@ -2256,7 +2119,8 @@ enum { }; /* inode access permission for remote user, the inode info are omitted, - * for client knows them. */ + * for client knows them. + */ struct mdt_remote_perm { __u32 rp_uid; __u32 rp_gid; @@ -2306,13 +2170,13 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); * since the client and MDS may run different kernels (see bug 13828) * Therefore, we should only use MDS_ATTR_* attributes for sa_valid. */ -#define MDS_ATTR_MODE 0x1ULL /* = 1 */ -#define MDS_ATTR_UID 0x2ULL /* = 2 */ -#define MDS_ATTR_GID 0x4ULL /* = 4 */ -#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ -#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ -#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ -#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ +#define MDS_ATTR_MODE 0x1ULL /* = 1 */ +#define MDS_ATTR_UID 0x2ULL /* = 2 */ +#define MDS_ATTR_GID 0x4ULL /* = 4 */ +#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ +#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ +#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ +#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */ #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */ #define MDS_ATTR_FORCE 0x200ULL /* = 512, Not a change, but a change it */ @@ -2320,14 +2184,11 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */ #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */ #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */ -#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, ie O_TRUNC */ +#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, + * ie O_TRUNC + */ #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */ -#ifndef FMODE_READ -#define FMODE_READ 00000001 -#define FMODE_WRITE 00000002 -#endif - #define MDS_FMODE_CLOSED 00000000 #define MDS_FMODE_EXEC 00000004 /* IO Epoch is opened on a closed file. */ @@ -2354,9 +2215,10 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); * We do not support JOIN FILE * anymore, reserve this flags * just for preventing such bit - * to be reused. */ + * to be reused. 
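The MDS_ATTR_* bits above populate sa_valid in mdt_rec_setattr; as the comment notes, they are used instead of the kernel's own ATTR_* values because client and MDS may run different kernels. A small sketch of building and testing such a mask; the values are copied from the hunk, the combination chosen is just an example.

#include <stdio.h>

/* sa_valid bits copied from the MDS_ATTR_* hunk above. */
#define MDS_ATTR_MODE   0x1ULL
#define MDS_ATTR_SIZE   0x8ULL
#define MDS_ATTR_MTIME  0x20ULL
#define MDS_ATTR_CTIME  0x40ULL

int main(void)
{
        /* e.g. a truncate that also updates mtime and ctime */
        unsigned long long sa_valid = MDS_ATTR_SIZE | MDS_ATTR_MTIME |
                                      MDS_ATTR_CTIME;

        printf("sa_valid = %#llx, size bit set: %d\n",
               sa_valid, !!(sa_valid & MDS_ATTR_SIZE));
        return 0;
}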
+ */ -#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ +#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */ #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */ #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */ @@ -2409,7 +2271,8 @@ struct mdt_rec_create { __u32 cr_bias; /* use of helpers set/get_mrc_cr_flags() is needed to access * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to - * extend cr_flags size without breaking 1.8 compat */ + * extend cr_flags size without breaking 1.8 compat + */ __u32 cr_flags_l; /* for use with open, low 32 bits */ __u32 cr_flags_h; /* for use with open, high 32 bits */ __u32 cr_umask; /* umask for create */ @@ -2630,7 +2493,8 @@ enum seq_op { #define LOV_MAX_UUID_BUFFER_SIZE 8192 /* The size of the buffer the lov/mdc reserves for the * array of UUIDs returned by the MDS. With the current - * protocol, this will limit the max number of OSTs per LOV */ + * protocol, this will limit the max number of OSTs per LOV + */ #define LOV_DESC_MAGIC 0xB0CCDE5C #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */ @@ -2639,13 +2503,13 @@ enum seq_op { /* LOV settings descriptor (should only contain static info) */ struct lov_desc { __u32 ld_tgt_count; /* how many OBD's */ - __u32 ld_active_tgt_count; /* how many active */ - __u32 ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default PATTERN_RAID0 */ - __u64 ld_default_stripe_size; /* in bytes */ - __u64 ld_default_stripe_offset; /* in bytes */ + __u32 ld_active_tgt_count; /* how many active */ + __u32 ld_default_stripe_count; /* how many objects are used */ + __u32 ld_pattern; /* default PATTERN_RAID0 */ + __u64 ld_default_stripe_size; /* in bytes */ + __u64 ld_default_stripe_offset; /* in bytes */ __u32 ld_padding_0; /* unused */ - __u32 ld_qos_maxage; /* in second */ + __u32 ld_qos_maxage; /* in second */ __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */ __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */ struct obd_uuid ld_uuid; @@ -2659,7 +2523,7 @@ void lustre_swab_lov_desc(struct lov_desc *ld); * LDLM requests: */ /* opcodes -- MUST be distinct from OST/MDS opcodes */ -typedef enum { +enum ldlm_cmd { LDLM_ENQUEUE = 101, LDLM_CONVERT = 102, LDLM_CANCEL = 103, @@ -2668,7 +2532,7 @@ typedef enum { LDLM_GL_CALLBACK = 106, LDLM_SET_INFO = 107, LDLM_LAST_OPC -} ldlm_cmd_t; +}; #define LDLM_FIRST_OPC LDLM_ENQUEUE #define RES_NAME_SIZE 4 @@ -2687,7 +2551,7 @@ static inline int ldlm_res_eq(const struct ldlm_res_id *res0, } /* lock types */ -typedef enum { +enum ldlm_mode { LCK_MINMODE = 0, LCK_EX = 1, LCK_PW = 2, @@ -2698,17 +2562,17 @@ typedef enum { LCK_GROUP = 64, LCK_COS = 128, LCK_MAXMODE -} ldlm_mode_t; +}; #define LCK_MODE_NUM 8 -typedef enum { +enum ldlm_type { LDLM_PLAIN = 10, LDLM_EXTENT = 11, LDLM_FLOCK = 12, LDLM_IBITS = 13, LDLM_MAX_TYPE -} ldlm_type_t; +}; #define LDLM_MIN_TYPE LDLM_PLAIN @@ -2747,7 +2611,8 @@ struct ldlm_flock_wire { * the first fields of the ldlm_flock structure because there is only * one ldlm_swab routine to process the ldlm_policy_data_t union. if * this ever changes we will need to swab the union differently based - * on the resource type. */ + * on the resource type. 
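The mdt_rec_create comment above notes that the 64-bit cr_flags value is carried as two 32-bit halves (cr_flags_l/cr_flags_h) so the record layout stays compatible with 1.8, and that the set/get_mrc_cr_flags() helpers do the packing. A standalone sketch of that split/join on a stand-in struct; it is not the Lustre helper itself.

#include <stdio.h>

/* Stand-in for the two halves kept in struct mdt_rec_create. */
struct cr_flags_halves {
        unsigned int cr_flags_l;        /* low 32 bits */
        unsigned int cr_flags_h;        /* high 32 bits */
};

static void set_cr_flags(struct cr_flags_halves *rec, unsigned long long flags)
{
        rec->cr_flags_l = (unsigned int)(flags & 0xffffffffULL);
        rec->cr_flags_h = (unsigned int)(flags >> 32);
}

static unsigned long long get_cr_flags(const struct cr_flags_halves *rec)
{
        return ((unsigned long long)rec->cr_flags_h << 32) | rec->cr_flags_l;
}

int main(void)
{
        struct cr_flags_halves rec;

        set_cr_flags(&rec, 0x123456789abcdef0ULL);
        printf("round-trip: %#llx\n", get_cr_flags(&rec));
        return 0;
}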
+ */ typedef union { struct ldlm_extent l_extent; @@ -2768,15 +2633,15 @@ struct ldlm_intent { void lustre_swab_ldlm_intent(struct ldlm_intent *i); struct ldlm_resource_desc { - ldlm_type_t lr_type; + enum ldlm_type lr_type; __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */ struct ldlm_res_id lr_name; }; struct ldlm_lock_desc { struct ldlm_resource_desc l_resource; - ldlm_mode_t l_req_mode; - ldlm_mode_t l_granted_mode; + enum ldlm_mode l_req_mode; + enum ldlm_mode l_granted_mode; ldlm_wire_policy_data_t l_policy_data; }; @@ -2793,7 +2658,8 @@ struct ldlm_request { void lustre_swab_ldlm_request(struct ldlm_request *rq); /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available. - * Otherwise, 2 are available. */ + * Otherwise, 2 are available. + */ #define ldlm_request_bufsize(count, type) \ ({ \ int _avail = LDLM_LOCKREQ_HANDLES; \ @@ -2820,7 +2686,7 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r); /* * Opcodes for mountconf (mgs and mgc) */ -typedef enum { +enum mgs_cmd { MGS_CONNECT = 250, MGS_DISCONNECT, MGS_EXCEPTION, /* node died, etc. */ @@ -2829,7 +2695,7 @@ typedef enum { MGS_SET_INFO, MGS_CONFIG_READ, MGS_LAST_OPC -} mgs_cmd_t; +}; #define MGS_FIRST_OPC MGS_CONNECT #define MGS_PARAM_MAXLEN 1024 @@ -2918,13 +2784,13 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size); * Opcodes for multiple servers. */ -typedef enum { +enum obd_cmd { OBD_PING = 400, OBD_LOG_CANCEL, OBD_QC_CALLBACK, OBD_IDX_READ, OBD_LAST_OPC -} obd_cmd_t; +}; #define OBD_FIRST_OPC OBD_PING /* catalog of log objects */ @@ -2933,7 +2799,7 @@ typedef enum { struct llog_logid { struct ost_id lgl_oi; __u32 lgl_ogen; -} __attribute__((packed)); +} __packed; /** Records written to the CATALOGS list */ #define CATLIST "CATALOGS" @@ -2942,7 +2808,7 @@ struct llog_catid { __u32 lci_padding1; __u32 lci_padding2; __u32 lci_padding3; -} __attribute__((packed)); +} __packed; /* Log data record types - there is no specific reason that these need to * be related to the RPC opcodes, but no reason not to (may be handy later?) 
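Several hunks here replace __attribute__((packed)) with the kernel's __packed shorthand; the semantics are identical. The packing itself matters because these are on-wire records: without it the compiler may insert padding and change the record size. A toy illustration (the struct below is not a Lustre one):

#include <stdio.h>

struct wire_rec_packed {
        unsigned int       len;
        unsigned short     flags;
        unsigned long long cookie;
} __attribute__((packed));

struct wire_rec_padded {
        unsigned int       len;
        unsigned short     flags;
        unsigned long long cookie;
};

int main(void)
{
        printf("packed: %zu bytes, padded: %zu bytes\n",
               sizeof(struct wire_rec_packed),
               sizeof(struct wire_rec_padded));
        return 0;
}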
@@ -2950,7 +2816,7 @@ struct llog_catid { #define LLOG_OP_MAGIC 0x10600000 #define LLOG_OP_MASK 0xfff00000 -typedef enum { +enum llog_op_type { LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000, OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00, /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */ @@ -2970,7 +2836,7 @@ typedef enum { HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000, LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539, LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b, -} llog_op_type; +}; #define LLOG_REC_HDR_NEEDS_SWABBING(r) \ (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC)) @@ -3006,7 +2872,7 @@ struct llog_logid_rec { __u64 lid_padding2; __u64 lid_padding3; struct llog_rec_tail lid_tail; -} __attribute__((packed)); +} __packed; struct llog_unlink_rec { struct llog_rec_hdr lur_hdr; @@ -3014,7 +2880,7 @@ struct llog_unlink_rec { __u32 lur_oseq; __u32 lur_count; struct llog_rec_tail lur_tail; -} __attribute__((packed)); +} __packed; struct llog_unlink64_rec { struct llog_rec_hdr lur_hdr; @@ -3024,7 +2890,7 @@ struct llog_unlink64_rec { __u64 lur_padding2; __u64 lur_padding3; struct llog_rec_tail lur_tail; -} __attribute__((packed)); +} __packed; struct llog_setattr64_rec { struct llog_rec_hdr lsr_hdr; @@ -3035,7 +2901,7 @@ struct llog_setattr64_rec { __u32 lsr_gid_h; __u64 lsr_padding; struct llog_rec_tail lsr_tail; -} __attribute__((packed)); +} __packed; struct llog_size_change_rec { struct llog_rec_hdr lsc_hdr; @@ -3045,16 +2911,7 @@ struct llog_size_change_rec { __u64 lsc_padding2; __u64 lsc_padding3; struct llog_rec_tail lsc_tail; -} __attribute__((packed)); - -#define CHANGELOG_MAGIC 0xca103000 - -/** \a changelog_rec_type's that can't be masked */ -#define CHANGELOG_MINMASK (1 << CL_MARK) -/** bits covering all \a changelog_rec_type's */ -#define CHANGELOG_ALLMASK 0XFFFFFFFF -/** default \a changelog_rec_type mask */ -#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE) +} __packed; /* changelog llog name, needed by client replicators */ #define CHANGELOG_CATALOG "changelog_catalog" @@ -3062,22 +2919,20 @@ struct llog_size_change_rec { struct changelog_setinfo { __u64 cs_recno; __u32 cs_id; -} __attribute__((packed)); +} __packed; /** changelog record */ struct llog_changelog_rec { struct llog_rec_hdr cr_hdr; struct changelog_rec cr; struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); +} __packed; struct llog_changelog_ext_rec { struct llog_rec_hdr cr_hdr; struct changelog_ext_rec cr; struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); - -#define CHANGELOG_USER_PREFIX "cl" +} __packed; struct llog_changelog_user_rec { struct llog_rec_hdr cur_hdr; @@ -3085,7 +2940,7 @@ struct llog_changelog_user_rec { __u32 cur_padding; __u64 cur_endrec; struct llog_rec_tail cur_tail; -} __attribute__((packed)); +} __packed; enum agent_req_status { ARS_WAITING, @@ -3123,21 +2978,22 @@ struct llog_agent_req_rec { struct llog_rec_hdr arr_hdr; /**< record header */ __u32 arr_status; /**< status of the request */ /* must match enum - * agent_req_status */ + * agent_req_status + */ __u32 arr_archive_id; /**< backend archive number */ __u64 arr_flags; /**< req flags */ - __u64 arr_compound_id; /**< compound cookie */ + __u64 arr_compound_id;/**< compound cookie */ __u64 arr_req_create; /**< req. creation time */ __u64 arr_req_change; /**< req. status change time */ struct hsm_action_item arr_hai; /**< req. 
to the agent */ - struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ -} __attribute__((packed)); + struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ +} __packed; /* Old llog gen for compatibility */ struct llog_gen { __u64 mnt_cnt; __u64 conn_cnt; -} __attribute__((packed)); +} __packed; struct llog_gen_rec { struct llog_rec_hdr lgr_hdr; @@ -3175,19 +3031,21 @@ struct llog_log_hdr { __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23]; __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)]; struct llog_rec_tail llh_tail; -} __attribute__((packed)); +} __packed; #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ llh->llh_bitmap_offset - \ sizeof(llh->llh_tail)) * 8) -/** log cookies are used to reference a specific log file and a record therein */ +/** log cookies are used to reference a specific log file and a record + * therein + */ struct llog_cookie { struct llog_logid lgc_lgl; __u32 lgc_subsys; __u32 lgc_index; __u32 lgc_padding; -} __attribute__((packed)); +} __packed; /** llog protocol */ enum llogd_rpc_ops { @@ -3196,7 +3054,7 @@ enum llogd_rpc_ops { LLOG_ORIGIN_HANDLE_READ_HEADER = 503, LLOG_ORIGIN_HANDLE_WRITE_REC = 504, LLOG_ORIGIN_HANDLE_CLOSE = 505, - LLOG_ORIGIN_CONNECT = 506, + LLOG_ORIGIN_CONNECT = 506, LLOG_CATINFO = 507, /* deprecated */ LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508, LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/ @@ -3212,13 +3070,13 @@ struct llogd_body { __u32 lgd_saved_index; __u32 lgd_len; __u64 lgd_cur_offset; -} __attribute__((packed)); +} __packed; struct llogd_conn_body { struct llog_gen lgdc_gen; struct llog_logid lgdc_logid; __u32 lgdc_ctxt_idx; -} __attribute__((packed)); +} __packed; /* Note: 64-bit types are 64-bit aligned in structure */ struct obdo { @@ -3245,17 +3103,18 @@ struct obdo { __u64 o_ioepoch; /* epoch in ost writes */ __u32 o_stripe_idx; /* holds stripe idx */ __u32 o_parent_ver; - struct lustre_handle o_handle; /* brw: lock handle to prolong - * locks */ - struct llog_cookie o_lcookie; /* destroy: unlink cookie from - * MDS */ + struct lustre_handle o_handle; /* brw: lock handle to prolong locks + */ + struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS + */ __u32 o_uid_h; __u32 o_gid_h; __u64 o_data_version; /* getattr: sum of iversion for * each stripe. 
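The LLOG_OP_MAGIC/LLOG_OP_MASK definitions and the LLOG_REC_HDR_NEEDS_SWABBING() check in the hunks above detect log records written with the opposite endianness: the magic only matches after a byte swap. A userspace sketch of the same test, with swab32() standing in for the kernel's __swab32().

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the hunk above. */
#define LLOG_OP_MAGIC   0x10600000
#define LLOG_OP_MASK    0xfff00000

/* Userspace stand-in for __swab32(). */
static uint32_t swab32(uint32_t x)
{
        return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) |
               ((x & 0x00ff0000U) >> 8)  | ((x & 0xff000000U) >> 24);
}

/* The record needs swabbing if its magic only matches byte-swapped. */
static int needs_swabbing(uint32_t lrh_type)
{
        return (lrh_type & swab32(LLOG_OP_MASK)) == swab32(LLOG_OP_MAGIC);
}

int main(void)
{
        uint32_t native = LLOG_OP_MAGIC | 0x45539;      /* LLOG_HDR_MAGIC */

        printf("native: %d, byte-swapped: %d\n",
               needs_swabbing(native), needs_swabbing(swab32(native)));
        return 0;
}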
* brw: grant space consumed on - * the client for the write */ + * the client for the write + */ __u64 o_padding_4; __u64 o_padding_5; __u64 o_padding_6; @@ -3273,13 +3132,14 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, { *wobdo = *lobdo; wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; - if (ocd == NULL) + if (!ocd) return; if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) { /* Currently OBD_FL_OSTID will only be used when 2.4 echo - * client communicate with pre-2.4 server */ + * client communicate with pre-2.4 server + */ wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid); wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid); } @@ -3292,7 +3152,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, __u32 local_flags = 0; if (lobdo->o_valid & OBD_MD_FLFLAGS) - local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; + local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; *lobdo = *wobdo; if (local_flags != 0) { @@ -3300,7 +3160,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; lobdo->o_flags |= local_flags; } - if (ocd == NULL) + if (!ocd) return; if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && @@ -3349,100 +3209,14 @@ void dump_ioo(struct obd_ioobj *nb); void dump_ost_body(struct ost_body *ob); void dump_rcs(__u32 *rc); -#define IDX_INFO_MAGIC 0x3D37CC37 - -/* Index file transfer through the network. The server serializes the index into - * a byte stream which is sent to the client via a bulk transfer */ -struct idx_info { - __u32 ii_magic; - - /* reply: see idx_info_flags below */ - __u32 ii_flags; - - /* request & reply: number of lu_idxpage (to be) transferred */ - __u16 ii_count; - __u16 ii_pad0; - - /* request: requested attributes passed down to the iterator API */ - __u32 ii_attrs; - - /* request & reply: index file identifier (FID) */ - struct lu_fid ii_fid; - - /* reply: version of the index file before starting to walk the index. - * Please note that the version can be modified at any time during the - * transfer */ - __u64 ii_version; - - /* request: hash to start with: - * reply: hash of the first entry of the first lu_idxpage and hash - * of the entry to read next if any */ - __u64 ii_hash_start; - __u64 ii_hash_end; - - /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is - * set */ - __u16 ii_keysize; - - /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC - * is set */ - __u16 ii_recsize; - - __u32 ii_pad1; - __u64 ii_pad2; - __u64 ii_pad3; -}; - -void lustre_swab_idx_info(struct idx_info *ii); - -#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */ - -/* List of flags used in idx_info::ii_flags */ -enum idx_info_flags { - II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */ - II_FL_VARKEY = 1 << 1, /* keys can be of variable size */ - II_FL_VARREC = 1 << 2, /* records can be of variable size */ - II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */ -}; - -#define LIP_MAGIC 0x8A6D6B6C - -/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */ -struct lu_idxpage { - /* 16-byte header */ - __u32 lip_magic; - __u16 lip_flags; - __u16 lip_nr; /* number of entries in the container */ - __u64 lip_pad0; /* additional padding for future use */ - - /* key/record pairs are stored in the remaining 4080 bytes. 
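lustre_get_wire_obdo() above preserves client-local flag bits across the wire copy: the OBD_FL_LOCAL_MASK bits are saved, the whole obdo is overwritten from the wire, and the saved bits are put back. A simplified standalone sketch of that save/clear/restore pattern; the mask value and struct are stand-ins, and the OBD_MD_FLFLAGS validity check of the real function is omitted.

#include <stdio.h>

#define LOCAL_MASK 0xf0000000u          /* stand-in for OBD_FL_LOCAL_MASK */

struct toy_obdo {
        unsigned int o_flags;
};

static void get_wire(struct toy_obdo *local, const struct toy_obdo *wire)
{
        unsigned int local_flags = local->o_flags & LOCAL_MASK;

        *local = *wire;                 /* wire copy clobbers o_flags */
        local->o_flags &= ~LOCAL_MASK;  /* never trust local-only bits off the wire */
        local->o_flags |= local_flags;  /* restore what was saved */
}

int main(void)
{
        struct toy_obdo local = { .o_flags = 0x90000001 };
        struct toy_obdo wire  = { .o_flags = 0x30000002 };

        get_wire(&local, &wire);
        printf("o_flags after: %#x\n", local.o_flags);  /* 0x90000002 */
        return 0;
}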
- * depending upon the flags in idx_info::ii_flags, each key/record - * pair might be preceded by: - * - a hash value - * - the key size (II_FL_VARKEY is set) - * - the record size (II_FL_VARREC is set) - * - * For the time being, we only support fixed-size key & record. */ - char lip_entries[0]; -}; - -#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries)) - -/* Gather all possible type associated with a 4KB container */ -union lu_page { - struct lu_dirpage lp_dir; /* for MDS_READPAGE */ - struct lu_idxpage lp_idx; /* for OBD_IDX_READ */ - char lp_array[LU_PAGE_SIZE]; -}; - /* security opcodes */ -typedef enum { +enum sec_cmd { SEC_CTX_INIT = 801, SEC_CTX_INIT_CONT = 802, SEC_CTX_FINI = 803, SEC_LAST_OPC, SEC_FIRST_OPC = SEC_CTX_INIT -} sec_cmd_t; +}; /* * capa related definitions @@ -3451,7 +3225,8 @@ typedef enum { #define CAPA_HMAC_KEY_MAX_LEN 56 /* NB take care when changing the sequence of elements this struct, - * because the offset info is used in find_capa() */ + * because the offset info is used in find_capa() + */ struct lustre_capa { struct lu_fid lc_fid; /** fid */ __u64 lc_opc; /** operations allowed */ @@ -3463,7 +3238,7 @@ struct lustre_capa { /* FIXME: y2038 time_t overflow: */ __u32 lc_expiry; /** expiry time (sec) */ __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ -} __attribute__((packed)); +} __packed; void lustre_swab_lustre_capa(struct lustre_capa *c); @@ -3497,7 +3272,7 @@ struct lustre_capa_key { __u32 lk_keyid; /**< key# */ __u32 lk_padding; __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */ -} __attribute__((packed)); +} __packed; /** The link ea holds 1 \a link_ea_entry for each hardlink */ #define LINK_EA_MAGIC 0x11EAF1DFUL @@ -3518,7 +3293,7 @@ struct link_ea_entry { unsigned char lee_reclen[2]; unsigned char lee_parent_fid[sizeof(struct lu_fid)]; char lee_name[0]; -} __attribute__((packed)); +} __packed; /** fid2path request/reply structure */ struct getinfo_fid2path { @@ -3527,7 +3302,7 @@ struct getinfo_fid2path { __u32 gf_linkno; __u32 gf_pathlen; char gf_path[0]; -} __attribute__((packed)); +} __packed; void lustre_swab_fid2path (struct getinfo_fid2path *gf); @@ -3558,7 +3333,7 @@ void lustre_swab_layout_intent(struct layout_intent *li); */ struct hsm_progress_kernel { /* Field taken from struct hsm_progress */ - lustre_fid hpk_fid; + struct lu_fid hpk_fid; __u64 hpk_cookie; struct hsm_extent hpk_extent; __u16 hpk_flags; @@ -3567,7 +3342,7 @@ struct hsm_progress_kernel { /* Additional fields */ __u64 hpk_data_version; __u64 hpk_padding2; -} __attribute__((packed)); +} __packed; void lustre_swab_hsm_user_state(struct hsm_user_state *hus); void lustre_swab_hsm_current_action(struct hsm_current_action *action); @@ -3576,92 +3351,6 @@ void lustre_swab_hsm_user_state(struct hsm_user_state *hus); void lustre_swab_hsm_user_item(struct hsm_user_item *hui); void lustre_swab_hsm_request(struct hsm_request *hr); -/** - * These are object update opcode under UPDATE_OBJ, which is currently - * being used by cross-ref operations between MDT. - * - * During the cross-ref operation, the Master MDT, which the client send the - * request to, will disassembly the operation into object updates, then OSP - * will send these updates to the remote MDT to be executed. - * - * Update request format - * magic: UPDATE_BUFFER_MAGIC_V1 - * Count: How many updates in the req. - * bufs[0] : following are packets of object. - * update[0]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. 
- * update[1]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * .......... - * update[7]: type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * Current 8 maxim updates per object update request. - * - ******************************************************************* - * update reply format: - * - * ur_version: UPDATE_REPLY_V1 - * ur_count: The count of the reply, which is usually equal - * to the number of updates in the request. - * ur_lens: The reply lengths of each object update. - * - * replies: 1st update reply [4bytes_ret: other body] - * 2nd update reply [4bytes_ret: other body] - * ..... - * nth update reply [4bytes_ret: other body] - * - * For each reply of the update, the format would be - * result(4 bytes):Other stuff - */ - -#define UPDATE_MAX_OPS 10 -#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001 -#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1 -#define UPDATE_BUF_COUNT 8 -enum object_update_op { - OBJ_CREATE = 1, - OBJ_DESTROY = 2, - OBJ_REF_ADD = 3, - OBJ_REF_DEL = 4, - OBJ_ATTR_SET = 5, - OBJ_ATTR_GET = 6, - OBJ_XATTR_SET = 7, - OBJ_XATTR_GET = 8, - OBJ_INDEX_LOOKUP = 9, - OBJ_INDEX_INSERT = 10, - OBJ_INDEX_DELETE = 11, - OBJ_LAST -}; - -struct update { - __u32 u_type; - __u32 u_batchid; - struct lu_fid u_fid; - __u32 u_lens[UPDATE_BUF_COUNT]; - __u32 u_bufs[0]; -}; - -struct update_buf { - __u32 ub_magic; - __u32 ub_count; - __u32 ub_bufs[0]; -}; - -#define UPDATE_REPLY_V1 0x00BD0001 -struct update_reply { - __u32 ur_version; - __u32 ur_count; - __u32 ur_lens[0]; -}; - -void lustre_swab_update_buf(struct update_buf *ub); -void lustre_swab_update_reply_buf(struct update_reply *ur); - /** layout swap request structure * fid1 and fid2 are in mdt_body */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 2b4dd656d5f5..276906e646f5 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -85,9 +85,8 @@ struct obd_statfs { __u32 os_namelen; __u64 os_maxbytes; __u32 os_state; /**< obd_statfs_state OS_STATE_* flag */ - __u32 os_fprecreated; /* objs available now to the caller */ - /* used in QoS code to find preferred - * OSTs */ + __u32 os_fprecreated; /* objs available now to the caller */ + /* used in QoS code to find preferred OSTs */ __u32 os_spare2; __u32 os_spare3; __u32 os_spare4; @@ -135,8 +134,9 @@ struct filter_fid_old { /* Userspace should treat lu_fid as opaque, and only use the following methods * to print or parse them. Other functions (e.g. compare, swab) could be moved - * here from lustre_idl.h if needed. */ -typedef struct lu_fid lustre_fid; + * here from lustre_idl.h if needed. + */ +struct lu_fid; /** * Following struct for object attributes, that will be kept inode's EA. @@ -266,7 +266,8 @@ struct ost_id { /* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular * files, but are unlikely to be used in practice and are not harmful if * used incorrectly. O_NOCTTY and FASYNC are only meaningful for character - * devices and are safe for use on new files (See LU-812, LU-4209). */ + * devices and are safe for use on new files (See LU-812, LU-4209). 
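lov_user_md_v1/v3 above end in a zero-length lmm_objects[0] array, one lov_user_ost_data_v1 entry per stripe, so a LOV EA buffer is sized as header plus stripe_count entries (the same rule behind the LOV_MAX_STRIPE_COUNT comment). A sketch of that sizing on simplified stand-in structs, not the real layouts.

#include <stdio.h>
#include <stdlib.h>

struct toy_ost_data {                   /* stand-in for lov_user_ost_data_v1 */
        unsigned long long l_object_id;
        unsigned int       l_ost_gen;
        unsigned int       l_ost_idx;
};

struct toy_lov_md {                     /* stand-in for lov_user_md_v1 */
        unsigned int   lmm_magic;
        unsigned short lmm_stripe_count;
        unsigned short lmm_layout_gen;
        struct toy_ost_data lmm_objects[0];     /* per-stripe data follows */
};

int main(void)
{
        unsigned short stripe_count = 4;
        size_t size = sizeof(struct toy_lov_md) +
                      stripe_count * sizeof(struct toy_ost_data);
        struct toy_lov_md *lum = calloc(1, size);

        if (!lum)
                return 1;
        lum->lmm_stripe_count = stripe_count;
        printf("EA buffer for %u stripes: %zu bytes\n",
               (unsigned int)lum->lmm_stripe_count, size);
        free(lum);
        return 0;
}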
+ */ #define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC) #define LL_FILE_IGNORE_LOCK 0x00000001 @@ -302,7 +303,8 @@ struct ost_id { * The limit of 12 pages is somewhat arbitrary, but is a reasonably large * allocation that is sufficient for the current generation of systems. * - * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) */ + * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) + */ #define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */ #define LOV_ALL_STRIPES 0xffff /* only valid for directories */ #define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */ @@ -323,9 +325,11 @@ struct lov_user_md_v1 { /* LOV EA user data (host-endian) */ __u16 lmm_stripe_count; /* num stripes in use for this object */ union { __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing */ + * lmm_objects, use when writing + */ __u16 lmm_layout_gen; /* layout generation number - * used when reading */ + * used when reading + */ }; struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ } __attribute__((packed, __may_alias__)); @@ -338,9 +342,11 @@ struct lov_user_md_v3 { /* LOV EA user data (host-endian) */ __u16 lmm_stripe_count; /* num stripes in use for this object */ union { __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing */ + * lmm_objects, use when writing + */ __u16 lmm_layout_gen; /* layout generation number - * used when reading */ + * used when reading + */ }; char lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */ struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ @@ -442,9 +448,13 @@ static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp) /* For printf's only, make sure uuid is terminated */ static inline char *obd_uuid2str(const struct obd_uuid *uuid) { + if (!uuid) + return NULL; + if (uuid->uuid[sizeof(*uuid) - 1] != '\0') { /* Obviously not safe, but for printfs, no real harm done... - we're always null-terminated, even in a race. */ + * we're always null-terminated, even in a race. + */ static char temp[sizeof(*uuid)]; memcpy(temp, uuid->uuid, sizeof(*uuid) - 1); @@ -455,8 +465,9 @@ static inline char *obd_uuid2str(const struct obd_uuid *uuid) } /* Extract fsname from uuid (or target name) of a target - e.g. (myfs-OST0007_UUID -> myfs) - see also deuuidify. */ + * e.g. (myfs-OST0007_UUID -> myfs) + * see also deuuidify. + */ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) { char *p; @@ -465,11 +476,12 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) buf[buflen - 1] = '\0'; p = strrchr(buf, '-'); if (p) - *p = '\0'; + *p = '\0'; } /* printf display format - e.g. printf("file FID is "DFID"\n", PFID(fid)); */ + * e.g. printf("file FID is "DFID"\n", PFID(fid)); + */ #define FID_NOBRACE_LEN 40 #define FID_LEN (FID_NOBRACE_LEN + 2) #define DFID_NOBRACE "%#llx:0x%x:0x%x" @@ -480,7 +492,8 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) (fid)->f_ver /* scanf input parse format -- strip '[' first. - e.g. sscanf(fidstr, SFID, RFID(&fid)); */ + * e.g. 
sscanf(fidstr, SFID, RFID(&fid)); + */ #define SFID "0x%llx:0x%x:0x%x" #define RFID(fid) \ &((fid)->f_seq), \ @@ -542,22 +555,6 @@ enum { RMT_RGETFACL = 4 }; -#ifdef NEED_QUOTA_DEFS -#ifndef QIF_BLIMITS -#define QIF_BLIMITS 1 -#define QIF_SPACE 2 -#define QIF_ILIMITS 4 -#define QIF_INODES 8 -#define QIF_BTIME 16 -#define QIF_ITIME 32 -#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) -#define QIF_USAGE (QIF_SPACE | QIF_INODES) -#define QIF_TIMES (QIF_BTIME | QIF_ITIME) -#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES) -#endif - -#endif /* !__KERNEL__ */ - /* lustre volatile file support * file name header: .^L^S^T^R:volatile" */ @@ -566,9 +563,9 @@ enum { /* hdr + MDT index */ #define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:" -typedef enum lustre_quota_version { +enum lustre_quota_version { LUSTRE_QUOTA_V2 = 1 -} lustre_quota_version_t; +}; /* XXX: same as if_dqinfo struct in kernel */ struct obd_dqinfo { @@ -698,7 +695,8 @@ static inline const char *changelog_type2str(int type) #define CLF_HSM_LAST 15 /* Remove bits higher than _h, then extract the value - * between _h and _l by shifting lower weigth to bit 0. */ + * between _h and _l by shifting lower weigth to bit 0. + */ #define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \ >> (CLF_HSM_LAST - _h + _l)) @@ -761,10 +759,10 @@ struct changelog_rec { __u64 cr_prev; /**< last index for this target fid */ __u64 cr_time; union { - lustre_fid cr_tfid; /**< target fid */ + struct lu_fid cr_tfid; /**< target fid */ __u32 cr_markerflags; /**< CL_MARK flags */ }; - lustre_fid cr_pfid; /**< parent fid */ + struct lu_fid cr_pfid; /**< parent fid */ char cr_name[0]; /**< last element */ } __packed; @@ -775,18 +773,19 @@ struct changelog_rec { struct changelog_ext_rec { __u16 cr_namelen; __u16 cr_flags; /**< (flags & CLF_FLAGMASK) | - CLF_EXT_VERSION */ + * CLF_EXT_VERSION + */ __u32 cr_type; /**< \a changelog_rec_type */ __u64 cr_index; /**< changelog record number */ __u64 cr_prev; /**< last index for this target fid */ __u64 cr_time; union { - lustre_fid cr_tfid; /**< target fid */ + struct lu_fid cr_tfid; /**< target fid */ __u32 cr_markerflags; /**< CL_MARK flags */ }; - lustre_fid cr_pfid; /**< target parent fid */ - lustre_fid cr_sfid; /**< source fid, or zero */ - lustre_fid cr_spfid; /**< source parent fid, or zero */ + struct lu_fid cr_pfid; /**< target parent fid */ + struct lu_fid cr_sfid; /**< source fid, or zero */ + struct lu_fid cr_spfid; /**< source parent fid, or zero */ char cr_name[0]; /**< last element */ } __packed; @@ -835,7 +834,8 @@ struct ioc_data_version { }; #define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling - version. Dirty caches are left unchanged. */ + * version. Dirty caches are left unchanged. 
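The DFID/PFID and SFID/RFID macros above give the canonical print and scan formats for a FID, as the comments' printf/sscanf examples show. A standalone version of the print side; struct lu_fid is re-declared here with its standard layout (64-bit sequence, 32-bit object id, 32-bit version) and the FID value is arbitrary.

#include <stdio.h>

struct lu_fid {
        unsigned long long f_seq;
        unsigned int       f_oid;
        unsigned int       f_ver;
};

/* Same shape as DFID/PFID above: "[seq:oid:ver]" with a pointer argument. */
#define DFID "[%#llx:0x%x:0x%x]"
#define PFID(fid) (fid)->f_seq, (fid)->f_oid, (fid)->f_ver

int main(void)
{
        struct lu_fid fid = { .f_seq = 0x200000401ULL, .f_oid = 0x1, .f_ver = 0 };

        printf("file FID is "DFID"\n", PFID(&fid));
        return 0;
}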
+ */ #ifndef offsetof # define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb))) @@ -976,8 +976,8 @@ struct hsm_request { }; struct hsm_user_item { - lustre_fid hui_fid; - struct hsm_extent hui_extent; + struct lu_fid hui_fid; + struct hsm_extent hui_extent; } __packed; struct hsm_user_request { @@ -1046,8 +1046,8 @@ static inline char *hsm_copytool_action2name(enum hsm_copytool_action a) struct hsm_action_item { __u32 hai_len; /* valid size of this struct */ __u32 hai_action; /* hsm_copytool_action, but use known size */ - lustre_fid hai_fid; /* Lustre FID to operated on */ - lustre_fid hai_dfid; /* fid used for data access */ + struct lu_fid hai_fid; /* Lustre FID to operated on */ + struct lu_fid hai_dfid; /* fid used for data access */ struct hsm_extent hai_extent; /* byte range to operate on */ __u64 hai_cookie; /* action cookie from coordinator */ __u64 hai_gid; /* grouplock id */ @@ -1095,7 +1095,8 @@ struct hsm_action_list { __u32 padding1; char hal_fsname[0]; /* null-terminated */ /* struct hsm_action_item[hal_count] follows, aligned on 8-byte - boundaries. See hai_zero */ + * boundaries. See hai_zero + */ } __packed; #ifndef HAVE_CFS_SIZE_ROUND @@ -1157,7 +1158,7 @@ struct hsm_user_import { #define HP_FLAG_RETRY 0x02 struct hsm_progress { - lustre_fid hp_fid; + struct lu_fid hp_fid; __u64 hp_cookie; struct hsm_extent hp_extent; __u16 hp_flags; diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h index eb6b292b7b25..bb16ae980b98 100644 --- a/drivers/staging/lustre/lustre/include/lustre_cfg.h +++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h @@ -50,12 +50,13 @@ #define LUSTRE_CFG_MAX_BUFCOUNT 8 #define LCFG_HDR_SIZE(count) \ - cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)])) + cfs_size_round(offsetof(struct lustre_cfg, lcfg_buflens[(count)])) /** If the LCFG_REQUIRED bit is set in a configuration command, * then the client is required to understand this parameter * in order to mount the filesystem. If it does not understand - * a REQUIRED command the client mount will fail. */ + * a REQUIRED command the client mount will fail. 
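LCFG_HDR_SIZE() above sizes the fixed part of a lustre_cfg record: everything up to and including the variable-length lcfg_buflens[count] array, rounded up by cfs_size_round(). A sketch of that arithmetic on a stand-in struct; cfs_size_round() is modelled here as an 8-byte round-up.

#include <stdio.h>
#include <stddef.h>

struct toy_lustre_cfg {                 /* stand-in, not the real layout */
        unsigned int lcfg_version;
        unsigned int lcfg_command;
        unsigned int lcfg_bufcount;
        unsigned int lcfg_buflens[0];   /* one length per config buffer */
};

static size_t size_round(size_t v)      /* stand-in for cfs_size_round() */
{
        return (v + 7) & ~(size_t)7;
}

static size_t hdr_size(unsigned int count)
{
        return size_round(offsetof(struct toy_lustre_cfg, lcfg_buflens) +
                          count * sizeof(unsigned int));
}

int main(void)
{
        printf("header for 3 buffers: %zu bytes\n", hdr_size(3));
        return 0;
}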
+ */ #define LCFG_REQUIRED 0x0001000 enum lcfg_command_type { @@ -87,9 +88,11 @@ enum lcfg_command_type { LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */ LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */ LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre - * cleanup cleanup */ + * cleanup cleanup + */ LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set - *a proc parameters */ + * a proc parameters + */ }; struct lustre_cfg_bufs { @@ -128,7 +131,7 @@ static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs, { if (index >= LUSTRE_CFG_MAX_BUFCOUNT) return; - if (bufs == NULL) + if (!bufs) return; if (bufs->lcfg_bufcount <= index) @@ -158,7 +161,6 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index) int offset; int bufcount; - LASSERT (lcfg != NULL); LASSERT (index >= 0); bufcount = lcfg->lcfg_bufcount; @@ -191,7 +193,7 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index) return NULL; s = lustre_cfg_buf(lcfg, index); - if (s == NULL) + if (!s) return NULL; /* @@ -252,10 +254,6 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd, static inline void lustre_cfg_free(struct lustre_cfg *lcfg) { - int len; - - len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens); - kfree(lcfg); return; } diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h index 7c6933ffc9c1..95fd36063f55 100644 --- a/drivers/staging/lustre/lustre/include/lustre_disk.h +++ b/drivers/staging/lustre/lustre/include/lustre_disk.h @@ -65,7 +65,8 @@ /****************** mount command *********************/ /* The lmd is only used internally by Lustre; mount simply passes - everything as string options */ + * everything as string options + */ #define LMD_MAGIC 0xbdacbd03 #define LMD_PARAMS_MAXLEN 4096 @@ -79,23 +80,26 @@ struct lustre_mount_data { int lmd_recovery_time_soft; int lmd_recovery_time_hard; char *lmd_dev; /* device name */ - char *lmd_profile; /* client only */ + char *lmd_profile; /* client only */ char *lmd_mgssec; /* sptlrpc flavor to mgs */ - char *lmd_opts; /* lustre mount options (as opposed to - _device_ mount options) */ + char *lmd_opts; /* lustre mount options (as opposed to + * _device_ mount options) + */ char *lmd_params; /* lustre params */ - __u32 *lmd_exclude; /* array of OSTs to ignore */ - char *lmd_mgs; /* MGS nid */ - char *lmd_osd_type; /* OSD type */ + __u32 *lmd_exclude; /* array of OSTs to ignore */ + char *lmd_mgs; /* MGS nid */ + char *lmd_osd_type; /* OSD type */ }; #define LMD_FLG_SERVER 0x0001 /* Mounting a server */ #define LMD_FLG_CLIENT 0x0002 /* Mounting a client */ #define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */ #define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers, - no other services */ -#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing - existing MGS services */ + * no other services + */ +#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, + * reusing existing MGS services + */ #define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */ #define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */ #define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */ @@ -116,231 +120,6 @@ struct lustre_mount_data { #define LR_EXPIRE_INTERVALS 16 /**< number of intervals to track transno */ #define ENOENT_VERSION 1 /** 'virtual' version of non-existent object */ -#define LR_SERVER_SIZE 512 -#define LR_CLIENT_START 8192 -#define LR_CLIENT_SIZE 128 -#if LR_CLIENT_START < 
LR_SERVER_SIZE -#error "Can't have LR_CLIENT_START < LR_SERVER_SIZE" -#endif - -/* - * This limit is arbitrary (131072 clients on x86), but it is convenient to use - * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation. - * If we need more than 131072 clients (order-2 allocation on x86) then this - * should become an array of single-page pointers that are allocated on demand. - */ -#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8) -#define LR_MAX_CLIENTS (128 * 1024UL) -#else -#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8) -#endif - -/** COMPAT_146: this is an OST (temporary) */ -#define OBD_COMPAT_OST 0x00000002 -/** COMPAT_146: this is an MDT (temporary) */ -#define OBD_COMPAT_MDT 0x00000004 -/** 2.0 server, interop flag to show server version is changed */ -#define OBD_COMPAT_20 0x00000008 - -/** MDS handles LOV_OBJID file */ -#define OBD_ROCOMPAT_LOVOBJID 0x00000001 - -/** OST handles group subdirs */ -#define OBD_INCOMPAT_GROUPS 0x00000001 -/** this is an OST */ -#define OBD_INCOMPAT_OST 0x00000002 -/** this is an MDT */ -#define OBD_INCOMPAT_MDT 0x00000004 -/** common last_rvcd format */ -#define OBD_INCOMPAT_COMMON_LR 0x00000008 -/** FID is enabled */ -#define OBD_INCOMPAT_FID 0x00000010 -/** Size-on-MDS is enabled */ -#define OBD_INCOMPAT_SOM 0x00000020 -/** filesystem using iam format to store directory entries */ -#define OBD_INCOMPAT_IAM_DIR 0x00000040 -/** LMA attribute contains per-inode incompatible flags */ -#define OBD_INCOMPAT_LMA 0x00000080 -/** lmm_stripe_count has been shrunk from __u32 to __u16 and the remaining 16 - * bits are now used to store a generation. Once we start changing the layout - * and bumping the generation, old versions expecting a 32-bit lmm_stripe_count - * will be confused by interpreting stripe_count | gen << 16 as the actual - * stripe count */ -#define OBD_INCOMPAT_LMM_VER 0x00000100 -/** multiple OI files for MDT */ -#define OBD_INCOMPAT_MULTI_OI 0x00000200 - -/* Data stored per server at the head of the last_rcvd file. In le32 order. - This should be common to filter_internal.h, lustre_mds.h */ -struct lr_server_data { - __u8 lsd_uuid[40]; /* server UUID */ - __u64 lsd_last_transno; /* last completed transaction ID */ - __u64 lsd_compat14; /* reserved - compat with old last_rcvd */ - __u64 lsd_mount_count; /* incarnation number */ - __u32 lsd_feature_compat; /* compatible feature flags */ - __u32 lsd_feature_rocompat;/* read-only compatible feature flags */ - __u32 lsd_feature_incompat;/* incompatible feature flags */ - __u32 lsd_server_size; /* size of server data area */ - __u32 lsd_client_start; /* start of per-client data area */ - __u16 lsd_client_size; /* size of per-client data area */ - __u16 lsd_subdir_count; /* number of subdirectories for objects */ - __u64 lsd_catalog_oid; /* recovery catalog object id */ - __u32 lsd_catalog_ogen; /* recovery catalog inode generation */ - __u8 lsd_peeruuid[40]; /* UUID of MDS associated with this OST */ - __u32 lsd_osd_index; /* index number of OST in LOV */ - __u32 lsd_padding1; /* was lsd_mdt_index, unused in 2.4.0 */ - __u32 lsd_start_epoch; /* VBR: start epoch from last boot */ - /** transaction values since lsd_trans_table_time */ - __u64 lsd_trans_table[LR_EXPIRE_INTERVALS]; - /** start point of transno table below */ - __u32 lsd_trans_table_time; /* time of first slot in table above */ - __u32 lsd_expire_intervals; /* LR_EXPIRE_INTERVALS */ - __u8 lsd_padding[LR_SERVER_SIZE - 288]; -}; - -/* Data stored per client in the last_rcvd file. In le32 order. 
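The removed OBD_COMPAT_*/OBD_INCOMPAT_* definitions above follow the usual feature-flag convention: a reader may ignore unknown compat bits, but must refuse data whose incompat bits it does not understand. A standalone sketch of that check; the bit values are copied from the hunk, the supported set is illustrative.

#include <stdio.h>

/* Incompat bits copied from the (removed) hunk above. */
#define INCOMPAT_OST            0x00000002
#define INCOMPAT_MDT            0x00000004
#define INCOMPAT_COMMON_LR      0x00000008

#define SUPPORTED_INCOMPAT      (INCOMPAT_OST | INCOMPAT_COMMON_LR)

static int can_use(unsigned int feature_incompat)
{
        unsigned int unknown = feature_incompat & ~SUPPORTED_INCOMPAT;

        if (unknown)
                printf("unsupported incompat features %#x\n", unknown);
        return unknown == 0;
}

int main(void)
{
        printf("OST + common_lr: %d\n",
               can_use(INCOMPAT_OST | INCOMPAT_COMMON_LR));
        printf("with MDT bit:    %d\n", can_use(INCOMPAT_MDT));
        return 0;
}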
*/ -struct lsd_client_data { - __u8 lcd_uuid[40]; /* client UUID */ - __u64 lcd_last_transno; /* last completed transaction ID */ - __u64 lcd_last_xid; /* xid for the last transaction */ - __u32 lcd_last_result; /* result from last RPC */ - __u32 lcd_last_data; /* per-op data (disposition for open &c.) */ - /* for MDS_CLOSE requests */ - __u64 lcd_last_close_transno; /* last completed transaction ID */ - __u64 lcd_last_close_xid; /* xid for the last transaction */ - __u32 lcd_last_close_result; /* result from last RPC */ - __u32 lcd_last_close_data; /* per-op data */ - /* VBR: last versions */ - __u64 lcd_pre_versions[4]; - __u32 lcd_last_epoch; - /** orphans handling for delayed export rely on that */ - __u32 lcd_first_epoch; - __u8 lcd_padding[LR_CLIENT_SIZE - 128]; -}; - -/* bug20354: the lcd_uuid for export of clients may be wrong */ -static inline void check_lcd(char *obd_name, int index, - struct lsd_client_data *lcd) -{ - int length = sizeof(lcd->lcd_uuid); - - if (strnlen((char *)lcd->lcd_uuid, length) == length) { - lcd->lcd_uuid[length - 1] = '\0'; - - LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n", - lcd->lcd_uuid, obd_name, index); - } -} - -/* last_rcvd handling */ -static inline void lsd_le_to_cpu(struct lr_server_data *buf, - struct lr_server_data *lsd) -{ - int i; - - memcpy(lsd->lsd_uuid, buf->lsd_uuid, sizeof(lsd->lsd_uuid)); - lsd->lsd_last_transno = le64_to_cpu(buf->lsd_last_transno); - lsd->lsd_compat14 = le64_to_cpu(buf->lsd_compat14); - lsd->lsd_mount_count = le64_to_cpu(buf->lsd_mount_count); - lsd->lsd_feature_compat = le32_to_cpu(buf->lsd_feature_compat); - lsd->lsd_feature_rocompat = le32_to_cpu(buf->lsd_feature_rocompat); - lsd->lsd_feature_incompat = le32_to_cpu(buf->lsd_feature_incompat); - lsd->lsd_server_size = le32_to_cpu(buf->lsd_server_size); - lsd->lsd_client_start = le32_to_cpu(buf->lsd_client_start); - lsd->lsd_client_size = le16_to_cpu(buf->lsd_client_size); - lsd->lsd_subdir_count = le16_to_cpu(buf->lsd_subdir_count); - lsd->lsd_catalog_oid = le64_to_cpu(buf->lsd_catalog_oid); - lsd->lsd_catalog_ogen = le32_to_cpu(buf->lsd_catalog_ogen); - memcpy(lsd->lsd_peeruuid, buf->lsd_peeruuid, sizeof(lsd->lsd_peeruuid)); - lsd->lsd_osd_index = le32_to_cpu(buf->lsd_osd_index); - lsd->lsd_padding1 = le32_to_cpu(buf->lsd_padding1); - lsd->lsd_start_epoch = le32_to_cpu(buf->lsd_start_epoch); - for (i = 0; i < LR_EXPIRE_INTERVALS; i++) - lsd->lsd_trans_table[i] = le64_to_cpu(buf->lsd_trans_table[i]); - lsd->lsd_trans_table_time = le32_to_cpu(buf->lsd_trans_table_time); - lsd->lsd_expire_intervals = le32_to_cpu(buf->lsd_expire_intervals); -} - -static inline void lsd_cpu_to_le(struct lr_server_data *lsd, - struct lr_server_data *buf) -{ - int i; - - memcpy(buf->lsd_uuid, lsd->lsd_uuid, sizeof(buf->lsd_uuid)); - buf->lsd_last_transno = cpu_to_le64(lsd->lsd_last_transno); - buf->lsd_compat14 = cpu_to_le64(lsd->lsd_compat14); - buf->lsd_mount_count = cpu_to_le64(lsd->lsd_mount_count); - buf->lsd_feature_compat = cpu_to_le32(lsd->lsd_feature_compat); - buf->lsd_feature_rocompat = cpu_to_le32(lsd->lsd_feature_rocompat); - buf->lsd_feature_incompat = cpu_to_le32(lsd->lsd_feature_incompat); - buf->lsd_server_size = cpu_to_le32(lsd->lsd_server_size); - buf->lsd_client_start = cpu_to_le32(lsd->lsd_client_start); - buf->lsd_client_size = cpu_to_le16(lsd->lsd_client_size); - buf->lsd_subdir_count = cpu_to_le16(lsd->lsd_subdir_count); - buf->lsd_catalog_oid = cpu_to_le64(lsd->lsd_catalog_oid); - buf->lsd_catalog_ogen = 
cpu_to_le32(lsd->lsd_catalog_ogen); - memcpy(buf->lsd_peeruuid, lsd->lsd_peeruuid, sizeof(buf->lsd_peeruuid)); - buf->lsd_osd_index = cpu_to_le32(lsd->lsd_osd_index); - buf->lsd_padding1 = cpu_to_le32(lsd->lsd_padding1); - buf->lsd_start_epoch = cpu_to_le32(lsd->lsd_start_epoch); - for (i = 0; i < LR_EXPIRE_INTERVALS; i++) - buf->lsd_trans_table[i] = cpu_to_le64(lsd->lsd_trans_table[i]); - buf->lsd_trans_table_time = cpu_to_le32(lsd->lsd_trans_table_time); - buf->lsd_expire_intervals = cpu_to_le32(lsd->lsd_expire_intervals); -} - -static inline void lcd_le_to_cpu(struct lsd_client_data *buf, - struct lsd_client_data *lcd) -{ - memcpy(lcd->lcd_uuid, buf->lcd_uuid, sizeof (lcd->lcd_uuid)); - lcd->lcd_last_transno = le64_to_cpu(buf->lcd_last_transno); - lcd->lcd_last_xid = le64_to_cpu(buf->lcd_last_xid); - lcd->lcd_last_result = le32_to_cpu(buf->lcd_last_result); - lcd->lcd_last_data = le32_to_cpu(buf->lcd_last_data); - lcd->lcd_last_close_transno = le64_to_cpu(buf->lcd_last_close_transno); - lcd->lcd_last_close_xid = le64_to_cpu(buf->lcd_last_close_xid); - lcd->lcd_last_close_result = le32_to_cpu(buf->lcd_last_close_result); - lcd->lcd_last_close_data = le32_to_cpu(buf->lcd_last_close_data); - lcd->lcd_pre_versions[0] = le64_to_cpu(buf->lcd_pre_versions[0]); - lcd->lcd_pre_versions[1] = le64_to_cpu(buf->lcd_pre_versions[1]); - lcd->lcd_pre_versions[2] = le64_to_cpu(buf->lcd_pre_versions[2]); - lcd->lcd_pre_versions[3] = le64_to_cpu(buf->lcd_pre_versions[3]); - lcd->lcd_last_epoch = le32_to_cpu(buf->lcd_last_epoch); - lcd->lcd_first_epoch = le32_to_cpu(buf->lcd_first_epoch); -} - -static inline void lcd_cpu_to_le(struct lsd_client_data *lcd, - struct lsd_client_data *buf) -{ - memcpy(buf->lcd_uuid, lcd->lcd_uuid, sizeof (lcd->lcd_uuid)); - buf->lcd_last_transno = cpu_to_le64(lcd->lcd_last_transno); - buf->lcd_last_xid = cpu_to_le64(lcd->lcd_last_xid); - buf->lcd_last_result = cpu_to_le32(lcd->lcd_last_result); - buf->lcd_last_data = cpu_to_le32(lcd->lcd_last_data); - buf->lcd_last_close_transno = cpu_to_le64(lcd->lcd_last_close_transno); - buf->lcd_last_close_xid = cpu_to_le64(lcd->lcd_last_close_xid); - buf->lcd_last_close_result = cpu_to_le32(lcd->lcd_last_close_result); - buf->lcd_last_close_data = cpu_to_le32(lcd->lcd_last_close_data); - buf->lcd_pre_versions[0] = cpu_to_le64(lcd->lcd_pre_versions[0]); - buf->lcd_pre_versions[1] = cpu_to_le64(lcd->lcd_pre_versions[1]); - buf->lcd_pre_versions[2] = cpu_to_le64(lcd->lcd_pre_versions[2]); - buf->lcd_pre_versions[3] = cpu_to_le64(lcd->lcd_pre_versions[3]); - buf->lcd_last_epoch = cpu_to_le32(lcd->lcd_last_epoch); - buf->lcd_first_epoch = cpu_to_le32(lcd->lcd_first_epoch); -} - -static inline __u64 lcd_last_transno(struct lsd_client_data *lcd) -{ - return (lcd->lcd_last_transno > lcd->lcd_last_close_transno ? - lcd->lcd_last_transno : lcd->lcd_last_close_transno); -} - -static inline __u64 lcd_last_xid(struct lsd_client_data *lcd) -{ - return (lcd->lcd_last_xid > lcd->lcd_last_close_xid ? 
- lcd->lcd_last_xid : lcd->lcd_last_close_xid); -} - /****************** superblock additional info *********************/ struct ll_sb_info; @@ -360,7 +139,8 @@ struct lustre_sb_info { char lsi_osd_type[16]; char lsi_fstype[16]; struct backing_dev_info lsi_bdi; /* each client mountpoint needs - own backing_dev_info */ + * own backing_dev_info + */ }; #define LSI_UMOUNT_FAILOVER 0x00200000 diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h index 9b319f1df025..8b0364f71129 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h @@ -69,7 +69,7 @@ struct obd_device; /** * LDLM non-error return states */ -typedef enum { +enum ldlm_error { ELDLM_OK = 0, ELDLM_LOCK_CHANGED = 300, @@ -80,7 +80,7 @@ typedef enum { ELDLM_NAMESPACE_EXISTS = 400, ELDLM_BAD_NAMESPACE = 401 -} ldlm_error_t; +}; /** * LDLM namespace type. @@ -145,16 +145,17 @@ typedef enum { #define LCK_COMPAT_COS (LCK_COS) /** @} Lock Compatibility Matrix */ -extern ldlm_mode_t lck_compat_array[]; +extern enum ldlm_mode lck_compat_array[]; -static inline void lockmode_verify(ldlm_mode_t mode) +static inline void lockmode_verify(enum ldlm_mode mode) { - LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); + LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); } -static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode) +static inline int lockmode_compat(enum ldlm_mode exist_mode, + enum ldlm_mode new_mode) { - return (lck_compat_array[exist_mode] & new_mode); + return (lck_compat_array[exist_mode] & new_mode); } /* @@ -249,7 +250,8 @@ struct ldlm_pool { /** Current biggest client lock volume. Protected by pl_lock. */ __u64 pl_client_lock_volume; /** Lock volume factor. SLV on client is calculated as following: - * server_slv * lock_volume_factor. */ + * server_slv * lock_volume_factor. + */ atomic_t pl_lock_volume_factor; /** Time when last SLV from server was obtained. */ time64_t pl_recalc_time; @@ -295,10 +297,10 @@ struct ldlm_valblock_ops { * LDLM pools related, type of lock pool in the namespace. * Greedy means release cached locks aggressively */ -typedef enum { +enum ldlm_appetite { LDLM_NAMESPACE_GREEDY = 1 << 0, LDLM_NAMESPACE_MODEST = 1 << 1 -} ldlm_appetite_t; +}; struct ldlm_ns_bucket { /** back pointer to namespace */ @@ -317,7 +319,7 @@ enum { LDLM_NSS_LAST }; -typedef enum { +enum ldlm_ns_type { /** invalid type */ LDLM_NS_TYPE_UNKNOWN = 0, /** mdc namespace */ @@ -332,7 +334,7 @@ typedef enum { LDLM_NS_TYPE_MGC, /** mgs namespace */ LDLM_NS_TYPE_MGT, -} ldlm_ns_type_t; +}; /** * LDLM Namespace. @@ -373,7 +375,7 @@ struct ldlm_namespace { /** * Namespace connect flags supported by server (may be changed via - * /proc, LRU resize may be disabled/enabled). + * sysfs, LRU resize may be disabled/enabled). */ __u64 ns_connect_flags; @@ -439,7 +441,7 @@ struct ldlm_namespace { /** LDLM pool structure for this namespace */ struct ldlm_pool ns_pool; /** Definition of how eagerly unused locks will be released from LRU */ - ldlm_appetite_t ns_appetite; + enum ldlm_appetite ns_appetite; /** Limit of parallel AST RPC count. 
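lockmode_compat() above answers "can these two lock modes coexist?" by indexing lck_compat_array with the existing mode and testing the requested mode's bit. A standalone sketch of the same table-lookup idea with a two-mode toy table; it is not Lustre's real compatibility matrix.

#include <stdio.h>

enum toy_mode {
        TOY_EX = 1,     /* exclusive */
        TOY_PR = 2,     /* protected read */
        TOY_MAX
};

/* toy_compat[existing] is the bitmask of modes it can coexist with. */
static const unsigned int toy_compat[TOY_MAX + 1] = {
        [TOY_EX] = 0,           /* EX tolerates nothing */
        [TOY_PR] = TOY_PR,      /* readers coexist with readers */
};

static int mode_compat(enum toy_mode existing, enum toy_mode requested)
{
        return !!(toy_compat[existing] & requested);
}

int main(void)
{
        printf("PR vs PR: %d, EX vs PR: %d\n",
               mode_compat(TOY_PR, TOY_PR), mode_compat(TOY_EX, TOY_PR));
        return 0;
}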
*/ unsigned ns_max_parallel_ast; @@ -465,7 +467,6 @@ struct ldlm_namespace { */ static inline int ns_connect_cancelset(struct ldlm_namespace *ns) { - LASSERT(ns != NULL); return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET); } @@ -474,14 +475,12 @@ static inline int ns_connect_cancelset(struct ldlm_namespace *ns) */ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns) { - LASSERT(ns != NULL); return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE); } static inline void ns_register_cancel(struct ldlm_namespace *ns, ldlm_cancel_for_recovery arg) { - LASSERT(ns != NULL); ns->ns_cancel_for_recovery = arg; } @@ -503,7 +502,8 @@ struct ldlm_glimpse_work { struct list_head gl_list; /* linkage to other gl work structs */ __u32 gl_flags;/* see LDLM_GL_WORK_* below */ union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in - * glimpse callback request */ + * glimpse callback request + */ }; /** The ldlm_glimpse_work is allocated on the stack and should not be freed. */ @@ -512,8 +512,9 @@ struct ldlm_glimpse_work { /** Interval node data for each LDLM_EXTENT lock. */ struct ldlm_interval { struct interval_node li_node; /* node for tree management */ - struct list_head li_group; /* the locks which have the same - * policy - group of the policy */ + struct list_head li_group; /* the locks which have the same + * policy - group of the policy + */ }; #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node) @@ -527,7 +528,7 @@ struct ldlm_interval { struct ldlm_interval_tree { /** Tree size. */ int lit_size; - ldlm_mode_t lit_mode; /* lock mode */ + enum ldlm_mode lit_mode; /* lock mode */ struct interval_node *lit_root; /* actual ldlm_interval */ }; @@ -535,12 +536,13 @@ struct ldlm_interval_tree { #define LUSTRE_TRACKS_LOCK_EXP_REFS (0) /** Cancel flags. */ -typedef enum { +enum ldlm_cancel_flags { LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */ LCF_LOCAL = 0x2, /* Cancel locks locally, not notifing server */ LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST - * in the same RPC */ -} ldlm_cancel_flags_t; + * in the same RPC + */ +}; struct ldlm_flock { __u64 start; @@ -559,7 +561,7 @@ typedef union { struct ldlm_inodebits l_inodebits; } ldlm_policy_data_t; -void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, +void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, const ldlm_wire_policy_data_t *wpolicy, ldlm_policy_data_t *lpolicy); @@ -637,11 +639,11 @@ struct ldlm_lock { * Requested mode. * Protected by lr_lock. */ - ldlm_mode_t l_req_mode; + enum ldlm_mode l_req_mode; /** * Granted mode, also protected by lr_lock. */ - ldlm_mode_t l_granted_mode; + enum ldlm_mode l_granted_mode; /** Lock completion handler pointer. Called when lock is granted. */ ldlm_completion_callback l_completion_ast; /** @@ -841,20 +843,19 @@ struct ldlm_resource { /** * protected by lr_lock - * @{ */ + * @{ + */ /** List of locks in granted state */ struct list_head lr_granted; /** * List of locks that could not be granted due to conflicts and - * that are waiting for conflicts to go away */ + * that are waiting for conflicts to go away + */ struct list_head lr_waiting; /** @} */ - /* XXX No longer needed? Remove ASAP */ - ldlm_mode_t lr_most_restr; - /** Type of locks this resource can hold. Only one type per resource. 
*/ - ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ + enum ldlm_type lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ /** Resource name */ struct ldlm_res_id lr_name; @@ -921,7 +922,7 @@ static inline int ldlm_lvbo_init(struct ldlm_resource *res) { struct ldlm_namespace *ns = ldlm_res_to_ns(res); - if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL) + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) return ns->ns_lvbo->lvbo_init(res); return 0; @@ -931,7 +932,7 @@ static inline int ldlm_lvbo_size(struct ldlm_lock *lock) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL) + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size) return ns->ns_lvbo->lvbo_size(lock); return 0; @@ -941,10 +942,9 @@ static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - if (ns->ns_lvbo != NULL) { - LASSERT(ns->ns_lvbo->lvbo_fill != NULL); + if (ns->ns_lvbo) return ns->ns_lvbo->lvbo_fill(lock, buf, len); - } + return 0; } @@ -1015,7 +1015,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, /** Non-rate-limited lock printing function for debugging purposes. */ #define LDLM_DEBUG(lock, fmt, a...) do { \ - if (likely(lock != NULL)) { \ + if (likely(lock)) { \ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \ ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \ "### " fmt, ##a); \ @@ -1025,7 +1025,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, } while (0) typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags, - int first_enq, ldlm_error_t *err, + int first_enq, enum ldlm_error *err, struct list_head *work_list); /** @@ -1042,7 +1042,8 @@ typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *); * * LDLM provides for a way to iterate through every lock on a resource or * namespace or every resource in a namespace. 
- * @{ */ + * @{ + */ int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *, ldlm_iterator_t iter, void *data); /** @} ldlm_iterator */ @@ -1091,7 +1092,7 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags) struct ldlm_lock *lock; lock = __ldlm_handle2lock(h, flags); - if (lock != NULL) + if (lock) LDLM_LOCK_REF_DEL(lock); return lock; } @@ -1111,7 +1112,7 @@ static inline int ldlm_res_lvbo_update(struct ldlm_resource *res, return 0; } -int ldlm_error2errno(ldlm_error_t error); +int ldlm_error2errno(enum ldlm_error error); #if LUSTRE_TRACKS_LOCK_EXP_REFS void ldlm_dump_export_locks(struct obd_export *exp); @@ -1168,12 +1169,13 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode); void ldlm_lock_fail_match_locked(struct ldlm_lock *lock); void ldlm_lock_allow_match(struct ldlm_lock *lock); void ldlm_lock_allow_match_locked(struct ldlm_lock *lock); -ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *, ldlm_type_t type, - ldlm_policy_data_t *, ldlm_mode_t mode, - struct lustre_handle *, int unref); -ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, - __u64 *bits); +enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, + const struct ldlm_res_id *, + enum ldlm_type type, ldlm_policy_data_t *, + enum ldlm_mode mode, struct lustre_handle *, + int unref); +enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits); void ldlm_lock_cancel(struct ldlm_lock *lock); void ldlm_lock_dump_handle(int level, struct lustre_handle *); void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); @@ -1181,8 +1183,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); /* resource.c */ struct ldlm_namespace * ldlm_namespace_new(struct obd_device *obd, char *name, - ldlm_side_t client, ldlm_appetite_t apt, - ldlm_ns_type_t ns_type); + ldlm_side_t client, enum ldlm_appetite apt, + enum ldlm_ns_type ns_type); int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags); void ldlm_namespace_get(struct ldlm_namespace *ns); void ldlm_namespace_put(struct ldlm_namespace *ns); @@ -1193,7 +1195,7 @@ void ldlm_debugfs_cleanup(void); struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, const struct ldlm_res_id *, - ldlm_type_t type, int create); + enum ldlm_type type, int create); int ldlm_resource_putref(struct ldlm_resource *res); void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head, @@ -1219,7 +1221,8 @@ int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *, * These AST handlers are typically used for server-side local locks and are * also used by client-side lock handlers to perform minimum level base * processing. - * @{ */ + * @{ + */ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data); int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); /** @} ldlm_local_ast */ @@ -1227,7 +1230,8 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); /** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users. * These are typically used by client and server (*_local versions) * to obtain and release locks. 
- * @{ */ + * @{ + */ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, struct ldlm_enqueue_info *einfo, const struct ldlm_res_id *res_id, @@ -1244,29 +1248,32 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct list_head *cancels, int count); int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode, + enum ldlm_type type, __u8 with_policy, + enum ldlm_mode mode, __u64 *flags, void *lvb, __u32 lvb_len, struct lustre_handle *lockh, int rc); int ldlm_cli_update_pool(struct ptlrpc_request *req); int ldlm_cli_cancel(struct lustre_handle *lockh, - ldlm_cancel_flags_t cancel_flags); + enum ldlm_cancel_flags cancel_flags); int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *, - ldlm_cancel_flags_t flags, void *opaque); + enum ldlm_cancel_flags flags, void *opaque); int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque); int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, ldlm_policy_data_t *policy, - ldlm_mode_t mode, __u64 lock_flags, - ldlm_cancel_flags_t cancel_flags, void *opaque); + enum ldlm_mode mode, __u64 lock_flags, + enum ldlm_cancel_flags cancel_flags, + void *opaque); int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - ldlm_cancel_flags_t flags); + enum ldlm_cancel_flags flags); int ldlm_cli_cancel_list(struct list_head *head, int count, - struct ptlrpc_request *req, ldlm_cancel_flags_t flags); + struct ptlrpc_request *req, + enum ldlm_cancel_flags flags); /** @} ldlm_cli_api */ /* mds/handler.c */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h index 0d3ed87d38e1..7f2ba2ffe0eb 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h @@ -57,7 +57,8 @@ /** * Server placed lock on granted list, or a recovering client wants the - * lock added to the granted list, no questions asked. */ + * lock added to the granted list, no questions asked. + */ #define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */ #define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1) #define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1) @@ -65,7 +66,8 @@ /** * Server placed lock on conv list, or a recovering client wants the lock - * added to the conv list, no questions asked. */ + * added to the conv list, no questions asked. + */ #define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */ #define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2) #define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2) @@ -73,7 +75,8 @@ /** * Server placed lock on wait list, or a recovering client wants the lock - * added to the wait list, no questions asked. */ + * added to the wait list, no questions asked. + */ #define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */ #define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3) #define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3) @@ -87,7 +90,8 @@ /** * Lock is being replayed. This could probably be implied by the fact that - * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */ + * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. 
+ */ #define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */ #define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8) #define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8) @@ -125,7 +129,8 @@ /** * Server told not to wait if blocked. For AGL, OST will not send glimpse - * callback. */ + * callback. + */ #define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */ #define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18) #define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18) @@ -141,7 +146,8 @@ * Immediately cancel such locks when they block some other locks. Send * cancel notification to original lock holder, but expect no reply. This * is for clients (like liblustre) that cannot be expected to reliably - * response to blocking AST. */ + * response to blocking AST. + */ #define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */ #define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23) #define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23) @@ -164,7 +170,8 @@ /** * Used for marking lock as a target for -EINTR while cp_ast sleep emulation - * + race with upcoming bl_ast. */ + * + race with upcoming bl_ast. + */ #define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */ #define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32) #define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32) @@ -172,7 +179,8 @@ /** * Used while processing the unused list to know that we have already - * handled this lock and decided to skip it. */ + * handled this lock and decided to skip it. + */ #define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */ #define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33) #define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33) @@ -231,7 +239,8 @@ * The proper fix is to do the granting inside of the completion AST, * which can be replaced with a LVB-aware wrapping function for OSC locks. * That change is pretty high-risk, though, and would need a lot more - * testing. */ + * testing. + */ #define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */ #define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41) #define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41) @@ -243,7 +252,8 @@ * dirty pages. It can remain on the granted list during this whole time. * Threads racing to update the KMS after performing their writeback need * to know to exclude each other's locks from the calculation as they walk - * the granted list. */ + * the granted list. + */ #define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */ #define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42) #define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42) @@ -263,7 +273,8 @@ /** * optimization hint: LDLM can run blocking callback from current context - * w/o involving separate thread. in order to decrease cs rate */ + * w/o involving separate thread. in order to decrease cs rate + */ #define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */ #define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45) #define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45) @@ -280,7 +291,8 @@ * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is * dropped to let ldlm_callback_handler() return EINVAL to the server. It * is used when ELC RPC is already prepared and is waiting for rpc_lock, - * too late to send a separate CANCEL RPC. */ + * too late to send a separate CANCEL RPC. 
+ */ #define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */ #define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46) #define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46) @@ -295,7 +307,8 @@ /** * Don't put lock into the LRU list, so that it is not canceled due * to aging. Used by MGC locks, they are cancelled only at unmount or - * by callback. */ + * by callback. + */ #define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */ #define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48) #define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48) @@ -304,7 +317,8 @@ /** * Set for locks that failed and where the server has been notified. * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */ #define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49) #define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49) @@ -315,7 +329,8 @@ * be destroyed when last reference to them is released. Set by * ldlm_lock_destroy_internal(). * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */ #define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50) #define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50) @@ -333,7 +348,8 @@ * NB: compared with check_res_locked(), checking this bit is cheaper. * Also, spin_is_locked() is deprecated for kernel code; one reason is * because it works only for SMP so user needs to add extra macros like - * LASSERT_SPIN_LOCKED for uniprocessor kernels. */ + * LASSERT_SPIN_LOCKED for uniprocessor kernels. + */ #define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */ #define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52) #define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52) @@ -343,7 +359,8 @@ * It's set once we call ldlm_add_waiting_lock_res_locked() to start the * lock-timeout timer and it will never be reset. * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */ #define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53) #define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53) @@ -365,10 +382,10 @@ #define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0) /** set a ldlm_lock flag bit */ -#define LDLM_SET_FLAG(_l, _b) (((_l)->l_flags |= (_b)) +#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b)) /** clear a ldlm_lock flag bit */ -#define LDLM_CLEAR_FLAG(_l, _b) (((_l)->l_flags &= ~(_b)) +#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b)) /** Mask of flags inherited from parent lock when doing intents. 
*/ #define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h index 311e5aa9b0db..3014d27e6dc2 100644 --- a/drivers/staging/lustre/lustre/include/lustre_export.h +++ b/drivers/staging/lustre/lustre/include/lustre_export.h @@ -50,62 +50,6 @@ #include "lustre/lustre_idl.h" #include "lustre_dlm.h" -struct mds_client_data; -struct mdt_client_data; -struct mds_idmap_table; -struct mdt_idmap_table; - -/** - * Target-specific export data - */ -struct tg_export_data { - /** Protects led_lcd below */ - struct mutex ted_lcd_lock; - /** Per-client data for each export */ - struct lsd_client_data *ted_lcd; - /** Offset of record in last_rcvd file */ - loff_t ted_lr_off; - /** Client index in last_rcvd file */ - int ted_lr_idx; -}; - -/** - * MDT-specific export data - */ -struct mdt_export_data { - struct tg_export_data med_ted; - /** List of all files opened by client on this MDT */ - struct list_head med_open_head; - spinlock_t med_open_lock; /* med_open_head, mfd_list */ - /** Bitmask of all ibit locks this MDT understands */ - __u64 med_ibits_known; - struct mutex med_idmap_mutex; - struct lustre_idmap_table *med_idmap; -}; - -struct ec_export_data { /* echo client */ - struct list_head eced_locks; -}; - -/* In-memory access to client data from OST struct */ -/** Filter (oss-side) specific import data */ -struct filter_export_data { - struct tg_export_data fed_ted; - spinlock_t fed_lock; /**< protects fed_mod_list */ - long fed_dirty; /* in bytes */ - long fed_grant; /* in bytes */ - struct list_head fed_mod_list; /* files being modified */ - int fed_mod_count;/* items in fed_writing list */ - long fed_pending; /* bytes just being written */ - __u32 fed_group; - __u8 fed_pagesize; /* log2 of client page size */ -}; - -struct mgs_export_data { - struct list_head med_clients; /* mgc fs client via this exp */ - spinlock_t med_lock; /* protect med_clients */ -}; - enum obd_option { OBD_OPT_FORCE = 0x0001, OBD_OPT_FAILOVER = 0x0002, @@ -179,7 +123,8 @@ struct obd_export { */ spinlock_t exp_lock; /** Compatibility flags for this export are embedded into - * exp_connect_data */ + * exp_connect_data + */ struct obd_connect_data exp_connect_data; enum obd_option exp_flags; unsigned long exp_failed:1, @@ -200,22 +145,8 @@ struct obd_export { /** blocking dlm lock list, protected by exp_bl_list_lock */ struct list_head exp_bl_list; spinlock_t exp_bl_list_lock; - - /** Target specific data */ - union { - struct tg_export_data eu_target_data; - struct mdt_export_data eu_mdt_data; - struct filter_export_data eu_filter_data; - struct ec_export_data eu_ec_data; - struct mgs_export_data eu_mgs_data; - } u; }; -#define exp_target_data u.eu_target_data -#define exp_mdt_data u.eu_mdt_data -#define exp_filter_data u.eu_filter_data -#define exp_ec_data u.eu_ec_data - static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp) { return &exp->exp_connect_data.ocd_connect_flags; @@ -228,7 +159,6 @@ static inline __u64 exp_connect_flags(struct obd_export *exp) static inline int exp_max_brw_size(struct obd_export *exp) { - LASSERT(exp != NULL); if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE) return exp->exp_connect_data.ocd_brw_size; @@ -242,19 +172,16 @@ static inline int exp_connect_multibulk(struct obd_export *exp) static inline int exp_connect_cancelset(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET); } static inline int 
exp_connect_lru_resize(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE); } static inline int exp_connect_rmtclient(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT); } @@ -268,14 +195,11 @@ static inline int client_is_remote(struct obd_export *exp) static inline int exp_connect_vbr(struct obd_export *exp) { - LASSERT(exp != NULL); - LASSERT(exp->exp_connection); return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR); } static inline int exp_connect_som(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM); } @@ -288,7 +212,6 @@ static inline int imp_connect_lru_resize(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE); } @@ -300,7 +223,6 @@ static inline int exp_connect_layout(struct obd_export *exp) static inline bool exp_connect_lvb_type(struct obd_export *exp) { - LASSERT(exp != NULL); if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE) return true; else @@ -311,7 +233,6 @@ static inline bool imp_connect_lvb_type(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE) return true; @@ -331,13 +252,19 @@ static inline bool imp_connect_disp_stripe(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE; } struct obd_export *class_conn2export(struct lustre_handle *conn); +#define KKUC_CT_DATA_MAGIC 0x092013cea +struct kkuc_ct_data { + __u32 kcd_magic; + struct obd_uuid kcd_uuid; + __u32 kcd_archive; +}; + /** @} export */ #endif /* __EXPORT_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h index 9b1a9c695113..ab4a92390a43 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fid.h +++ b/drivers/staging/lustre/lustre/include/lustre_fid.h @@ -251,7 +251,8 @@ static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid) /* For new FS (>= 2.4), the root FID will be changed to * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4), - * the root FID will still be IGIF */ + * the root FID will still be IGIF + */ static inline int fid_is_root(const struct lu_fid *fid) { return unlikely((fid_seq(fid) == FID_SEQ_ROOT && @@ -294,7 +295,8 @@ static inline int fid_is_namespace_visible(const struct lu_fid *fid) const __u64 seq = fid_seq(fid); /* Here, we cannot distinguish whether the normal FID is for OST - * object or not. It is caller's duty to check more if needed. */ + * object or not. It is caller's duty to check more if needed. 
+ */ return (!fid_is_last_id(fid) && (fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) || fid_is_root(fid) || fid_is_dot_lustre(fid); @@ -433,7 +435,7 @@ fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res) */ static inline struct ldlm_res_id * fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid, - struct ldlm_res_id *res) + struct ldlm_res_id *res) { fid_build_reg_res_name(glb_fid, res); res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid); @@ -516,7 +518,8 @@ static inline int ostid_res_name_eq(struct ost_id *oi, struct ldlm_res_id *name) { /* Note: it is just a trick here to save some effort, probably the - * correct way would be turn them into the FID and compare */ + * correct way would be to turn them into the FID and compare + */ if (fid_seq_is_mdt0(ostid_seq(oi))) { return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) && name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi); @@ -589,12 +592,14 @@ static inline __u64 fid_flatten(const struct lu_fid *fid) static inline __u32 fid_hash(const struct lu_fid *f, int bits) { /* all objects with same id and different versions will belong to same - * collisions list. */ + * collisions list. + */ return hash_long(fid_flatten(f), bits); } /** - * map fid to 32 bit value for ino on 32bit systems. */ + * map fid to 32 bit value for ino on 32bit systems. + */ static inline __u32 fid_flatten32(const struct lu_fid *fid) { __u32 ino; @@ -611,7 +616,8 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid) * that inodes generated at about the same time have a reduced chance * of collisions. This will give a period of 2^12 = 1024 unique clients * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects - * (from OID), or up to 128M inodes without collisions for new files. */ + * (from OID), or up to 128M inodes without collisions for new files. + */ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) + (seq >> (64 - (40-8)) & 0xffffff00) + (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8); diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h index 551162624974..4cf2b0e61672 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fld.h +++ b/drivers/staging/lustre/lustre/include/lustre_fld.h @@ -71,50 +71,41 @@ struct lu_fld_target { struct lu_server_fld { /** * super sequence controller export, needed to forward fld - * lookup request. */ + * lookup request. + */ struct obd_export *lsf_control_exp; - /** - * Client FLD cache. */ + /** Client FLD cache. */ struct fld_cache *lsf_cache; - /** - * Protect index modifications */ + /** Protect index modifications */ struct mutex lsf_lock; - /** - * Fld service name in form "fld-srv-lustre-MDTXXX" */ + /** Fld service name in form "fld-srv-lustre-MDTXXX" */ char lsf_name[LUSTRE_MDT_MAXNAMELEN]; }; struct lu_client_fld { - /** - * Client side debugfs entry. */ + /** Client side debugfs entry. */ struct dentry *lcf_debugfs_entry; - /** - * List of exports client FLD knows about. */ + /** List of exports client FLD knows about. */ struct list_head lcf_targets; - /** - * Current hash to be used to chose an export. */ + /** Current hash to be used to choose an export. */ struct lu_fld_hash *lcf_hash; - /** - * Exports count. */ + /** Exports count. */ int lcf_count; - /** - * Lock protecting exports list and fld_hash. */ + /** Lock protecting exports list and fld_hash. */ spinlock_t lcf_lock; - /** - * Client FLD cache.
*/ struct fld_cache *lcf_cache; - /** - * Client fld debugfs entry name. */ + /** Client fld debugfs entry name. */ char lcf_name[LUSTRE_MDT_MAXNAMELEN]; int lcf_flags; diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h index f39780ae4c8a..27f169d2ed34 100644 --- a/drivers/staging/lustre/lustre/include/lustre_handles.h +++ b/drivers/staging/lustre/lustre/include/lustre_handles.h @@ -65,7 +65,8 @@ struct portals_handle_ops { * * Now you're able to assign the results of cookie2handle directly to an * ldlm_lock. If it's not at the top, you'll want to use container_of() - * to compute the start of the structure based on the handle field. */ + * to compute the start of the structure based on the handle field. + */ struct portals_handle { struct list_head h_link; __u64 h_cookie; diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h index 4e4230e94c11..dac2d84d8266 100644 --- a/drivers/staging/lustre/lustre/include/lustre_import.h +++ b/drivers/staging/lustre/lustre/include/lustre_import.h @@ -292,7 +292,8 @@ struct obd_import { /* need IR MNE swab */ imp_need_mne_swab:1, /* import must be reconnected instead of - * chose new connection */ + * choosing a new connection + */ imp_force_reconnect:1, /* import has tried to connect with server */ imp_connect_tried:1; diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h new file mode 100644 index 000000000000..970610b6de89 --- /dev/null +++ b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h @@ -0,0 +1,55 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2013 Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * + * Author: Nathan Rutman <nathan.rutman@sun.com> + * + * Kernel <-> userspace communication routines. + * The definitions below are used in the kernel and userspace.
+ */ + +#ifndef __LUSTRE_KERNELCOMM_H__ +#define __LUSTRE_KERNELCOMM_H__ + +/* For declarations shared with userspace */ +#include "uapi_kernelcomm.h" + +/* prototype for callback function on kuc groups */ +typedef int (*libcfs_kkuc_cb_t)(void *data, void *cb_arg); + +/* Kernel methods */ +int libcfs_kkuc_msg_put(struct file *fp, void *payload); +int libcfs_kkuc_group_put(unsigned int group, void *payload); +int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group, + void *data, size_t data_len); +int libcfs_kkuc_group_rem(int uid, unsigned int group); +int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, + void *cb_arg); + +#endif /* __LUSTRE_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index 428469fec534..f2223d55850a 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -153,9 +153,9 @@ struct obd_ioctl_data { /* buffers the kernel will treat as user pointers */ __u32 ioc_plen1; - char *ioc_pbuf1; + void __user *ioc_pbuf1; __u32 ioc_plen2; - char *ioc_pbuf2; + void __user *ioc_pbuf2; /* inline buffers for various arguments */ __u32 ioc_inllen1; @@ -252,8 +252,8 @@ static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data) #include "obd_support.h" /* function defined in lustre/obdclass/<platform>/<platform>-module.c */ -int obd_ioctl_getdata(char **buf, int *len, void *arg); -int obd_ioctl_popdata(void *arg, void *data, int len); +int obd_ioctl_getdata(char **buf, int *len, void __user *arg); +int obd_ioctl_popdata(void __user *arg, void *data, int len); static inline void obd_ioctl_freedata(char *buf, int len) { @@ -365,10 +365,10 @@ static inline void obd_ioctl_freedata(char *buf, int len) /* OBD_IOC_LLOG_CATINFO is deprecated */ #define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) +/* #define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */ #define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE) @@ -387,7 +387,8 @@ static inline void obd_ioctl_freedata(char *buf, int len) */ /* Until such time as we get_info the per-stripe maximum from the OST, - * we define this to be 2T - 4k, which is the ext3 maxbytes. */ + * we define this to be 2T - 4k, which is the ext3 maxbytes. + */ #define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL /* Special values for remove LOV EA from disk */ @@ -540,7 +541,7 @@ do { \ l_add_wait(&wq, &__wait); \ \ /* Block all signals (just the non-fatal ones if no timeout). 
*/ \ - if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \ + if (info->lwi_on_signal && (__timeout == 0 || __allow_intr)) \ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \ else \ __blocked = cfs_block_sigsinv(0); \ @@ -562,13 +563,13 @@ do { \ __timeout = cfs_time_sub(__timeout, \ cfs_time_sub(interval, remaining));\ if (__timeout == 0) { \ - if (info->lwi_on_timeout == NULL || \ + if (!info->lwi_on_timeout || \ info->lwi_on_timeout(info->lwi_cb_data)) { \ ret = -ETIMEDOUT; \ break; \ } \ /* Take signals after the timeout expires. */ \ - if (info->lwi_on_signal != NULL) \ + if (info->lwi_on_signal) \ (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\ } \ } \ @@ -578,7 +579,7 @@ do { \ if (condition) \ break; \ if (cfs_signal_pending()) { \ - if (info->lwi_on_signal != NULL && \ + if (info->lwi_on_signal && \ (__timeout == 0 || __allow_intr)) { \ if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \ info->lwi_on_signal(info->lwi_cb_data);\ diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h index f6d7aae3a0b8..fcc5ebbceed8 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lite.h +++ b/drivers/staging/lustre/lustre/include/lustre_lite.h @@ -53,56 +53,8 @@ #define LL_MAX_BLKSIZE_BITS (22) #define LL_MAX_BLKSIZE (1UL<<LL_MAX_BLKSIZE_BITS) -#include "lustre/lustre_user.h" - -struct lustre_rw_params { - int lrp_lock_mode; - ldlm_policy_data_t lrp_policy; - u32 lrp_brw_flags; - int lrp_ast_flags; -}; - -/* - * XXX nikita: this function lives in the header because it is used by both - * llite kernel module and liblustre library, and there is no (?) better place - * to put it in. - */ -static inline void lustre_build_lock_params(int cmd, unsigned long open_flags, - __u64 connect_flags, - loff_t pos, ssize_t len, - struct lustre_rw_params *params) -{ - params->lrp_lock_mode = (cmd == OBD_BRW_READ) ? LCK_PR : LCK_PW; - params->lrp_brw_flags = 0; - - params->lrp_policy.l_extent.start = pos; - params->lrp_policy.l_extent.end = pos + len - 1; - /* - * for now O_APPEND always takes local locks. - */ - if (cmd == OBD_BRW_WRITE && (open_flags & O_APPEND)) { - params->lrp_policy.l_extent.start = 0; - params->lrp_policy.l_extent.end = OBD_OBJECT_EOF; - } else if (LIBLUSTRE_CLIENT && (connect_flags & OBD_CONNECT_SRVLOCK)) { - /* - * liblustre: OST-side locking for all non-O_APPEND - * reads/writes. - */ - params->lrp_lock_mode = LCK_NL; - params->lrp_brw_flags = OBD_BRW_SRVLOCK; - } else { - /* - * nothing special for the kernel. In the future llite may use - * OST-side locks for small writes into highly contended - * files. - */ - } - params->lrp_ast_flags = (open_flags & O_NONBLOCK) ? - LDLM_FL_BLOCK_NOWAIT : 0; -} - /* - * This is embedded into liblustre and llite super-blocks to keep track of + * This is embedded into llite super-blocks to keep track of * connect flags (capabilities) supported by all imports given mount is * connected to. */ diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h index e4fc8b5e1336..49618e186824 100644 --- a/drivers/staging/lustre/lustre/include/lustre_log.h +++ b/drivers/staging/lustre/lustre/include/lustre_log.h @@ -241,7 +241,8 @@ struct llog_ctxt { struct obd_llog_group *loc_olg; /* group containing that ctxt */ struct obd_export *loc_exp; /* parent "disk" export (e.g. 
MDS) */ struct obd_import *loc_imp; /* to use in RPC's: can be backward - pointing import */ + * pointing import + */ struct llog_operations *loc_logops; struct llog_handle *loc_handle; struct mutex loc_mutex; /* protect loc_imp */ @@ -255,7 +256,7 @@ struct llog_ctxt { static inline int llog_handle2ops(struct llog_handle *loghandle, struct llog_operations **lop) { - if (loghandle == NULL || loghandle->lgh_logops == NULL) + if (!loghandle || !loghandle->lgh_logops) return -EINVAL; *lop = loghandle->lgh_logops; @@ -272,7 +273,7 @@ static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt) static inline void llog_ctxt_put(struct llog_ctxt *ctxt) { - if (ctxt == NULL) + if (!ctxt) return; LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON); CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt, @@ -294,7 +295,7 @@ static inline int llog_group_set_ctxt(struct obd_llog_group *olg, LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index] != NULL) { + if (olg->olg_ctxts[index]) { spin_unlock(&olg->olg_lock); return -EEXIST; } @@ -311,7 +312,7 @@ static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg, LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index] == NULL) + if (!olg->olg_ctxts[index]) ctxt = NULL; else ctxt = llog_ctxt_get(olg->olg_ctxts[index]); @@ -335,7 +336,7 @@ static inline struct llog_ctxt *llog_get_context(struct obd_device *obd, static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index) { - return (olg->olg_ctxts[index] == NULL); + return (!olg->olg_ctxts[index]); } static inline int llog_ctxt_null(struct obd_device *obd, int index) @@ -354,7 +355,7 @@ static inline int llog_next_block(const struct lu_env *env, rc = llog_handle2ops(loghandle, &lop); if (rc) return rc; - if (lop->lop_next_block == NULL) + if (!lop->lop_next_block) return -EOPNOTSUPP; rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx, diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h index 3da373315856..df94f9f3bef2 100644 --- a/drivers/staging/lustre/lustre/include/lustre_mdc.h +++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h @@ -81,8 +81,8 @@ static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck) static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, struct lookup_intent *it) { - if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT)) + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT)) return; /* This would normally block until the existing request finishes. @@ -90,7 +90,8 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set * it will only be cleared when all fake requests are finished. * Only when all fake requests are finished can normal requests - * be sent, to ensure they are recoverable again. */ + * be sent, to ensure they are recoverable again. + */ again: mutex_lock(&lck->rpcl_mutex); @@ -105,22 +106,23 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, * just turned off but there are still requests in progress. * Wait until they finish. It doesn't need to be efficient * in this extremely rare case, just have low overhead in - * the common case when it isn't true. */ + * the common case when it isn't true. 
+ */ while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) { mutex_unlock(&lck->rpcl_mutex); schedule_timeout(cfs_time_seconds(1) / 4); goto again; } - LASSERT(lck->rpcl_it == NULL); + LASSERT(!lck->rpcl_it); lck->rpcl_it = it; } static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck, struct lookup_intent *it) { - if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT)) + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT)) return; if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */ diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index d834ddd8183b..4fa1a18b7d15 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -76,7 +76,8 @@ * In order for the client and server to properly negotiate the maximum * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two * value. The client is free to limit the actual RPC size for any bulk - * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */ + * transfer via cl_max_pages_per_rpc to some non-power-of-two value. + */ #define PTLRPC_BULK_OPS_BITS 2 #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS) /** @@ -85,7 +86,8 @@ * protocol limitation on the maximum RPC size that can be used by any * RPC sent to that server in the future. Instead, the server should * use the negotiated per-client ocd_brw_size to determine the bulk - * RPC count. */ + * RPC count. + */ #define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1)) /** @@ -419,16 +421,18 @@ struct ptlrpc_reply_state { /** A spinlock to protect the reply state flags */ spinlock_t rs_lock; /** Reply state flags */ - unsigned long rs_difficult:1; /* ACK/commit stuff */ + unsigned long rs_difficult:1; /* ACK/commit stuff */ unsigned long rs_no_ack:1; /* no ACK, even for - difficult requests */ + * difficult requests + */ unsigned long rs_scheduled:1; /* being handled? */ unsigned long rs_scheduled_ever:1;/* any schedule attempts? */ unsigned long rs_handled:1; /* been handled yet? */ unsigned long rs_on_net:1; /* reply_out_callback pending? */ unsigned long rs_prealloc:1; /* rs from prealloc list */ unsigned long rs_committed:1;/* the transaction was committed - * and the rs was dispatched */ + * and the rs was dispatched + */ /** Size of the state */ int rs_size; /** opcode */ @@ -463,7 +467,7 @@ struct ptlrpc_reply_state { /** Handles of locks awaiting client reply ACK */ struct lustre_handle rs_locks[RS_MAX_LOCKS]; /** Lock modes of locks in \a rs_locks */ - ldlm_mode_t rs_modes[RS_MAX_LOCKS]; + enum ldlm_mode rs_modes[RS_MAX_LOCKS]; }; struct ptlrpc_thread; @@ -1181,7 +1185,7 @@ struct nrs_fifo_req { * purpose of this object is to hold references to the request's resources * for the lifetime of the request, and to hold properties that policies use * use for determining the request's scheduling priority. - * */ + */ struct ptlrpc_nrs_request { /** * The request's resource hierarchy. 
@@ -1321,15 +1325,17 @@ struct ptlrpc_request { /* do not resend request on -EINPROGRESS */ rq_no_retry_einprogress:1, /* allow the req to be sent if the import is in recovery - * status */ + * status + */ rq_allow_replay:1; unsigned int rq_nr_resend; enum rq_phase rq_phase; /* one of RQ_PHASE_* */ enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */ - atomic_t rq_refcount;/* client-side refcount for SENT race, - server-side refcount for multiple replies */ + atomic_t rq_refcount; /* client-side refcount for SENT race, + * server-side refcount for multiple replies + */ /** Portal to which this request would be sent */ short rq_request_portal; /* XXX FIXME bug 249 */ @@ -1363,7 +1369,8 @@ struct ptlrpc_request { /** * security and encryption data - * @{ */ + * @{ + */ struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */ struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */ struct list_head rq_ctx_chain; /**< link to waited ctx */ @@ -1477,7 +1484,8 @@ struct ptlrpc_request { /** when request must finish. volatile * so that servers' early reply updates to the deadline aren't - * kept in per-cpu cache */ + * kept in per-cpu cache + */ volatile time64_t rq_deadline; /** when req reply unlink must finish. */ time64_t rq_reply_deadline; @@ -1518,7 +1526,7 @@ struct ptlrpc_request { static inline int ptlrpc_req_interpret(const struct lu_env *env, struct ptlrpc_request *req, int rc) { - if (req->rq_interpret_reply != NULL) { + if (req->rq_interpret_reply) { req->rq_status = req->rq_interpret_reply(env, req, &req->rq_async_args, rc); @@ -1678,7 +1686,8 @@ do { \ /** * This is the debug print function you need to use to print request structure * content into lustre debug log. - * for most callers (level is a constant) this is resolved at compile time */ + * for most callers (level is a constant) this is resolved at compile time + */ #define DEBUG_REQ(level, req, fmt, args...) \ do { \ if ((level) & (D_ERROR | D_WARNING)) { \ @@ -1947,7 +1956,7 @@ struct ptlrpc_service_ops { * or general metadata service for MDS. 
*/ struct ptlrpc_service { - /** serialize /proc operations */ + /** serialize sysfs operations */ spinlock_t srv_lock; /** most often accessed fields */ /** chain thru all services */ @@ -2101,7 +2110,8 @@ struct ptlrpc_service_part { /** NRS head for regular requests */ struct ptlrpc_nrs scp_nrs_reg; /** NRS head for HP requests; this is only valid for services that can - * handle HP requests */ + * handle HP requests + */ struct ptlrpc_nrs *scp_nrs_hp; /** AT stuff */ @@ -2141,8 +2151,8 @@ struct ptlrpc_service_part { #define ptlrpc_service_for_each_part(part, i, svc) \ for (i = 0; \ i < (svc)->srv_ncpts && \ - (svc)->srv_parts != NULL && \ - ((part) = (svc)->srv_parts[i]) != NULL; i++) + (svc)->srv_parts && \ + ((part) = (svc)->srv_parts[i]); i++) /** * Declaration of ptlrpcd control structure @@ -2259,7 +2269,6 @@ static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc, static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc, const struct ptlrpc_nrs_pol_desc *desc) { - LASSERT(desc->pd_compat_svc_name != NULL); return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0; } @@ -2303,7 +2312,6 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) struct ptlrpc_bulk_desc *desc; int rc; - LASSERT(req != NULL); desc = req->rq_bulk; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) && @@ -2374,14 +2382,14 @@ void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req); struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, const struct req_format *format); struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, - struct ptlrpc_request_pool *, - const struct req_format *format); + struct ptlrpc_request_pool *, + const struct req_format *); void ptlrpc_request_free(struct ptlrpc_request *request); int ptlrpc_request_pack(struct ptlrpc_request *request, __u32 version, int opcode); -struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, - const struct req_format *format, - __u32 version, int opcode); +struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *, + const struct req_format *, + __u32, int); int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, __u32 version, int opcode, char **bufs, struct ptlrpc_cli_ctx *ctx); @@ -2462,7 +2470,8 @@ struct ptlrpc_service_thr_conf { /* "soft" limit for total threads number */ unsigned int tc_nthrs_max; /* user specified threads number, it will be validated due to - * other members of this structure. */ + * other members of this structure. 
+ */ unsigned int tc_nthrs_user; /* set NUMA node affinity for service threads */ unsigned int tc_cpu_affinity; @@ -2500,14 +2509,12 @@ struct ptlrpc_service_conf { */ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs); void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs); -struct ptlrpc_service *ptlrpc_register_service( - struct ptlrpc_service_conf *conf, - struct kset *parent, - struct dentry *debugfs_entry); +struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf, + struct kset *parent, + struct dentry *debugfs_entry); int ptlrpc_start_threads(struct ptlrpc_service *svc); int ptlrpc_unregister_service(struct ptlrpc_service *service); -int liblustre_check_services(void *arg); int ptlrpc_hr_init(void); void ptlrpc_hr_fini(void); @@ -2536,7 +2543,7 @@ int ptlrpc_reconnect_import(struct obd_import *imp); int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, int index); void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, - int index); + int index); int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len); int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len); @@ -2726,7 +2733,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) static inline void ptlrpc_client_wake_req(struct ptlrpc_request *req) { - if (req->rq_set == NULL) + if (!req->rq_set) wake_up(&req->rq_reply_waitq); else wake_up(&req->rq_set->set_waitq); @@ -2750,7 +2757,7 @@ ptlrpc_rs_decref(struct ptlrpc_reply_state *rs) /* Should only be called once per req */ static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req) { - if (req->rq_reply_state == NULL) + if (!req->rq_reply_state) return; /* shouldn't occur */ ptlrpc_rs_decref(req->rq_reply_state); req->rq_reply_state = NULL; @@ -2807,7 +2814,6 @@ ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt) static inline struct ptlrpc_service * ptlrpc_req2svc(struct ptlrpc_request *req) { - LASSERT(req->rq_rqbd != NULL); return req->rq_rqbd->rqbd_svcpt->scp_service; } diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h index 46a662f89322..b2e67fcf9ef1 100644 --- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h +++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h @@ -107,8 +107,8 @@ void req_capsule_set_size(struct req_capsule *pill, const struct req_msg_field *field, enum req_location loc, int size); int req_capsule_get_size(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc); + const struct req_msg_field *field, + enum req_location loc); int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc); int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt, enum req_location loc); @@ -130,7 +130,6 @@ void req_layout_fini(void); extern struct req_format RQF_OBD_PING; extern struct req_format RQF_OBD_SET_INFO; extern struct req_format RQF_SEC_CTX; -extern struct req_format RQF_OBD_IDX_READ; /* MGS req_format */ extern struct req_format RQF_MGS_TARGET_REG; extern struct req_format RQF_MGS_SET_INFO; @@ -146,7 +145,6 @@ extern struct req_format RQF_MDS_GETSTATUS; extern struct req_format RQF_MDS_SYNC; extern struct req_format RQF_MDS_GETXATTR; extern struct req_format RQF_MDS_GETATTR; -extern struct req_format RQF_UPDATE_OBJ; /* * This is format of direct (non-intent) MDS_GETATTR_NAME request. 
@@ -177,7 +175,6 @@ extern struct req_format RQF_MDS_REINT_SETXATTR; extern struct req_format RQF_MDS_QUOTACHECK; extern struct req_format RQF_MDS_QUOTACTL; extern struct req_format RQF_QC_CALLBACK; -extern struct req_format RQF_QUOTA_DQACQ; extern struct req_format RQF_MDS_SWAP_LAYOUTS; /* MDS hsm formats */ extern struct req_format RQF_MDS_HSM_STATE_GET; @@ -220,7 +217,6 @@ extern struct req_format RQF_LDLM_INTENT_OPEN; extern struct req_format RQF_LDLM_INTENT_CREATE; extern struct req_format RQF_LDLM_INTENT_UNLINK; extern struct req_format RQF_LDLM_INTENT_GETXATTR; -extern struct req_format RQF_LDLM_INTENT_QUOTA; extern struct req_format RQF_LDLM_CANCEL; extern struct req_format RQF_LDLM_CALLBACK; extern struct req_format RQF_LDLM_CP_CALLBACK; @@ -252,7 +248,6 @@ extern struct req_msg_field RMF_SETINFO_KEY; extern struct req_msg_field RMF_GETINFO_VAL; extern struct req_msg_field RMF_GETINFO_VALLEN; extern struct req_msg_field RMF_GETINFO_KEY; -extern struct req_msg_field RMF_IDX_INFO; extern struct req_msg_field RMF_CLOSE_DATA; /* @@ -277,7 +272,6 @@ extern struct req_msg_field RMF_CAPA1; extern struct req_msg_field RMF_CAPA2; extern struct req_msg_field RMF_OBD_QUOTACHECK; extern struct req_msg_field RMF_OBD_QUOTACTL; -extern struct req_msg_field RMF_QUOTA_BODY; extern struct req_msg_field RMF_STRING; extern struct req_msg_field RMF_SWAP_LAYOUTS; extern struct req_msg_field RMF_MDS_HSM_PROGRESS; @@ -322,9 +316,6 @@ extern struct req_msg_field RMF_MGS_CONFIG_RES; /* generic uint32 */ extern struct req_msg_field RMF_U32; -/* OBJ update format */ -extern struct req_msg_field RMF_UPDATE; -extern struct req_msg_field RMF_UPDATE_REPLY; /** @} req_layout */ #endif /* _LUSTRE_REQ_LAYOUT_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h index dd1033be6bfa..01b4e6726a68 100644 --- a/drivers/staging/lustre/lustre/include/lustre_sec.h +++ b/drivers/staging/lustre/lustre/include/lustre_sec.h @@ -351,26 +351,23 @@ struct ptlrpc_ctx_ops { /** * To determine whether it's suitable to use the \a ctx for \a vcred. */ - int (*match) (struct ptlrpc_cli_ctx *ctx, - struct vfs_cred *vcred); + int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred); /** * To bring the \a ctx uptodate. */ - int (*refresh) (struct ptlrpc_cli_ctx *ctx); + int (*refresh)(struct ptlrpc_cli_ctx *ctx); /** * Validate the \a ctx. */ - int (*validate) (struct ptlrpc_cli_ctx *ctx); + int (*validate)(struct ptlrpc_cli_ctx *ctx); /** * Force the \a ctx to die. */ - void (*force_die) (struct ptlrpc_cli_ctx *ctx, - int grace); - int (*display) (struct ptlrpc_cli_ctx *ctx, - char *buf, int bufsize); + void (*force_die)(struct ptlrpc_cli_ctx *ctx, int grace); + int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize); /** * Sign the request message using \a ctx. @@ -382,8 +379,7 @@ struct ptlrpc_ctx_ops { * * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign(). */ - int (*sign) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Verify the reply message using \a ctx. @@ -395,8 +391,7 @@ struct ptlrpc_ctx_ops { * * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify(). */ - int (*verify) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Encrypt the request message using \a ctx. @@ -408,8 +403,7 @@ struct ptlrpc_ctx_ops { * * \see gss_cli_ctx_seal(). 
*/ - int (*seal) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Decrypt the reply message using \a ctx. @@ -421,8 +415,7 @@ struct ptlrpc_ctx_ops { * * \see gss_cli_ctx_unseal(). */ - int (*unseal) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Wrap bulk request data. This is called before wrapping RPC @@ -444,9 +437,9 @@ struct ptlrpc_ctx_ops { * * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk(). */ - int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx, + struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Unwrap bulk reply data. This is called after wrapping RPC @@ -461,9 +454,9 @@ struct ptlrpc_ctx_ops { * * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk(). */ - int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx, + struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); }; #define PTLRPC_CTX_NEW_BIT (0) /* newly created */ @@ -515,9 +508,9 @@ struct ptlrpc_sec_cops { * * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr(). */ - struct ptlrpc_sec * (*create_sec) (struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx, - struct sptlrpc_flavor *flavor); + struct ptlrpc_sec *(*create_sec)(struct obd_import *imp, + struct ptlrpc_svc_ctx *ctx, + struct sptlrpc_flavor *flavor); /** * Destructor of ptlrpc_sec. When called, refcount has been dropped @@ -525,7 +518,7 @@ struct ptlrpc_sec_cops { * * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr(). */ - void (*destroy_sec) (struct ptlrpc_sec *sec); + void (*destroy_sec)(struct ptlrpc_sec *sec); /** * Notify that this ptlrpc_sec is going to die. Optionally, policy @@ -534,7 +527,7 @@ struct ptlrpc_sec_cops { * * \see plain_kill_sec(), gss_sec_kill(). */ - void (*kill_sec) (struct ptlrpc_sec *sec); + void (*kill_sec)(struct ptlrpc_sec *sec); /** * Given \a vcred, lookup and/or create its context. The policy module @@ -544,10 +537,9 @@ struct ptlrpc_sec_cops { * * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr(). */ - struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec, - struct vfs_cred *vcred, - int create, - int remove_dead); + struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec, + struct vfs_cred *vcred, + int create, int remove_dead); /** * Called then the reference of \a ctx dropped to 0. The policy module @@ -559,9 +551,8 @@ struct ptlrpc_sec_cops { * * \see plain_release_ctx(), gss_sec_release_ctx_kr(). */ - void (*release_ctx) (struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx, - int sync); + void (*release_ctx)(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx, + int sync); /** * Flush the context cache. @@ -573,11 +564,8 @@ struct ptlrpc_sec_cops { * * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr(). */ - int (*flush_ctx_cache) - (struct ptlrpc_sec *sec, - uid_t uid, - int grace, - int force); + int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid, + int grace, int force); /** * Called periodically by garbage collector to remove dead contexts @@ -585,7 +573,7 @@ struct ptlrpc_sec_cops { * * \see gss_sec_gc_ctx_kr(). 
*/ - void (*gc_ctx) (struct ptlrpc_sec *sec); + void (*gc_ctx)(struct ptlrpc_sec *sec); /** * Given an context \a ctx, install a corresponding reverse service @@ -593,9 +581,8 @@ struct ptlrpc_sec_cops { * XXX currently it's only used by GSS module, maybe we should remove * this from general API. */ - int (*install_rctx)(struct obd_import *imp, - struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx); + int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec, + struct ptlrpc_cli_ctx *ctx); /** * To allocate request buffer for \a req. @@ -608,9 +595,8 @@ struct ptlrpc_sec_cops { * * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf(). */ - int (*alloc_reqbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int lustre_msg_size); + int (*alloc_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, + int lustre_msg_size); /** * To free request buffer for \a req. @@ -619,8 +605,7 @@ struct ptlrpc_sec_cops { * * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf(). */ - void (*free_reqbuf) (struct ptlrpc_sec *sec, - struct ptlrpc_request *req); + void (*free_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); /** * To allocate reply buffer for \a req. @@ -632,9 +617,8 @@ struct ptlrpc_sec_cops { * * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf(). */ - int (*alloc_repbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int lustre_msg_size); + int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, + int lustre_msg_size); /** * To free reply buffer for \a req. @@ -645,8 +629,7 @@ struct ptlrpc_sec_cops { * * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf(). */ - void (*free_repbuf) (struct ptlrpc_sec *sec, - struct ptlrpc_request *req); + void (*free_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); /** * To expand the request buffer of \a req, thus the \a segment in @@ -658,15 +641,13 @@ struct ptlrpc_sec_cops { * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(), * gss_enlarge_reqbuf(). */ - int (*enlarge_reqbuf) - (struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int segment, int newsize); + int (*enlarge_reqbuf)(struct ptlrpc_sec *sec, + struct ptlrpc_request *req, + int segment, int newsize); /* * misc */ - int (*display) (struct ptlrpc_sec *sec, - struct seq_file *seq); + int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq); }; /** @@ -690,7 +671,7 @@ struct ptlrpc_sec_sops { * * \see null_accept(), plain_accept(), gss_svc_accept_kr(). */ - int (*accept) (struct ptlrpc_request *req); + int (*accept)(struct ptlrpc_request *req); /** * Perform security transformation upon reply message. @@ -702,15 +683,14 @@ struct ptlrpc_sec_sops { * * \see null_authorize(), plain_authorize(), gss_svc_authorize(). */ - int (*authorize) (struct ptlrpc_request *req); + int (*authorize)(struct ptlrpc_request *req); /** * Invalidate server context \a ctx. * * \see gss_svc_invalidate_ctx(). */ - void (*invalidate_ctx) - (struct ptlrpc_svc_ctx *ctx); + void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx); /** * Allocate a ptlrpc_reply_state. @@ -724,28 +704,26 @@ struct ptlrpc_sec_sops { * * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs(). */ - int (*alloc_rs) (struct ptlrpc_request *req, - int msgsize); + int (*alloc_rs)(struct ptlrpc_request *req, int msgsize); /** * Free a ptlrpc_reply_state. */ - void (*free_rs) (struct ptlrpc_reply_state *rs); + void (*free_rs)(struct ptlrpc_reply_state *rs); /** * Release the server context \a ctx. 
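The ptlrpc_ctx_ops and ptlrpc_sec_cops hunks above are purely mechanical: each callback declaration loses the space before its parameter list and the arguments are repacked onto as few lines as possible. For readers unfamiliar with the target style, a minimal ops table written that way (all names here are hypothetical, not part of the patch) looks like:

        struct sample_ctx;

        struct sample_ctx_ops {
                int  (*refresh)(struct sample_ctx *ctx);
                int  (*display)(struct sample_ctx *ctx, char *buf, int bufsize);
                void (*force_die)(struct sample_ctx *ctx, int grace);
        };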
* * \see gss_svc_free_ctx(). */ - void (*free_ctx) (struct ptlrpc_svc_ctx *ctx); + void (*free_ctx)(struct ptlrpc_svc_ctx *ctx); /** * Install a reverse context based on the server context \a ctx. * * \see gss_svc_install_rctx_kr(). */ - int (*install_rctx)(struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx); + int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx); /** * Prepare buffer for incoming bulk write. @@ -755,24 +733,24 @@ struct ptlrpc_sec_sops { * * \see gss_svc_prep_bulk(). */ - int (*prep_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*prep_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Unwrap the bulk write data. * * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk(). */ - int (*unwrap_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*unwrap_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Wrap the bulk read data. * * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk(). */ - int (*wrap_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*wrap_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); }; struct ptlrpc_sec_policy { diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h index caa4da12f37a..64559a16f4de 100644 --- a/drivers/staging/lustre/lustre/include/lustre_ver.h +++ b/drivers/staging/lustre/lustre/include/lustre_ver.h @@ -1,26 +1,20 @@ #ifndef _LUSTRE_VER_H_ #define _LUSTRE_VER_H_ -/* This file automatically generated from lustre/include/lustre_ver.h.in, - * based on parameters in lustre/autoconf/lustre-version.ac. - * Changes made directly to this file will be lost. */ #define LUSTRE_MAJOR 2 -#define LUSTRE_MINOR 3 -#define LUSTRE_PATCH 64 +#define LUSTRE_MINOR 4 +#define LUSTRE_PATCH 60 #define LUSTRE_FIX 0 -#define LUSTRE_VERSION_STRING "2.3.64" +#define LUSTRE_VERSION_STRING "2.4.60" #define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR, \ LUSTRE_MINOR, LUSTRE_PATCH, \ LUSTRE_FIX) -/* liblustre clients are only allowed to connect if their LUSTRE_FIX mismatches - * by this amount (set in lustre/autoconf/lustre-version.ac). */ -#define LUSTRE_VERSION_ALLOWED_OFFSET OBD_OCD_VERSION(0, 0, 1, 32) - -/* If lustre version of client and servers it connects to differs by more +/* + * If lustre version of client and servers it connects to differs by more * than this amount, client would issue a warning. - * (set in lustre/autoconf/lustre-version.ac) */ + */ #define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0) #endif diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index bcbe61301713..4a0f2e8b19f6 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -90,7 +90,8 @@ struct lov_stripe_md { pid_t lsm_lock_owner; /* debugging */ /* maximum possible file size, might change as OSTs status changes, - * e.g. disconnected, deactivated */ + * e.g. disconnected, deactivated + */ __u64 lsm_maxbytes; struct { /* Public members. */ @@ -123,7 +124,7 @@ static inline bool lsm_is_released(struct lov_stripe_md *lsm) static inline bool lsm_has_objects(struct lov_stripe_md *lsm) { - if (lsm == NULL) + if (!lsm) return false; if (lsm_is_released(lsm)) return false; @@ -159,7 +160,8 @@ struct obd_info { /* An update callback which is called to update some data on upper * level. E.g. 
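The lustre_ver.h hunk above bumps the tree to 2.4.60 and drops the autoconf-generated boilerplate. LUSTRE_VERSION_CODE is still produced by OBD_OCD_VERSION(), which is defined outside this patch; assuming the usual one-byte-per-component packing (an assumption, mirrored in the macro below), the resulting code can be checked with a small standalone program:

        #include <stdio.h>

        /* Assumed packing for OBD_OCD_VERSION(); the real macro lives outside
         * this hunk, so treat this as an illustrative sketch only. */
        #define OBD_OCD_VERSION(major, minor, patch, fix) \
                (((major) << 24) + ((minor) << 16) + ((patch) << 8) + (fix))

        int main(void)
        {
                unsigned int code = OBD_OCD_VERSION(2, 4, 60, 0);

                /* 2.4.60.0 -> 0x02043c00 under the assumed packing */
                printf("LUSTRE_VERSION_CODE = 0x%08x\n", code);
                return 0;
        }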
it is used for update lsm->lsm_oinfo at every received * request in osc level for enqueue requests. It is also possible to - * update some caller data from LOV layer if needed. */ + * update some caller data from LOV layer if needed. + */ obd_enqueue_update_f oi_cb_up; }; @@ -216,7 +218,6 @@ struct timeout_item { }; #define OSC_MAX_RIF_DEFAULT 8 -#define MDS_OSC_MAX_RIF_DEFAULT 50 #define OSC_MAX_RIF_MAX 256 #define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4) #define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */ @@ -241,7 +242,8 @@ struct client_obd { struct obd_import *cl_import; /* ptlrpc connection state */ int cl_conn_count; /* max_mds_easize is purely a performance thing so we don't have to - * call obd_size_diskmd() all the time. */ + * call obd_size_diskmd() all the time. + */ int cl_default_mds_easize; int cl_max_mds_easize; int cl_default_mds_cookiesize; @@ -261,7 +263,8 @@ struct client_obd { /* since we allocate grant by blocks, we don't know how many grant will * be used to add a page into cache. As a solution, we reserve maximum * grant before trying to dirty a page and unreserve the rest. - * See osc_{reserve|unreserve}_grant for details. */ + * See osc_{reserve|unreserve}_grant for details. + */ long cl_reserved_grant; struct list_head cl_cache_waiters; /* waiting for cache/grant */ unsigned long cl_next_shrink_grant; /* jiffies */ @@ -269,14 +272,16 @@ struct client_obd { int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine - * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */ + * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) + */ int cl_chunkbits; int cl_chunk; int cl_extent_tax; /* extent overhead, by bytes */ /* keep track of objects that have lois that contain pages which * have been queued for async brw. this lock also protects the - * lists of osc_client_pages that hang off of the loi */ + * lists of osc_client_pages that hang off of the loi + */ /* * ->cl_loi_list_lock protects consistency of * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and @@ -295,14 +300,14 @@ struct client_obd { * NB by Jinshan: though field names are still _loi_, but actually * osc_object{}s are in the list. 
*/ - client_obd_lock_t cl_loi_list_lock; + struct client_obd_lock cl_loi_list_lock; struct list_head cl_loi_ready_list; struct list_head cl_loi_hp_ready_list; struct list_head cl_loi_write_list; struct list_head cl_loi_read_list; int cl_r_in_flight; int cl_w_in_flight; - /* just a sum of the loi/lop pending numbers to be exported by /proc */ + /* just a sum of the loi/lop pending numbers to be exported by sysfs */ atomic_t cl_pending_w_pages; atomic_t cl_pending_r_pages; __u32 cl_max_pages_per_rpc; @@ -322,7 +327,7 @@ struct client_obd { atomic_t cl_lru_shrinkers; atomic_t cl_lru_in_list; struct list_head cl_lru_list; /* lru page list */ - client_obd_lock_t cl_lru_list_lock; /* page list protector */ + struct client_obd_lock cl_lru_list_lock; /* page list protector */ /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ atomic_t cl_destroy_in_flight; @@ -340,7 +345,7 @@ struct client_obd { /* supported checksum types that are worked out at connect time */ __u32 cl_supp_cksum_types; /* checksum algorithm to be used */ - cksum_type_t cl_cksum_type; + enum cksum_type cl_cksum_type; /* also protected by the poorly named _loi_list_lock lock above */ struct osc_async_rc cl_ar; @@ -375,14 +380,12 @@ struct echo_client_obd { spinlock_t ec_lock; struct list_head ec_objects; struct list_head ec_locks; - int ec_nstripes; __u64 ec_unique; }; /* Generic subset of OSTs */ struct ost_pool { - __u32 *op_array; /* array of index of - lov_obd->lov_tgts */ + __u32 *op_array; /* array of index of lov_obd->lov_tgts */ unsigned int op_count; /* number of OSTs in the array */ unsigned int op_size; /* allocated size of lp_array */ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */ @@ -415,14 +418,16 @@ struct lov_qos { struct lov_qos_rr lq_rr; /* round robin qos data */ unsigned long lq_dirty:1, /* recalc qos data */ lq_same_space:1,/* the ost's all have approx. 
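The client_obd hunks above also swap private typedefs for plain tags (client_obd_lock_t becomes struct client_obd_lock, cksum_type_t becomes enum cksum_type), matching the kernel's preference for keeping struct and enum tags visible at every use site. A toy illustration with a hypothetical type name:

        /* Illustration only; client_foo_lock is a made-up name.
         *
         *   typedef struct client_foo_lock client_foo_lock_t;   (old style)
         *
         * becomes a plain tag spelled out wherever the type is used: */
        struct client_foo_lock {
                int cfl_owner;
        };

        static struct client_foo_lock demo_lock;        /* reader sees the kind of type at a glance */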
- the same space avail */ + * the same space avail + */ lq_reset:1, /* zero current penalties */ lq_statfs_in_progress:1; /* statfs op in progress */ /* qos statfs data */ struct lov_statfs_data *lq_statfs_data; - wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs - * requests completion */ + wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs + * requests completion + */ }; struct lov_tgt_desc { @@ -450,16 +455,16 @@ struct pool_desc { struct lov_qos_rr pool_rr; /* round robin qos */ struct hlist_node pool_hash; /* access by poolname */ struct list_head pool_list; /* serial access */ - struct dentry *pool_debugfs_entry; /* file in /proc */ + struct dentry *pool_debugfs_entry; /* file in debugfs */ struct obd_device *pool_lobd; /* obd of the lov/lod to which - * this pool belongs */ + * this pool belongs + */ }; struct lov_obd { struct lov_desc desc; struct lov_tgt_desc **lov_tgts; /* sparse array */ - struct ost_pool lov_packed; /* all OSTs in a packed - array */ + struct ost_pool lov_packed; /* all OSTs in a packed array */ struct mutex lov_lock; struct obd_connect_data lov_ocd; atomic_t lov_refcount; @@ -596,34 +601,6 @@ struct obd_trans_info { struct obd_uuid *oti_ost_uuid; }; -static inline void oti_init(struct obd_trans_info *oti, - struct ptlrpc_request *req) -{ - if (oti == NULL) - return; - memset(oti, 0, sizeof(*oti)); - - if (req == NULL) - return; - - oti->oti_xid = req->rq_xid; - /** VBR: take versions from request */ - if (req->rq_reqmsg != NULL && - lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) { - __u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg); - - oti->oti_pre_version = pre_version ? pre_version[0] : 0; - oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg); - } - - /** called from mds_create_objects */ - if (req->rq_repmsg != NULL) - oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); - oti->oti_thread = req->rq_svc_thread; - if (req->rq_reqmsg != NULL) - oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg); -} - static inline void oti_alloc_cookies(struct obd_trans_info *oti, int num_cookies) { @@ -681,7 +658,7 @@ enum obd_notify_event { /* * Data structure used to pass obd_notify()-event to non-obd listeners (llite - * and liblustre being main examples). + * being main example). */ struct obd_notify_upcall { int (*onu_upcall)(struct obd_device *host, struct obd_device *watched, @@ -728,21 +705,23 @@ struct obd_device { unsigned long obd_attached:1, /* finished attach */ obd_set_up:1, /* finished setup */ obd_version_recov:1, /* obd uses version checking */ - obd_replayable:1, /* recovery is enabled; inform clients */ - obd_no_transno:1, /* no committed-transno notification */ + obd_replayable:1,/* recovery is enabled; inform clients */ + obd_no_transno:1, /* no committed-transno notification */ obd_no_recov:1, /* fail instead of retry messages */ obd_stopping:1, /* started cleanup */ obd_starting:1, /* started setup */ obd_force:1, /* cleanup with > 0 obd refcount */ - obd_fail:1, /* cleanup with failover */ - obd_async_recov:1, /* allow asynchronous orphan cleanup */ + obd_fail:1, /* cleanup with failover */ + obd_async_recov:1, /* allow asynchronous orphan cleanup */ obd_no_conn:1, /* deny new connections */ obd_inactive:1, /* device active/inactive - * (for /proc/status only!!) */ + * (for sysfs status only!!) + */ obd_no_ir:1, /* no imperative recovery. 
*/ obd_process_conf:1; /* device is processing mgs config */ /* use separate field as it is set in interrupt to don't mess with - * protection of other bits using _bh lock */ + * protection of other bits using _bh lock + */ unsigned long obd_recovery_expired:1; /* uuid-export hash body */ struct cfs_hash *obd_uuid_hash; @@ -935,7 +914,8 @@ struct md_op_data { __u32 op_npages; /* used to transfer info between the stacks of MD client - * see enum op_cli_flags */ + * see enum op_cli_flags + */ __u32 op_cli_flags; /* File object data version for HSM release, on client */ @@ -957,7 +937,7 @@ struct md_enqueue_info { struct lustre_handle mi_lockh; struct inode *mi_dir; int (*mi_cb)(struct ptlrpc_request *req, - struct md_enqueue_info *minfo, int rc); + struct md_enqueue_info *minfo, int rc); __u64 mi_cbdata; unsigned int mi_generation; }; @@ -965,7 +945,7 @@ struct md_enqueue_info { struct obd_ops { struct module *owner; int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg); + void *karg, void __user *uarg); int (*get_info)(const struct lu_env *env, struct obd_export *, __u32 keylen, void *key, __u32 *vallen, void *val, struct lov_stripe_md *lsm); @@ -987,7 +967,8 @@ struct obd_ops { /* connect to the target device with given connection * data. @ocd->ocd_connect_flags is modified to reflect flags actually * granted by the target, which are guaranteed to be a subset of flags - * asked for. If @ocd == NULL, use default parameters. */ + * asked for. If @ocd == NULL, use default parameters. + */ int (*connect)(const struct lu_env *env, struct obd_export **exp, struct obd_device *src, struct obd_uuid *cluuid, struct obd_connect_data *ocd, @@ -1083,7 +1064,8 @@ struct obd_ops { /* * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c. - * Also, add a wrapper function in include/linux/obd_class.h. */ + * Also, add a wrapper function in include/linux/obd_class.h. 
+ */ }; enum { @@ -1189,14 +1171,14 @@ struct md_ops { struct obd_client_handle *); int (*set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *); - ldlm_mode_t (*lock_match)(struct obd_export *, __u64, - const struct lu_fid *, ldlm_type_t, - ldlm_policy_data_t *, ldlm_mode_t, - struct lustre_handle *); + enum ldlm_mode (*lock_match)(struct obd_export *, __u64, + const struct lu_fid *, enum ldlm_type, + ldlm_policy_data_t *, enum ldlm_mode, + struct lustre_handle *); int (*cancel_unused)(struct obd_export *, const struct lu_fid *, - ldlm_policy_data_t *, ldlm_mode_t, - ldlm_cancel_flags_t flags, void *opaque); + ldlm_policy_data_t *, enum ldlm_mode, + enum ldlm_cancel_flags flags, void *opaque); int (*get_remote_perm)(struct obd_export *, const struct lu_fid *, __u32, struct ptlrpc_request **); @@ -1224,9 +1206,9 @@ struct lsm_operations { void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *, u64 *); int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes, - __u16 *stripe_count); + __u16 *stripe_count); int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md *lmm); + struct lov_mds_md *lmm); }; extern const struct lsm_operations lsm_v1_ops; @@ -1253,7 +1235,7 @@ static inline struct md_open_data *obd_mod_alloc(void) struct md_open_data *mod; mod = kzalloc(sizeof(*mod), GFP_NOFS); - if (mod == NULL) + if (!mod) return NULL; atomic_set(&mod->mod_refcount, 1); return mod; @@ -1300,7 +1282,7 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx) return false; /* caller does not care of idx */ - if (idx == NULL) + if (!idx) return true; /* volatile file, the MDT can be set from name */ @@ -1327,7 +1309,8 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx) return true; bad_format: /* bad format of mdt idx, we cannot return an error - * to caller so we use hash algo */ + * to caller so we use hash algo + */ CERROR("Bad volatile file name format: %s\n", name + LUSTRE_VOLATILE_HDR_LEN); return false; @@ -1335,7 +1318,6 @@ bad_format: static inline int cli_brw_size(struct obd_device *obd) { - LASSERT(obd != NULL); return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; } diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h index 01db60405393..637fa22110a4 100644 --- a/drivers/staging/lustre/lustre/include/obd_cksum.h +++ b/drivers/staging/lustre/lustre/include/obd_cksum.h @@ -37,7 +37,7 @@ #include "../../include/linux/libcfs/libcfs.h" #include "lustre/lustre_idl.h" -static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type) +static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type) { switch (cksum_type) { case OBD_CKSUM_CRC32: @@ -63,8 +63,9 @@ static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type) * In case of an unsupported types/flags we fall back to ADLER * because that is supported by all clients since 1.8 * - * In case multiple algorithms are supported the best one is used. */ -static inline u32 cksum_type_pack(cksum_type_t cksum_type) + * In case multiple algorithms are supported the best one is used. 
+ */ +static inline u32 cksum_type_pack(enum cksum_type cksum_type) { unsigned int performance = 0, tmp; u32 flag = OBD_FL_CKSUM_ADLER; @@ -98,7 +99,7 @@ static inline u32 cksum_type_pack(cksum_type_t cksum_type) return flag; } -static inline cksum_type_t cksum_type_unpack(u32 o_flags) +static inline enum cksum_type cksum_type_unpack(u32 o_flags) { switch (o_flags & OBD_FL_CKSUM_ALL) { case OBD_FL_CKSUM_CRC32C: @@ -116,9 +117,9 @@ static inline cksum_type_t cksum_type_unpack(u32 o_flags) * 1.8 supported ADLER it is base and not depend on hw * Client uses all available local algos */ -static inline cksum_type_t cksum_types_supported_client(void) +static inline enum cksum_type cksum_types_supported_client(void) { - cksum_type_t ret = OBD_CKSUM_ADLER; + enum cksum_type ret = OBD_CKSUM_ADLER; CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n", cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)), @@ -139,14 +140,16 @@ static inline cksum_type_t cksum_types_supported_client(void) * Currently, calling cksum_type_pack() with a mask will return the fastest * checksum type due to its benchmarking at libcfs module load. * Caution is advised, however, since what is fastest on a single client may - * not be the fastest or most efficient algorithm on the server. */ -static inline cksum_type_t cksum_type_select(cksum_type_t cksum_types) + * not be the fastest or most efficient algorithm on the server. + */ +static inline enum cksum_type cksum_type_select(enum cksum_type cksum_types) { return cksum_type_unpack(cksum_type_pack(cksum_types)); } /* Checksum algorithm names. Must be defined in the same order as the - * OBD_CKSUM_* flags. */ + * OBD_CKSUM_* flags. + */ #define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"} #endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h index 97d80397503c..706869f8c98f 100644 --- a/drivers/staging/lustre/lustre/include/obd_class.h +++ b/drivers/staging/lustre/lustre/include/obd_class.h @@ -45,18 +45,22 @@ #include "lprocfs_status.h" #define OBD_STATFS_NODELAY 0x0001 /* requests should be send without delay - * and resends for avoid deadlocks */ + * and resends for avoid deadlocks + */ #define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update - * obd_osfs_age */ + * obd_osfs_age + */ #define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd * instead of a specific set. This * means that we cannot rely on the set * interpret routine to be called. * lov_statfs_fini() must thus be called - * by the request interpret routine */ + * by the request interpret routine + */ #define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving - * information from MDT0. */ -#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */ + * information from MDT0. 
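cksum_type_select() above is literally cksum_type_unpack(cksum_type_pack(mask)): pack() reduces the supported mask to the single best wire flag (per the comment, the fastest one benchmarked at module load), and unpack() maps that flag back to the enum. The standalone sketch below mimics that round trip with made-up constants; the real OBD_CKSUM_* / OBD_FL_CKSUM_* values and the speed benchmarking live outside this hunk:

        #include <stdio.h>

        /* Hypothetical stand-ins for the OBD_CKSUM_* constants. */
        enum demo_cksum_type { DEMO_CKSUM_CRC32 = 1, DEMO_CKSUM_ADLER = 2, DEMO_CKSUM_CRC32C = 4 };

        static unsigned int demo_pack(unsigned int supported)
        {
                /* Pretend crc32c is fastest, then adler, then crc32. */
                if (supported & DEMO_CKSUM_CRC32C)
                        return DEMO_CKSUM_CRC32C;
                if (supported & DEMO_CKSUM_ADLER)
                        return DEMO_CKSUM_ADLER;
                return DEMO_CKSUM_CRC32;
        }

        static enum demo_cksum_type demo_unpack(unsigned int flag)
        {
                return (enum demo_cksum_type)flag;      /* 1:1 in this toy model */
        }

        /* Mirrors cksum_type_select(): best single algorithm out of a mask. */
        static enum demo_cksum_type demo_select(unsigned int supported)
        {
                return demo_unpack(demo_pack(supported));
        }

        int main(void)
        {
                printf("selected %d\n", demo_select(DEMO_CKSUM_CRC32 | DEMO_CKSUM_CRC32C));
                return 0;
        }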
+ */ +#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */ /* OBD Device Declarations */ extern struct obd_device *obd_devs[MAX_OBD_DEVICES]; @@ -83,10 +87,10 @@ int class_name2dev(const char *name); struct obd_device *class_name2obd(const char *name); int class_uuid2dev(struct obd_uuid *uuid); struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid); + const char *typ_name, + struct obd_uuid *grp_uuid); struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, - int *next); + int *next); struct obd_device *class_num2obd(int num); int class_notify_sptlrpc_conf(const char *fsname, int namelen); @@ -160,8 +164,9 @@ struct config_llog_data { struct mutex cld_lock; int cld_type; unsigned int cld_stopping:1, /* we were told to stop - * watching */ - cld_lostlock:1; /* lock not requeued */ + * watching + */ + cld_lostlock:1; /* lock not requeued */ char cld_logname[0]; }; @@ -193,7 +198,7 @@ extern void (*class_export_dump_hook)(struct obd_export *); struct obd_export *class_export_get(struct obd_export *exp); void class_export_put(struct obd_export *exp); struct obd_export *class_new_export(struct obd_device *obddev, - struct obd_uuid *cluuid); + struct obd_uuid *cluuid); void class_unlink_export(struct obd_export *exp); struct obd_import *class_import_get(struct obd_import *); @@ -203,7 +208,7 @@ void class_destroy_import(struct obd_import *exp); void class_put_type(struct obd_type *type); int class_connect(struct lustre_handle *conn, struct obd_device *obd, - struct obd_uuid *cluuid); + struct obd_uuid *cluuid); int class_disconnect(struct obd_export *exp); void class_fail_export(struct obd_export *exp); int class_manual_cleanup(struct obd_device *obd); @@ -275,7 +280,8 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid); #define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op /* Ensure obd_setup: used for cleanup which must be called - while obd is stopping */ + * while obd is stopping + */ static inline int obd_check_dev(struct obd_device *obd) { if (!obd) { @@ -306,7 +312,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) / sizeof(((struct obd_ops *)(0))->iocontrol)) #define OBD_COUNTER_INCREMENT(obdx, op) \ - if ((obdx)->obd_stats != NULL) { \ + if ((obdx)->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((obdx)->obd_cntr_base) + \ OBD_COUNTER_OFFSET(op); \ @@ -315,7 +321,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) } #define EXP_COUNTER_INCREMENT(export, op) \ - if ((export)->exp_obd->obd_stats != NULL) { \ + if ((export)->exp_obd->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \ OBD_COUNTER_OFFSET(op); \ @@ -329,7 +335,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) / sizeof(((struct md_ops *)(0))->getstatus)) #define MD_COUNTER_INCREMENT(obdx, op) \ - if ((obd)->md_stats != NULL) { \ + if ((obd)->md_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((obdx)->md_cntr_base) + \ MD_COUNTER_OFFSET(op); \ @@ -338,24 +344,24 @@ static inline int obd_check_dev_active(struct obd_device *obd) } #define EXP_MD_COUNTER_INCREMENT(export, op) \ - if ((export)->exp_obd->obd_stats != NULL) { \ + if ((export)->exp_obd->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \ MD_COUNTER_OFFSET(op); \ LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \ lprocfs_counter_incr((export)->exp_obd->md_stats, 
coffset); \ - if ((export)->exp_md_stats != NULL) \ + if ((export)->exp_md_stats) \ lprocfs_counter_incr( \ (export)->exp_md_stats, coffset); \ } #define EXP_CHECK_MD_OP(exp, op) \ do { \ - if ((exp) == NULL) { \ + if (!(exp)) { \ CERROR("obd_" #op ": NULL export\n"); \ return -ENODEV; \ } \ - if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \ + if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ CERROR("obd_" #op ": cleaned up obd\n"); \ return -EOPNOTSUPP; \ } \ @@ -379,11 +385,11 @@ do { \ #define EXP_CHECK_DT_OP(exp, op) \ do { \ - if ((exp) == NULL) { \ + if (!(exp)) { \ CERROR("obd_" #op ": NULL export\n"); \ return -ENODEV; \ } \ - if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \ + if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ CERROR("obd_" #op ": cleaned up obd\n"); \ return -EOPNOTSUPP; \ } \ @@ -467,7 +473,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg) DECLARE_LU_VARS(ldt, d); ldt = obd->obd_type->typ_lu; - if (ldt != NULL) { + if (ldt) { struct lu_context session_ctx; struct lu_env env; @@ -509,7 +515,7 @@ static inline int obd_precleanup(struct obd_device *obd, return rc; ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { if (cleanup_stage == OBD_CLEANUP_EXPORTS) { struct lu_env env; @@ -538,7 +544,7 @@ static inline int obd_cleanup(struct obd_device *obd) ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { struct lu_env env; rc = lu_env_init(&env, ldt->ldt_ctx_tags); @@ -558,7 +564,8 @@ static inline int obd_cleanup(struct obd_device *obd) static inline void obd_cleanup_client_import(struct obd_device *obd) { /* If we set up but never connected, the - client import will not have been cleaned. */ + * client import will not have been cleaned. + */ down_write(&obd->u.cli.cl_sem); if (obd->u.cli.cl_import) { struct obd_import *imp; @@ -586,7 +593,7 @@ obd_process_config(struct obd_device *obd, int datalen, void *data) obd->obd_process_conf = 1; ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { struct lu_env env; rc = lu_env_init(&env, ldt->ldt_ctx_tags); @@ -674,7 +681,7 @@ static inline int obd_alloc_memmd(struct obd_export *exp, struct lov_stripe_md **mem_tgt) { LASSERT(mem_tgt); - LASSERT(*mem_tgt == NULL); + LASSERT(!*mem_tgt); return obd_unpackmd(exp, mem_tgt, NULL, 0); } @@ -767,7 +774,7 @@ static inline int obd_setattr_rqset(struct obd_export *exp, EXP_COUNTER_INCREMENT(exp, setattr_async); set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set); @@ -778,7 +785,8 @@ static inline int obd_setattr_rqset(struct obd_export *exp, } /* This adds all the requests into @set if @set != NULL, otherwise - all requests are sent asynchronously without waiting for response. */ + * all requests are sent asynchronously without waiting for response. + */ static inline int obd_setattr_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_trans_info *oti, @@ -848,7 +856,8 @@ static inline int obd_connect(const struct lu_env *env, { int rc; __u64 ocf = data ? 
data->ocd_connect_flags : 0; /* for post-condition - * check */ + * check + */ rc = obd_check_dev_active(obd); if (rc) @@ -858,7 +867,7 @@ static inline int obd_connect(const struct lu_env *env, rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata); /* check that only subset is granted */ - LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) == + LASSERT(ergo(data, (data->ocd_connect_flags & ocf) == data->ocd_connect_flags)); return rc; } @@ -871,8 +880,7 @@ static inline int obd_reconnect(const struct lu_env *env, void *localdata) { int rc; - __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition - * check */ + __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition check */ rc = obd_check_dev_active(obd); if (rc) @@ -882,8 +890,7 @@ static inline int obd_reconnect(const struct lu_env *env, rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata); /* check that only subset is granted */ - LASSERT(ergo(d != NULL, - (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); + LASSERT(ergo(d, (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); return rc; } @@ -998,7 +1005,7 @@ static inline int obd_init_export(struct obd_export *exp) { int rc = 0; - if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) && + if ((exp)->exp_obd && OBT((exp)->exp_obd) && OBP((exp)->exp_obd, init_export)) rc = OBP(exp->exp_obd, init_export)(exp); return rc; @@ -1006,7 +1013,7 @@ static inline int obd_init_export(struct obd_export *exp) static inline int obd_destroy_export(struct obd_export *exp) { - if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) && + if ((exp)->exp_obd && OBT((exp)->exp_obd) && OBP((exp)->exp_obd, destroy_export)) OBP(exp->exp_obd, destroy_export)(exp); return 0; @@ -1014,7 +1021,8 @@ static inline int obd_destroy_export(struct obd_export *exp) /* @max_age is the oldest time in jiffies that we accept using a cached data. * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */ + * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. + */ static inline int obd_statfs_async(struct obd_export *exp, struct obd_info *oinfo, __u64 max_age, @@ -1023,7 +1031,7 @@ static inline int obd_statfs_async(struct obd_export *exp, int rc = 0; struct obd_device *obd; - if (exp == NULL || exp->exp_obd == NULL) + if (!exp || !exp->exp_obd) return -EINVAL; obd = exp->exp_obd; @@ -1059,7 +1067,7 @@ static inline int obd_statfs_rqset(struct obd_export *exp, int rc = 0; set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; oinfo.oi_osfs = osfs; @@ -1073,7 +1081,8 @@ static inline int obd_statfs_rqset(struct obd_export *exp, /* @max_age is the oldest time in jiffies that we accept using a cached data. * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */ + * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. 
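The ergo() assertions in obd_connect() and obd_reconnect() above check that the flags granted by the target are a subset of the flags the client requested. The subset test is the usual bitmask identity, shown standalone below:

        #include <assert.h>
        #include <stdint.h>

        /* Subset check behind the post-connect LASSERTs: every bit the server
         * granted must also have been requested by the client. */
        static int flags_are_subset(uint64_t granted, uint64_t requested)
        {
                return (granted & requested) == granted;
        }

        int main(void)
        {
                assert(flags_are_subset(0x5, 0x7));     /* ok: 0b101 within 0b111 */
                assert(!flags_are_subset(0x9, 0x7));    /* bit 3 was never requested */
                return 0;
        }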
+ */ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_statfs *osfs, __u64 max_age, __u32 flags) @@ -1081,7 +1090,7 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, int rc = 0; struct obd_device *obd = exp->exp_obd; - if (obd == NULL) + if (!obd) return -EINVAL; OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP); @@ -1155,7 +1164,7 @@ static inline int obd_adjust_kms(struct obd_export *exp, } static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void *uarg) + int len, void *karg, void __user *uarg) { int rc; @@ -1205,9 +1214,10 @@ static inline int obd_notify(struct obd_device *obd, return rc; /* the check for async_recov is a complete hack - I'm hereby - overloading the meaning to also mean "this was called from - mds_postsetup". I know that my mds is able to handle notifies - by this point, and it needs to get them to execute mds_postrecov. */ + * overloading the meaning to also mean "this was called from + * mds_postsetup". I know that my mds is able to handle notifies + * by this point, and it needs to get them to execute mds_postrecov. + */ if (!obd->obd_set_up && !obd->obd_async_recov) { CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name); return -EINVAL; @@ -1241,7 +1251,7 @@ static inline int obd_notify_observer(struct obd_device *observer, * Also, call non-obd listener, if any */ onu = &observer->obd_upcall; - if (onu->onu_upcall != NULL) + if (onu->onu_upcall) rc2 = onu->onu_upcall(observer, observed, ev, onu->onu_owner, NULL); else @@ -1287,7 +1297,7 @@ static inline int obd_health_check(const struct lu_env *env, int rc; /* don't use EXP_CHECK_DT_OP, because NULL method is normal here */ - if (obd == NULL || !OBT(obd)) { + if (!obd || !OBT(obd)) { CERROR("cleaned up obd\n"); return -EOPNOTSUPP; } @@ -1318,57 +1328,6 @@ static inline int obd_register_observer(struct obd_device *obd, return 0; } -#if 0 -static inline int obd_register_page_removal_cb(struct obd_export *exp, - obd_page_removal_cb_t cb, - obd_pin_extent_cb pin_cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb); - - rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb); - return rc; -} - -static inline int obd_unregister_page_removal_cb(struct obd_export *exp, - obd_page_removal_cb_t cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb); - - rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb); - return rc; -} - -static inline int obd_register_lock_cancel_cb(struct obd_export *exp, - obd_lock_cancel_cb cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb); - - rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb); - return rc; -} - -static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp, - obd_lock_cancel_cb cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb); - - rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb); - return rc; -} -#endif - /* metadata helpers */ static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid) { @@ -1392,7 +1351,7 @@ static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data, } static inline int md_null_inode(struct obd_export *exp, - 
const struct lu_fid *fid) + const struct lu_fid *fid) { int rc; @@ -1657,8 +1616,8 @@ static inline int md_set_lock_data(struct obd_export *exp, static inline int md_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { int rc; @@ -1671,12 +1630,12 @@ static inline int md_cancel_unused(struct obd_export *exp, return rc; } -static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, - ldlm_type_t type, - ldlm_policy_data_t *policy, - ldlm_mode_t mode, - struct lustre_handle *lockh) +static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh) { EXP_CHECK_MD_OP(exp, lock_match); EXP_MD_COUNTER_INCREMENT(exp, lock_match); @@ -1759,7 +1718,8 @@ struct lwp_register_item { /* I'm as embarrassed about this as you are. * * <shaver> // XXX do not look into _superhack with remaining eye - * <shaver> // XXX if this were any uglier, I'd get my own show on MTV */ + * <shaver> // XXX if this were any uglier, I'd get my own show on MTV + */ extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c); /* obd_mount.c */ @@ -1774,7 +1734,7 @@ void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out); /* lustre_peer.c */ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index); int class_add_uuid(const char *uuid, __u64 nid); -int class_del_uuid (const char *uuid); +int class_del_uuid(const char *uuid); int class_check_uuid(struct obd_uuid *uuid, __u64 nid); void class_init_uuidlist(void); void class_exit_uuidlist(void); diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index d031437c0528..225262fa67b6 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ -47,7 +47,8 @@ extern unsigned int obd_debug_peer_on_timeout; extern unsigned int obd_dump_on_timeout; extern unsigned int obd_dump_on_eviction; /* obd_timeout should only be used for recovery, not for - networking / disk / timings affected by load (use Adaptive Timeouts) */ + * networking / disk / timings affected by load (use Adaptive Timeouts) + */ extern unsigned int obd_timeout; /* seconds */ extern unsigned int obd_timeout_set; extern unsigned int at_min; @@ -104,18 +105,21 @@ extern char obd_jobid_var[]; * failover targets the client only pings one server at a time, and pings * can be lost on a loaded network. Since eviction has serious consequences, * and there's no urgent need to evict a client just because it's idle, we - * should be very conservative here. */ + * should be very conservative here. 
+ */ #define PING_EVICT_TIMEOUT (PING_INTERVAL * 6) #define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */ #define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */ - /* Max connect interval for nonresponsive servers; ~50s to avoid building up - connect requests in the LND queues, but within obd_timeout so we don't - miss the recovery window */ +/* Max connect interval for nonresponsive servers; ~50s to avoid building up + * connect requests in the LND queues, but within obd_timeout so we don't + * miss the recovery window + */ #define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout)) #define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */ /* In general this should be low to have quick detection of a system - running on a backup server. (If it's too low, import_select_connection - will increase the timeout anyhow.) */ + * running on a backup server. (If it's too low, import_select_connection + * will increase the timeout anyhow.) + */ #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20) /* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */ #define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \ @@ -507,7 +511,6 @@ extern char obd_jobid_var[]; do { \ struct portals_handle *__h = (handle); \ \ - LASSERT(handle != NULL); \ __h->h_cookie = (unsigned long)(ptr); \ __h->h_size = (size); \ call_rcu(&__h->h_rcu, class_handle_free_cb); \ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h index 41f3d810aea4..5e998362e44b 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h +++ b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h @@ -15,37 +15,29 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. * * Author: Nathan Rutman <nathan.rutman@sun.com> * - * libcfs/include/libcfs/libcfs_kernelcomm.h - * * Kernel <-> userspace communication routines. * The definitions below are used in the kernel and userspace. - * */ -#ifndef __LIBCFS_KERNELCOMM_H__ -#define __LIBCFS_KERNELCOMM_H__ +#ifndef __UAPI_KERNELCOMM_H__ +#define __UAPI_KERNELCOMM_H__ -#ifndef __LIBCFS_LIBCFS_H__ -#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead -#endif +#include <linux/types.h> /* KUC message header. * All current and future KUC messages should use this header. 
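As a worked example of the connection macros above, taking the long-standing obd_timeout default of 100 seconds (an assumption; the default is set elsewhere): CONNECTION_SWITCH_MAX = min(50, max(5, 100)) = 50, INITIAL_CONNECT_TIMEOUT = max(5, 100 / 20) = 5, so RECONNECT_DELAY_MAX = 50 + 5 + 5 = 60 seconds. The same arithmetic as a compilable check:

        #include <stdio.h>

        #define MIN(a, b) ((a) < (b) ? (a) : (b))
        #define MAX(a, b) ((a) > (b) ? (a) : (b))

        int main(void)
        {
                unsigned int obd_timeout = 100;         /* assumed default, in seconds */
                unsigned int sw_min = 5, sw_inc = 5;
                unsigned int sw_max = MIN(50u, MAX(sw_min, obd_timeout));
                unsigned int initial = MAX(sw_min, obd_timeout / 20);

                /* prints "50 5 60" for the assumed default */
                printf("%u %u %u\n", sw_max, initial, sw_max + sw_inc + initial);
                return 0;
        }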
@@ -53,66 +45,50 @@ */ struct kuc_hdr { __u16 kuc_magic; - __u8 kuc_transport; /* Each new Lustre feature should use a different - transport */ + /* Each new Lustre feature should use a different transport */ + __u8 kuc_transport; __u8 kuc_flags; - __u16 kuc_msgtype; /* Message type or opcode, transport-specific */ - __u16 kuc_msglen; /* Including header */ + /* Message type or opcode, transport-specific */ + __u16 kuc_msgtype; + /* Including header */ + __u16 kuc_msglen; } __aligned(sizeof(__u64)); -#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE) +#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr) + CR_MAXSIZE) -#define KUC_MAGIC 0x191C /*Lustre9etLinC */ -#define KUC_FL_BLOCK 0x01 /* Wait for send */ +#define KUC_MAGIC 0x191C /*Lustre9etLinC */ /* kuc_msgtype values are defined in each transport */ enum kuc_transport_type { - KUC_TRANSPORT_GENERIC = 1, - KUC_TRANSPORT_HSM = 2, - KUC_TRANSPORT_CHANGELOG = 3, + KUC_TRANSPORT_GENERIC = 1, + KUC_TRANSPORT_HSM = 2, + KUC_TRANSPORT_CHANGELOG = 3, }; enum kuc_generic_message_type { - KUC_MSG_SHUTDOWN = 1, + KUC_MSG_SHUTDOWN = 1, }; -/* prototype for callback function on kuc groups */ -typedef int (*libcfs_kkuc_cb_t)(__u32 data, void *cb_arg); - /* KUC Broadcast Groups. This determines which userspace process hears which * messages. Mutliple transports may be used within a group, or multiple * groups may use the same transport. Broadcast * groups need not be used if e.g. a UID is specified instead; * use group 0 to signify unicast. */ -#define KUC_GRP_HSM 0x02 -#define KUC_GRP_MAX KUC_GRP_HSM - -/* Kernel methods */ -int libcfs_kkuc_msg_put(struct file *fp, void *payload); -int libcfs_kkuc_group_put(int group, void *payload); -int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group, - __u32 data); -int libcfs_kkuc_group_rem(int uid, int group); -int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, - void *cb_arg); +#define KUC_GRP_HSM 0x02 +#define KUC_GRP_MAX KUC_GRP_HSM #define LK_FLG_STOP 0x01 +#define LK_NOFD -1U /* kernelcomm control structure, passed from userspace to kernel */ -typedef struct lustre_kernelcomm { +struct lustre_kernelcomm { __u32 lk_wfd; __u32 lk_rfd; __u32 lk_uid; __u32 lk_group; __u32 lk_data; __u32 lk_flags; -} __packed lustre_kernelcomm; - -/* Userspace methods */ -int libcfs_ukuc_start(lustre_kernelcomm *l, int groups); -int libcfs_ukuc_stop(lustre_kernelcomm *l); -int libcfs_ukuc_msg_get(lustre_kernelcomm *l, char *buf, int maxsize, - int transport); +} __packed; -#endif /* __LIBCFS_KERNELCOMM_H__ */ +#endif /* __UAPI_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/lclient/glimpse.c index 8533a1e539f4..c4e8a0878ac8 100644 --- a/drivers/staging/lustre/lustre/lclient/glimpse.c +++ b/drivers/staging/lustre/lustre/lclient/glimpse.c @@ -109,7 +109,8 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, * if there were no conflicting locks. If there * were conflicting locks, enqueuing or waiting * fails with -ENAVAIL, but valid inode - * attributes are returned anyway. */ + * attributes are returned anyway. 
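The relocated uapi header above keeps the KUC framing itself unchanged: every message leads with a kuc_hdr holding the magic, a per-feature transport id, a transport-specific msgtype and the total length including the header, padded to 8-byte alignment. A userspace-style sketch of filling in a minimal shutdown message (constants copied from the hunk, the helper name is made up):

        #include <stdint.h>
        #include <string.h>

        #define KUC_MAGIC               0x191C
        #define KUC_TRANSPORT_GENERIC   1
        #define KUC_MSG_SHUTDOWN        1

        struct kuc_hdr {
                uint16_t kuc_magic;
                uint8_t  kuc_transport;         /* one value per Lustre feature */
                uint8_t  kuc_flags;
                uint16_t kuc_msgtype;           /* transport-specific opcode */
                uint16_t kuc_msglen;            /* total length, header included */
        } __attribute__((aligned(sizeof(uint64_t))));

        /* Build a minimal "shutdown" message with no payload. */
        void kuc_fill_shutdown(struct kuc_hdr *h)
        {
                memset(h, 0, sizeof(*h));
                h->kuc_magic     = KUC_MAGIC;
                h->kuc_transport = KUC_TRANSPORT_GENERIC;
                h->kuc_msgtype   = KUC_MSG_SHUTDOWN;
                h->kuc_msglen    = sizeof(*h);
        }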
+ */ *descr = whole_file; descr->cld_obj = clob; descr->cld_mode = CLM_PHANTOM; diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c index 34dde7dede74..aced41ab93a1 100644 --- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c +++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c @@ -116,8 +116,8 @@ void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key) { struct ccc_thread_info *info; - info = kmem_cache_alloc(ccc_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(ccc_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -135,8 +135,8 @@ void *ccc_session_key_init(const struct lu_context *ctx, { struct ccc_session *session; - session = kmem_cache_alloc(ccc_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } @@ -173,7 +173,7 @@ int ccc_device_init(const struct lu_env *env, struct lu_device *d, vdv = lu2ccc_dev(d); vdv->cdv_next = lu2cl_dev(next); - LASSERT(d->ld_site != NULL && next->ld_type != NULL); + LASSERT(d->ld_site && next->ld_type); next->ld_site = d->ld_site; rc = next->ld_type->ldt_ops->ldto_device_init( env, next, next->ld_type->ldt_name, NULL); @@ -211,12 +211,12 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env, vdv->cdv_cl.cd_ops = clops; site = kzalloc(sizeof(*site), GFP_NOFS); - if (site != NULL) { + if (site) { rc = cl_site_init(site, &vdv->cdv_cl); if (rc == 0) rc = lu_site_init_finish(&site->cs_lu); else { - LASSERT(lud->ld_site == NULL); + LASSERT(!lud->ld_site); CERROR("Cannot init lu_site, rc %d.\n", rc); kfree(site); } @@ -236,7 +236,7 @@ struct lu_device *ccc_device_free(const struct lu_env *env, struct cl_site *site = lu2cl_site(d->ld_site); struct lu_device *next = cl2lu_dev(vdv->cdv_next); - if (d->ld_site != NULL) { + if (d->ld_site) { cl_site_fini(site); kfree(site); } @@ -251,8 +251,8 @@ int ccc_req_init(const struct lu_env *env, struct cl_device *dev, struct ccc_req *vrq; int result; - vrq = kmem_cache_alloc(ccc_req_kmem, GFP_NOFS | __GFP_ZERO); - if (vrq != NULL) { + vrq = kmem_cache_zalloc(ccc_req_kmem, GFP_NOFS); + if (vrq) { cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops); result = 0; } else @@ -304,7 +304,7 @@ out_kmem: void ccc_global_fini(struct lu_device_type *device_type) { - if (ccc_inode_fini_env != NULL) { + if (ccc_inode_fini_env) { cl_env_put(ccc_inode_fini_env, &dummy_refcheck); ccc_inode_fini_env = NULL; } @@ -327,8 +327,8 @@ struct lu_object *ccc_object_alloc(const struct lu_env *env, struct ccc_object *vob; struct lu_object *obj; - vob = kmem_cache_alloc(ccc_object_kmem, GFP_NOFS | __GFP_ZERO); - if (vob != NULL) { + vob = kmem_cache_zalloc(ccc_object_kmem, GFP_NOFS); + if (vob) { struct cl_object_header *hdr; obj = ccc2lu(vob); @@ -365,7 +365,7 @@ int ccc_object_init(const struct lu_env *env, struct lu_object *obj, under = &dev->cdv_next->cd_lu_dev; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below != NULL) { + if (below) { const struct cl_object_conf *cconf; cconf = lu2cl_conf(conf); @@ -396,8 +396,8 @@ int ccc_lock_init(const struct lu_env *env, CLOBINVRNT(env, obj, ccc_object_invariant(obj)); - clk = kmem_cache_alloc(ccc_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (clk != NULL) { + clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS); + if (clk) { cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops); result = 0; } else @@ 
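The lcommon_cl.c hunks above (and the ldlm_interval_alloc() one further down) fold kmem_cache_alloc(cache, GFP_NOFS | __GFP_ZERO) into the equivalent kmem_cache_zalloc(cache, GFP_NOFS) and switch NULL tests to the !ptr form. A kernel-style sketch of the resulting pattern, with a hypothetical cache and type, not compilable outside the kernel:

        /* Sketch only; foo_kmem and struct foo_session are hypothetical. */
        static struct kmem_cache *foo_kmem;

        static void *foo_session_init(void)
        {
                struct foo_session *session;

                /* zalloc == alloc + __GFP_ZERO, one call instead of two flags */
                session = kmem_cache_zalloc(foo_kmem, GFP_NOFS);
                if (!session)
                        return ERR_PTR(-ENOMEM);
                return session;
        }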
-613,7 +613,8 @@ void ccc_lock_state(const struct lu_env *env, * stale i_size when doing appending writes and effectively * cancel the result of the truncate. Getting the * ll_inode_size_lock() after the enqueue maintains the DLM - * -> ll_inode_size_lock() acquiring order. */ + * -> ll_inode_size_lock() acquiring order. + */ if (lock->cll_descr.cld_start == 0 && lock->cll_descr.cld_end == CL_PAGE_EOF) cl_merge_lvb(env, inode); @@ -660,7 +661,7 @@ void ccc_io_update_iov(const struct lu_env *env, { size_t size = io->u.ci_rw.crw_count; - if (!cl_is_normalio(env, io) || cio->cui_iter == NULL) + if (!cl_is_normalio(env, io) || !cio->cui_iter) return; iov_iter_truncate(cio->cui_iter, size); @@ -749,12 +750,13 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj, */ ccc_object_size_unlock(obj); result = cl_glimpse_lock(env, io, inode, obj, 0); - if (result == 0 && exceed != NULL) { + if (result == 0 && exceed) { /* If objective page index exceed end-of-file * page index, return directly. Do not expect * kernel will check such case correctly. * linux-2.6.18-128.1.1 miss to do that. - * --bug 17336 */ + * --bug 17336 + */ loff_t size = cl_isize_read(inode); loff_t cur_index = start >> PAGE_CACHE_SHIFT; loff_t size_index = (size - 1) >> @@ -884,7 +886,8 @@ again: if (attr->ia_valid & ATTR_FILE) /* populate the file descriptor for ftruncate to honor - * group lock - see LU-787 */ + * group lock - see LU-787 + */ cio->cui_fd = cl_iattr2fd(inode, attr); result = cl_io_loop(env, io); @@ -896,7 +899,8 @@ again: goto again; /* HSM import case: file is released, cannot be restored * no need to fail except if restore registration failed - * with -ENODATA */ + * with -ENODATA + */ if (result == -ENODATA && io->ci_restore_needed && io->ci_result != -ENODATA) result = 0; @@ -986,17 +990,6 @@ struct inode *ccc_object_inode(const struct cl_object *obj) } /** - * Returns a pointer to cl_page associated with \a vmpage, without acquiring - * additional reference to the resulting page. This is an unsafe version of - * cl_vmpage_page() that can only be used under vmpage lock. - */ -struct cl_page *ccc_vmpage_page_transient(struct page *vmpage) -{ - KLASSERT(PageLocked(vmpage)); - return (struct cl_page *)vmpage->private; -} - -/** * Initialize or update CLIO structures for regular files when new * meta-data arrives from the server. * @@ -1033,11 +1026,12 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md) fid = &lli->lli_fid; LASSERT(fid_is_sane(fid)); - if (lli->lli_clob == NULL) { + if (!lli->lli_clob) { /* clob is slave of inode, empty lli_clob means for new inode, * there is no clob in cache with the given fid, so it is * unnecessary to perform lookup-alloc-lookup-insert, just - * alloc and insert directly. */ + * alloc and insert directly. 
+ */ LASSERT(inode->i_state & I_NEW); conf.coc_lu.loc_flags = LOC_F_NEW; clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev), @@ -1109,7 +1103,7 @@ void cl_inode_fini(struct inode *inode) int refcheck; int emergency; - if (clob != NULL) { + if (clob) { void *cookie; cookie = cl_env_reenter(); @@ -1117,7 +1111,7 @@ void cl_inode_fini(struct inode *inode) emergency = IS_ERR(env); if (emergency) { mutex_lock(&ccc_inode_fini_guard); - LASSERT(ccc_inode_fini_env != NULL); + LASSERT(ccc_inode_fini_env); cl_env_implant(ccc_inode_fini_env, &refcheck); env = ccc_inode_fini_env; } @@ -1162,7 +1156,8 @@ __u16 ll_dirent_type_get(struct lu_dirent *ent) } /** - * build inode number from passed @fid */ + * build inode number from passed @fid + */ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32) { if (BITS_PER_LONG == 32 || api32) @@ -1173,7 +1168,8 @@ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32) /** * build inode generation from passed @fid. If our FID overflows the 32-bit - * inode number then return a non-zero generation to distinguish them. */ + * inode number then return a non-zero generation to distinguish them. + */ __u32 cl_fid_build_gen(const struct lu_fid *fid) { __u32 gen; @@ -1194,7 +1190,8 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid) * have to wait for the refcount to become zero to destroy the older layout. * * Notice that the lsm returned by this function may not be valid unless called - * inside layout lock - MDS_INODELOCK_LAYOUT. */ + * inside layout lock - MDS_INODELOCK_LAYOUT. + */ struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode) { return lov_lsm_get(cl_i2info(inode)->lli_clob); diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c index 8389a0edad36..d80bcedd78d1 100644 --- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c +++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c @@ -48,7 +48,8 @@ /* Initialize the default and maximum LOV EA and cookie sizes. This allows * us to make MDS RPCs with large enough reply buffers to hold the * maximum-sized (= maximum striped) EA and cookie without having to - * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */ + * calculate this (via a call into the LOV + OSCs) each time we make an RPC. + */ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) { struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 }; @@ -74,7 +75,8 @@ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) cookiesize = stripes * sizeof(struct llog_cookie); /* default cookiesize is 0 because from 2.4 server doesn't send - * llog cookies to client. */ + * llog cookies to client. 
+ */ CDEBUG(D_HA, "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n", def_easize, easize, cookiesize); diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c index a2ea8e5b93d8..323060626fdf 100644 --- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c +++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c @@ -49,13 +49,11 @@ enum { static inline int node_is_left_child(struct interval_node *node) { - LASSERT(node->in_parent != NULL); return node == node->in_parent->in_left; } static inline int node_is_right_child(struct interval_node *node) { - LASSERT(node->in_parent != NULL); return node == node->in_parent->in_right; } @@ -135,7 +133,8 @@ static void __rotate_change_maxhigh(struct interval_node *node, /* The left rotation "pivots" around the link from node to node->right, and * - node will be linked to node->right's left child, and - * - node->right's left child will be linked to node's right child. */ + * - node->right's left child will be linked to node's right child. + */ static void __rotate_left(struct interval_node *node, struct interval_node **root) { @@ -164,7 +163,8 @@ static void __rotate_left(struct interval_node *node, /* The right rotation "pivots" around the link from node to node->left, and * - node will be linked to node->left's right child, and - * - node->left's right child will be linked to node's left child. */ + * - node->left's right child will be linked to node's left child. + */ static void __rotate_right(struct interval_node *node, struct interval_node **root) { diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c index 9c70f31ea56e..a803e200f206 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c @@ -62,7 +62,8 @@ * is the "highest lock". This function returns the new KMS value. * Caller must hold lr_lock already. * - * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */ + * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! + */ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) { struct ldlm_resource *res = lock->l_resource; @@ -72,7 +73,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) /* don't let another thread in ldlm_extent_shift_kms race in * just after we finish and take our lock into account in its - * calculation of the kms */ + * calculation of the kms + */ lock->l_flags |= LDLM_FL_KMS_IGNORE; list_for_each(tmp, &res->lr_granted) { @@ -85,7 +87,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) return old_kms; /* This extent _has_ to be smaller than old_kms (checked above) - * so kms can only ever be smaller or the same as old_kms. */ + * so kms can only ever be smaller or the same as old_kms. 
+ */ if (lck->l_policy_data.l_extent.end + 1 > kms) kms = lck->l_policy_data.l_extent.end + 1; } @@ -112,8 +115,8 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock) struct ldlm_interval *node; LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); - node = kmem_cache_alloc(ldlm_interval_slab, GFP_NOFS | __GFP_ZERO); - if (node == NULL) + node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS); + if (!node) return NULL; INIT_LIST_HEAD(&node->li_group); @@ -134,7 +137,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l) { struct ldlm_interval *n = l->l_tree_node; - if (n == NULL) + if (!n) return NULL; LASSERT(!list_empty(&n->li_group)); @@ -144,7 +147,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l) return list_empty(&n->li_group) ? n : NULL; } -static inline int lock_mode_to_index(ldlm_mode_t mode) +static inline int lock_mode_to_index(enum ldlm_mode mode) { int index; @@ -168,7 +171,7 @@ void ldlm_extent_add_lock(struct ldlm_resource *res, LASSERT(lock->l_granted_mode == lock->l_req_mode); node = lock->l_tree_node; - LASSERT(node != NULL); + LASSERT(node); LASSERT(!interval_is_intree(&node->li_node)); idx = lock_mode_to_index(lock->l_granted_mode); @@ -185,14 +188,14 @@ void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_interval *tmp; tmp = ldlm_interval_detach(lock); - LASSERT(tmp != NULL); ldlm_interval_free(tmp); ldlm_interval_attach(to_ldlm_interval(found), lock); } res->lr_itree[idx].lit_size++; /* even though we use interval tree to manage the extent lock, we also - * add the locks into grant list, for debug purpose, .. */ + * add the locks into grant list, for debug purpose, .. + */ ldlm_resource_add_lock(res, &res->lr_granted, lock); } @@ -211,7 +214,7 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock) LASSERT(lock->l_granted_mode == 1 << idx); tree = &res->lr_itree[idx]; - LASSERT(tree->lit_root != NULL); /* assure the tree is not null */ + LASSERT(tree->lit_root); /* assure the tree is not null */ tree->lit_size--; node = ldlm_interval_detach(lock); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c index 4310154e1728..b88b78606aee 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c @@ -92,7 +92,7 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new) } static inline void -ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) +ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags) { LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)", mode, flags); @@ -107,7 +107,8 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING; /* when reaching here, it is under lock_res_and_lock(). Thus, - need call the nolock version of ldlm_lock_decref_internal*/ + * need call the nolock version of ldlm_lock_decref_internal + */ ldlm_lock_decref_internal_nolock(lock, mode); } @@ -133,7 +134,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) * would be collected and ASTs sent. 
*/ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, - int first_enq, ldlm_error_t *err, + int first_enq, enum ldlm_error *err, struct list_head *work_list) { struct ldlm_resource *res = req->l_resource; @@ -143,7 +144,7 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, struct ldlm_lock *lock = NULL; struct ldlm_lock *new = req; struct ldlm_lock *new2 = NULL; - ldlm_mode_t mode = req->l_req_mode; + enum ldlm_mode mode = req->l_req_mode; int added = (mode == LCK_NL); int overlaps = 0; int splitted = 0; @@ -159,13 +160,15 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, *err = ELDLM_OK; /* No blocking ASTs are sent to the clients for - * Posix file & record locks */ + * Posix file & record locks + */ req->l_blocking_ast = NULL; reprocess: if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) { /* This loop determines where this processes locks start - * in the resource lr_granted list. */ + * in the resource lr_granted list. + */ list_for_each(tmp, &res->lr_granted) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -180,7 +183,8 @@ reprocess: lockmode_verify(mode); /* This loop determines if there are existing locks - * that conflict with the new lock request. */ + * that conflict with the new lock request. + */ list_for_each(tmp, &res->lr_granted) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -238,8 +242,8 @@ reprocess: } /* Scan the locks owned by this process that overlap this request. - * We may have to merge or split existing locks. */ - + * We may have to merge or split existing locks. + */ if (!ownlocks) ownlocks = &res->lr_granted; @@ -253,7 +257,8 @@ reprocess: /* If the modes are the same then we need to process * locks that overlap OR adjoin the new lock. The extra * logic condition is necessary to deal with arithmetic - * overflow and underflow. */ + * overflow and underflow. + */ if ((new->l_policy_data.l_flock.start > (lock->l_policy_data.l_flock.end + 1)) && (lock->l_policy_data.l_flock.end != @@ -327,11 +332,13 @@ reprocess: * with the request but this would complicate the reply * processing since updates to req get reflected in the * reply. The client side replays the lock request so - * it must see the original lock data in the reply. */ + * it must see the original lock data in the reply. + */ /* XXX - if ldlm_lock_new() can sleep we should * release the lr_lock, allocate the new lock, - * and restart processing this lock. */ + * and restart processing this lock. + */ if (!new2) { unlock_res_and_lock(req); new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK, @@ -361,7 +368,7 @@ reprocess: lock->l_policy_data.l_flock.start = new->l_policy_data.l_flock.end + 1; new2->l_conn_export = lock->l_conn_export; - if (lock->l_export != NULL) { + if (lock->l_export) { new2->l_export = class_export_lock_get(lock->l_export, new2); if (new2->l_export->exp_lock_hash && @@ -381,7 +388,7 @@ reprocess: } /* if new2 is created but never used, destroy it*/ - if (splitted == 0 && new2 != NULL) + if (splitted == 0 && new2) ldlm_lock_destroy_nolock(new2); /* At this point we're granting the lock request. */ @@ -396,7 +403,8 @@ reprocess: if (*flags != LDLM_FL_WAIT_NOREPROC) { /* The only one possible case for client-side calls flock * policy function is ldlm_flock_completion_ast inside which - * carries LDLM_FL_WAIT_NOREPROC flag. */ + * carries LDLM_FL_WAIT_NOREPROC flag. 
+ */ CERROR("Illegal parameter for client-side-only module.\n"); LBUG(); } @@ -404,7 +412,8 @@ reprocess: /* In case we're reprocessing the requested lock we can't destroy * it until after calling ldlm_add_ast_work_item() above so that laawi() * can bump the reference count on \a req. Otherwise \a req - * could be freed before the completion AST can be sent. */ + * could be freed before the completion AST can be sent. + */ if (added) ldlm_flock_destroy(req, mode, *flags); @@ -449,7 +458,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) struct obd_import *imp = NULL; struct ldlm_flock_wait_data fwd; struct l_wait_info lwi; - ldlm_error_t err; + enum ldlm_error err; int rc = 0; CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n", @@ -458,12 +467,12 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) /* Import invalidation. We need to actually release the lock * references being held, so that it can go away. No point in * holding the lock even if app still believes it has it, since - * server already dropped it anyway. Only for granted locks too. */ + * server already dropped it anyway. Only for granted locks too. + */ if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) == (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) { if (lock->l_req_mode == lock->l_granted_mode && - lock->l_granted_mode != LCK_NL && - data == NULL) + lock->l_granted_mode != LCK_NL && !data) ldlm_lock_decref_internal(lock, lock->l_req_mode); /* Need to wake up the waiter if we were evicted */ @@ -475,7 +484,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV))) { - if (data == NULL) + if (!data) /* mds granted the lock in the reply */ goto granted; /* CP AST RPC: lock get granted, wake it up */ @@ -488,10 +497,10 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) obd = class_exp2obd(lock->l_conn_export); /* if this is a local lock, there is no import */ - if (obd != NULL) + if (obd) imp = obd->u.cli.cl_import; - if (imp != NULL) { + if (imp) { spin_lock(&imp->imp_lock); fwd.fwd_generation = imp->imp_generation; spin_unlock(&imp->imp_lock); @@ -540,7 +549,8 @@ granted: } else if (flags & LDLM_FL_TEST_LOCK) { /* fcntl(F_GETLK) request */ /* The old mode was saved in getlk->fl_type so that if the mode - * in the lock changes we can decref the appropriate refcount.*/ + * in the lock changes we can decref the appropriate refcount. + */ ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC); switch (lock->l_granted_mode) { case LCK_PR: @@ -559,7 +569,8 @@ granted: __u64 noreproc = LDLM_FL_WAIT_NOREPROC; /* We need to reprocess the lock to do merges or splits - * with existing locks owned by this process. */ + * with existing locks owned by this process. + */ ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL); } unlock_res_and_lock(lock); @@ -576,7 +587,8 @@ void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy, lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid; /* Compat code, old clients had no idea about owner field and * relied solely on pid for ownership. 
Introduced in LU-104, 2.1, - * April 2011 */ + * April 2011 + */ lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid; } diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h index 849cc98df7dd..e21373e7306f 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h @@ -96,14 +96,15 @@ enum { LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */ LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */ LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither - * sending nor waiting for any rpcs) */ + * sending nor waiting for any rpcs) + */ }; int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, - ldlm_cancel_flags_t sync, int flags); + enum ldlm_cancel_flags sync, int flags); int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels, int count, int max, - ldlm_cancel_flags_t cancel_flags, int flags); + enum ldlm_cancel_flags cancel_flags, int flags); extern int ldlm_enqueue_min; /* ldlm_resource.c */ @@ -133,11 +134,11 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, enum req_location loc, void *data, int size); struct ldlm_lock * ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *, - ldlm_type_t type, ldlm_mode_t, + enum ldlm_type type, enum ldlm_mode mode, const struct ldlm_callback_suite *cbs, void *data, __u32 lvb_len, enum lvb_type lvb_type); -ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **, - void *cookie, __u64 *flags); +enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **, + void *cookie, __u64 *flags); void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode); void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode); void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode); @@ -154,7 +155,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags); + enum ldlm_cancel_flags cancel_flags); void ldlm_handle_bl_callback(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c index 3c8d4413d976..b586d5a88d00 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c @@ -219,7 +219,8 @@ EXPORT_SYMBOL(client_import_find_conn); void client_destroy_import(struct obd_import *imp) { /* Drop security policy instance after all RPCs have finished/aborted - * to let all busy contexts be released. */ + * to let all busy contexts be released. + */ class_import_get(imp); class_destroy_import(imp); sptlrpc_import_sec_put(imp); @@ -227,29 +228,6 @@ void client_destroy_import(struct obd_import *imp) } EXPORT_SYMBOL(client_destroy_import); -/** - * Check whether or not the OSC is on MDT. - * In the config log, - * osc on MDT - * setup 0:{fsname}-OSTxxxx-osc[-MDTxxxx] 1:lustre-OST0000_UUID 2:NID - * osc on client - * setup 0:{fsname}-OSTxxxx-osc 1:lustre-OST0000_UUID 2:NID - * - **/ -static int osc_on_mdt(char *obdname) -{ - char *ptr; - - ptr = strrchr(obdname, '-'); - if (ptr == NULL) - return 0; - - if (strncmp(ptr + 1, "MDT", 3) == 0) - return 1; - - return 0; -} - /* Configure an RPC client OBD device. 
* * lcfg parameters: @@ -264,11 +242,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) struct obd_uuid server_uuid; int rq_portal, rp_portal, connect_op; char *name = obddev->obd_type->typ_name; - ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN; + enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN; int rc; /* In a more perfect world, we would hang a ptlrpc_client off of - * obd_type and just use the values from there. */ + * obd_type and just use the values from there. + */ if (!strcmp(name, LUSTRE_OSC_NAME)) { rq_portal = OST_REQUEST_PORTAL; rp_portal = OSC_REPLY_PORTAL; @@ -284,22 +263,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) cli->cl_sp_me = LUSTRE_SP_CLI; cli->cl_sp_to = LUSTRE_SP_MDT; ns_type = LDLM_NS_TYPE_MDC; - } else if (!strcmp(name, LUSTRE_OSP_NAME)) { - if (strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL) { - /* OSP_on_MDT for other MDTs */ - connect_op = MDS_CONNECT; - cli->cl_sp_to = LUSTRE_SP_MDT; - ns_type = LDLM_NS_TYPE_MDC; - rq_portal = OUT_PORTAL; - } else { - /* OSP on MDT for OST */ - connect_op = OST_CONNECT; - cli->cl_sp_to = LUSTRE_SP_OST; - ns_type = LDLM_NS_TYPE_OSC; - rq_portal = OST_REQUEST_PORTAL; - } - rp_portal = OSC_REPLY_PORTAL; - cli->cl_sp_me = LUSTRE_SP_CLI; } else if (!strcmp(name, LUSTRE_MGC_NAME)) { rq_portal = MGS_REQUEST_PORTAL; rp_portal = MGC_REPLY_PORTAL; @@ -387,7 +350,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) /* This value may be reduced at connect time in * ptlrpc_connect_interpret() . We initialize it to only * 1MB until we know what the performance looks like. - * In the future this should likely be increased. LU-1431 */ + * In the future this should likely be increased. LU-1431 + */ cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, LNET_MTU >> PAGE_CACHE_SHIFT); @@ -400,10 +364,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { cli->cl_max_rpcs_in_flight = 4; } else { - if (osc_on_mdt(obddev->obd_name)) - cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT; - else - cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; + cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; } rc = ldlm_get_ref(); if (rc) { @@ -415,7 +376,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) &obddev->obd_ldlm_client); imp = class_new_import(obddev); - if (imp == NULL) { + if (!imp) { rc = -ENOENT; goto err_ldlm; } @@ -451,7 +412,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) LDLM_NAMESPACE_CLIENT, LDLM_NAMESPACE_GREEDY, ns_type); - if (obddev->obd_namespace == NULL) { + if (!obddev->obd_namespace) { CERROR("Unable to create client namespace - %s\n", obddev->obd_name); rc = -ENOMEM; @@ -477,7 +438,7 @@ int client_obd_cleanup(struct obd_device *obddev) ldlm_namespace_free_post(obddev->obd_namespace); obddev->obd_namespace = NULL; - LASSERT(obddev->u.cli.cl_import == NULL); + LASSERT(!obddev->u.cli.cl_import); ldlm_put_ref(); return 0; @@ -528,7 +489,7 @@ int client_connect_import(const struct lu_env *env, LASSERT(imp->imp_state == LUSTRE_IMP_DISCON); goto out_ldlm; } - LASSERT(*exp != NULL && (*exp)->exp_connection); + LASSERT(*exp && (*exp)->exp_connection); if (data) { LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) == @@ -587,17 +548,19 @@ int client_disconnect_export(struct obd_export *exp) /* Mark import deactivated now, so we don't try to reconnect if any * of the cleanup RPCs fails (e.g. 
LDLM cancel, etc). We don't - * fully deactivate the import, or that would drop all requests. */ + * fully deactivate the import, or that would drop all requests. + */ spin_lock(&imp->imp_lock); imp->imp_deactive = 1; spin_unlock(&imp->imp_lock); /* Some non-replayable imports (MDS's OSCs) are pinged, so just * delete it regardless. (It's safe to delete an import that was - * never added.) */ + * never added.) + */ (void)ptlrpc_pinger_del_import(imp); - if (obd->obd_namespace != NULL) { + if (obd->obd_namespace) { /* obd_force == local only */ ldlm_cli_cancel_unused(obd->obd_namespace, NULL, obd->obd_force ? LCF_LOCAL : 0, NULL); @@ -606,7 +569,8 @@ int client_disconnect_export(struct obd_export *exp) } /* There's no need to hold sem while disconnecting an import, - * and it may actually cause deadlock in GSS. */ + * and it may actually cause deadlock in GSS. + */ up_write(&cli->cl_sem); rc = ptlrpc_disconnect_import(imp, 0); down_write(&cli->cl_sem); @@ -615,7 +579,8 @@ int client_disconnect_export(struct obd_export *exp) out_disconnect: /* Use server style - class_disconnect should be always called for - * o_disconnect. */ + * o_disconnect. + */ err = class_disconnect(exp); if (!rc && err) rc = err; @@ -634,7 +599,8 @@ int target_pack_pool_reply(struct ptlrpc_request *req) struct obd_device *obd; /* Check that we still have all structures alive as this may - * be some late RPC at shutdown time. */ + * be some late RPC at shutdown time. + */ if (unlikely(!req->rq_export || !req->rq_export->exp_obd || !exp_connect_lru_resize(req->rq_export))) { lustre_msg_set_slv(req->rq_repmsg, 0); @@ -684,14 +650,14 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) svcpt = req->rq_rqbd->rqbd_svcpt; rs = req->rq_reply_state; - if (rs == NULL || !rs->rs_difficult) { + if (!rs || !rs->rs_difficult) { /* no notifiers */ target_send_reply_msg(req, rc, fail_id); return; } /* must be an export if locks saved */ - LASSERT(req->rq_export != NULL); + LASSERT(req->rq_export); /* req/reply consistent */ LASSERT(rs->rs_svcpt == svcpt); @@ -700,7 +666,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) LASSERT(!rs->rs_scheduled_ever); LASSERT(!rs->rs_handled); LASSERT(!rs->rs_on_net); - LASSERT(rs->rs_export == NULL); + LASSERT(!rs->rs_export); LASSERT(list_empty(&rs->rs_obd_list)); LASSERT(list_empty(&rs->rs_exp_list)); @@ -739,7 +705,8 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) * reply ref until ptlrpc_handle_rs() is done * with the reply state (if the send was successful, there * would have been +1 ref for the net, which - * reply_out_callback leaves alone) */ + * reply_out_callback leaves alone) + */ rs->rs_on_net = 0; ptlrpc_rs_addref(rs); } @@ -760,7 +727,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) } EXPORT_SYMBOL(target_send_reply); -ldlm_mode_t lck_compat_array[] = { +enum ldlm_mode lck_compat_array[] = { [LCK_EX] = LCK_COMPAT_EX, [LCK_PW] = LCK_COMPAT_PW, [LCK_PR] = LCK_COMPAT_PR, @@ -775,7 +742,7 @@ ldlm_mode_t lck_compat_array[] = { * Rather arbitrary mapping from LDLM error codes to errno values. This should * not escape to the user level. 
*/ -int ldlm_error2errno(ldlm_error_t error) +int ldlm_error2errno(enum ldlm_error error) { int result; @@ -803,7 +770,7 @@ int ldlm_error2errno(ldlm_error_t error) break; default: if (((int)error) < 0) /* cast to signed type */ - result = error; /* as ldlm_error_t can be unsigned */ + result = error; /* as enum ldlm_error can be unsigned */ else { CERROR("Invalid DLM result code: %d\n", error); result = -EPROTO; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index cf9ec0cfe247..ecd65a7a3dc9 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -91,7 +91,7 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = { /** * Converts lock policy from local format to on the wire lock_desc format */ -static void ldlm_convert_policy_to_wire(ldlm_type_t type, +static void ldlm_convert_policy_to_wire(enum ldlm_type type, const ldlm_policy_data_t *lpolicy, ldlm_wire_policy_data_t *wpolicy) { @@ -105,7 +105,7 @@ static void ldlm_convert_policy_to_wire(ldlm_type_t type, /** * Converts lock policy from on the wire lock_desc format to local format */ -void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, +void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, const ldlm_wire_policy_data_t *wpolicy, ldlm_policy_data_t *lpolicy) { @@ -326,9 +326,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock) if (lock->l_export && lock->l_export->exp_lock_hash) { /* NB: it's safe to call cfs_hash_del() even lock isn't - * in exp_lock_hash. */ + * in exp_lock_hash. + */ /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_del(lock->l_export->exp_lock_hash, &lock->l_remote_handle, &lock->l_exp_hash); @@ -337,16 +339,6 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock) ldlm_lock_remove_from_lru(lock); class_handle_unhash(&lock->l_handle); -#if 0 - /* Wake anyone waiting for this lock */ - /* FIXME: I should probably add yet another flag, instead of using - * l_export to only call this on clients */ - if (lock->l_export) - class_export_put(lock->l_export); - lock->l_export = NULL; - if (lock->l_export && lock->l_completion_ast) - lock->l_completion_ast(lock, 0); -#endif return 1; } @@ -412,11 +404,10 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) { struct ldlm_lock *lock; - if (resource == NULL) - LBUG(); + LASSERT(resource); - lock = kmem_cache_alloc(ldlm_lock_slab, GFP_NOFS | __GFP_ZERO); - if (lock == NULL) + lock = kmem_cache_zalloc(ldlm_lock_slab, GFP_NOFS); + if (!lock) return NULL; spin_lock_init(&lock->l_lock); @@ -485,7 +476,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, unlock_res_and_lock(lock); newres = ldlm_resource_get(ns, NULL, new_resid, type, 1); - if (newres == NULL) + if (!newres) return -ENOMEM; lu_ref_add(&newres->lr_reference, "lock", lock); @@ -547,11 +538,12 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, LASSERT(handle); lock = class_handle2object(handle->cookie); - if (lock == NULL) + if (!lock) return NULL; /* It's unlikely but possible that someone marked the lock as - * destroyed after we did handle2object on it */ + * destroyed after we did handle2object on it + */ if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) { lu_ref_add(&lock->l_reference, "handle", current); return 
lock; @@ -559,7 +551,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, lock_res_and_lock(lock); - LASSERT(lock->l_resource != NULL); + LASSERT(lock->l_resource); lu_ref_add_atomic(&lock->l_reference, "handle", current); if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) { @@ -611,13 +603,14 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); lock->l_flags |= LDLM_FL_AST_SENT; /* If the enqueuing client said so, tell the AST recipient to - * discard dirty data, rather than writing back. */ + * discard dirty data, rather than writing back. + */ if (new->l_flags & LDLM_FL_AST_DISCARD_DATA) lock->l_flags |= LDLM_FL_DISCARD_DATA; LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, work_list); LDLM_LOCK_GET(lock); - LASSERT(lock->l_blocking_lock == NULL); + LASSERT(!lock->l_blocking_lock); lock->l_blocking_lock = LDLM_LOCK_GET(new); } } @@ -664,7 +657,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode) struct ldlm_lock *lock; lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); + LASSERT(lock); ldlm_lock_addref_internal(lock, mode); LDLM_LOCK_PUT(lock); } @@ -708,7 +701,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode) result = -EAGAIN; lock = ldlm_handle2lock(lockh); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (lock->l_readers != 0 || lock->l_writers != 0 || !(lock->l_flags & LDLM_FL_CBPENDING)) { @@ -780,7 +773,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) if (lock->l_flags & LDLM_FL_LOCAL && !lock->l_readers && !lock->l_writers) { /* If this is a local lock on a server namespace and this was - * the last reference, cancel the lock. */ + * the last reference, cancel the lock. + */ CDEBUG(D_INFO, "forcing cancel of local lock\n"); lock->l_flags |= LDLM_FL_CBPENDING; } @@ -788,7 +782,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) if (!lock->l_readers && !lock->l_writers && (lock->l_flags & LDLM_FL_CBPENDING)) { /* If we received a blocked AST and this was the last reference, - * run the callback. */ + * run the callback. + */ LDLM_DEBUG(lock, "final decref done on cbpending lock"); @@ -809,7 +804,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) LDLM_DEBUG(lock, "add lock into lru list"); /* If this is a client-side namespace and this was the last - * reference, put it on the LRU. */ + * reference, put it on the LRU. + */ ldlm_lock_add_to_lru(lock); unlock_res_and_lock(lock); @@ -818,7 +814,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE * are not supported by the server, otherwise, it is done on - * enqueue. */ + * enqueue. 
+ */ if (!exp_connect_cancelset(lock->l_conn_export) && !ns_connect_lru_resize(ns)) ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0); @@ -835,7 +832,7 @@ void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); - LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie); + LASSERTF(lock, "Non-existing lock: %#llx\n", lockh->cookie); ldlm_lock_decref_internal(lock, mode); LDLM_LOCK_PUT(lock); } @@ -852,7 +849,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); - LASSERT(lock != NULL); + LASSERT(lock); LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); lock_res_and_lock(lock); @@ -893,8 +890,7 @@ static void search_granted_lock(struct list_head *queue, list_for_each(tmp, queue) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); - mode_end = list_entry(lock->l_sl_mode.prev, - struct ldlm_lock, l_sl_mode); + mode_end = list_prev_entry(lock, l_sl_mode); if (lock->l_req_mode != req->l_req_mode) { /* jump to last lock of mode group */ @@ -914,14 +910,13 @@ static void search_granted_lock(struct list_head *queue, if (lock->l_resource->lr_type == LDLM_IBITS) { for (;;) { policy_end = - list_entry(lock->l_sl_policy.prev, - struct ldlm_lock, - l_sl_policy); + list_prev_entry(lock, l_sl_policy); if (lock->l_policy_data.l_inodebits.bits == req->l_policy_data.l_inodebits.bits) { /* insert point is last lock of - * the policy group */ + * the policy group + */ prev->res_link = &policy_end->l_res_link; prev->mode_link = @@ -942,7 +937,8 @@ static void search_granted_lock(struct list_head *queue, } /* loop over policy groups within the mode group */ /* insert point is last lock of the mode group, - * new policy group is started */ + * new policy group is started + */ prev->res_link = &mode_end->l_res_link; prev->mode_link = &mode_end->l_sl_mode; prev->policy_link = &req->l_sl_policy; @@ -954,7 +950,8 @@ static void search_granted_lock(struct list_head *queue, } /* insert point is last lock on the queue, - * new mode group and new policy group are started */ + * new mode group and new policy group are started + */ prev->res_link = queue->prev; prev->mode_link = &req->l_sl_mode; prev->policy_link = &req->l_sl_policy; @@ -1034,10 +1031,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) else ldlm_resource_add_lock(res, &res->lr_granted, lock); - if (lock->l_granted_mode < res->lr_most_restr) - res->lr_most_restr = lock->l_granted_mode; - - if (work_list && lock->l_completion_ast != NULL) + if (work_list && lock->l_completion_ast) ldlm_add_ast_work_item(lock, NULL, work_list); ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); @@ -1050,7 +1044,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) * comment above ldlm_lock_match */ static struct ldlm_lock *search_queue(struct list_head *queue, - ldlm_mode_t *mode, + enum ldlm_mode *mode, ldlm_policy_data_t *policy, struct ldlm_lock *old_lock, __u64 flags, int unref) @@ -1059,7 +1053,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue, struct list_head *tmp; list_for_each(tmp, queue) { - ldlm_mode_t match; + enum ldlm_mode match; lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -1067,7 +1061,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, break; /* Check if this lock can be matched. 
- * Used by LU-2919(exclusive open) for open lease lock */ + * Used by LU-2919(exclusive open) for open lease lock + */ if (ldlm_is_excl(lock)) continue; @@ -1076,7 +1071,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, * if it passes in CBPENDING and the lock still has users. * this is generally only going to be used by children * whose parents already hold a lock so forward progress - * can still happen. */ + * can still happen. + */ if (lock->l_flags & LDLM_FL_CBPENDING && !(flags & LDLM_FL_CBPENDING)) continue; @@ -1100,7 +1096,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, continue; /* We match if we have existing lock with same or wider set - of bits. */ + * of bits. + */ if (lock->l_resource->lr_type == LDLM_IBITS && ((lock->l_policy_data.l_inodebits.bits & policy->l_inodebits.bits) != @@ -1192,16 +1189,18 @@ EXPORT_SYMBOL(ldlm_lock_allow_match); * keep caller code unchanged), the context failure will be discovered by * caller sometime later. */ -ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *res_id, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh, int unref) +enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, + const struct ldlm_res_id *res_id, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh, int unref) { struct ldlm_resource *res; struct ldlm_lock *lock, *old_lock = NULL; int rc = 0; - if (ns == NULL) { + if (!ns) { old_lock = ldlm_handle2lock(lockh); LASSERT(old_lock); @@ -1212,8 +1211,8 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } res = ldlm_resource_get(ns, NULL, res_id, type, 0); - if (res == NULL) { - LASSERT(old_lock == NULL); + if (!res) { + LASSERT(!old_lock); return 0; } @@ -1222,7 +1221,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, lock = search_queue(&res->lr_granted, &mode, policy, old_lock, flags, unref); - if (lock != NULL) { + if (lock) { rc = 1; goto out; } @@ -1232,7 +1231,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, flags, unref); - if (lock != NULL) { + if (lock) { rc = 1; goto out; } @@ -1317,14 +1316,14 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } EXPORT_SYMBOL(ldlm_lock_match); -ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, - __u64 *bits) +enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits) { struct ldlm_lock *lock; - ldlm_mode_t mode = 0; + enum ldlm_mode mode = 0; lock = ldlm_handle2lock(lockh); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (lock->l_flags & LDLM_FL_GONE_MASK) goto out; @@ -1340,7 +1339,7 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, } out: - if (lock != NULL) { + if (lock) { unlock_res_and_lock(lock); LDLM_LOCK_PUT(lock); } @@ -1354,7 +1353,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, { void *lvb; - LASSERT(data != NULL); + LASSERT(data); LASSERT(size >= 0); switch (lock->l_lvb_type) { @@ -1368,7 +1367,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_server_swab_get(pill, &RMF_DLM_LVB, lustre_swab_ost_lvb); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1385,7 +1384,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule 
*pill, lvb = req_capsule_server_sized_swab_get(pill, &RMF_DLM_LVB, size, lustre_swab_ost_lvb_v1); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1410,7 +1409,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_server_swab_get(pill, &RMF_DLM_LVB, lustre_swab_lquota_lvb); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1431,7 +1430,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_client_get(pill, &RMF_DLM_LVB); else lvb = req_capsule_server_get(pill, &RMF_DLM_LVB); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1453,8 +1452,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, */ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, - ldlm_type_t type, - ldlm_mode_t mode, + enum ldlm_type type, + enum ldlm_mode mode, const struct ldlm_callback_suite *cbs, void *data, __u32 lvb_len, enum lvb_type lvb_type) @@ -1463,12 +1462,12 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, struct ldlm_resource *res; res = ldlm_resource_get(ns, NULL, res_id, type, 1); - if (res == NULL) + if (!res) return NULL; lock = ldlm_lock_new(res); - if (lock == NULL) + if (!lock) return NULL; lock->l_req_mode = mode; @@ -1483,7 +1482,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, lock->l_tree_node = NULL; /* if this is the extent lock, allocate the interval tree node */ if (type == LDLM_EXTENT) { - if (ldlm_interval_alloc(lock) == NULL) + if (!ldlm_interval_alloc(lock)) goto out; } @@ -1514,9 +1513,9 @@ out: * Does not block. As a result of enqueue the lock would be put * into granted or waiting list. */ -ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, - struct ldlm_lock **lockp, - void *cookie, __u64 *flags) +enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns, + struct ldlm_lock **lockp, + void *cookie, __u64 *flags) { struct ldlm_lock *lock = *lockp; struct ldlm_resource *res = lock->l_resource; @@ -1527,7 +1526,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, if (lock->l_req_mode == lock->l_granted_mode) { /* The server returned a blocked lock, but it was granted * before we got a chance to actually enqueue it. We don't - * need to do anything else. */ + * need to do anything else. + */ *flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT); goto out; @@ -1540,7 +1540,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, LBUG(); /* Some flags from the enqueue want to make it into the AST, via the - * lock's l_flags. */ + * lock's l_flags. + */ lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA; /* @@ -1621,19 +1622,21 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) * This can't happen with the blocking_ast, however, because we * will never call the local blocking_ast until we drop our * reader/writer reference, which we won't do until we get the - * reply and finish enqueueing. */ + * reply and finish enqueueing. 
+ */ /* nobody should touch l_cp_ast */ lock_res_and_lock(lock); list_del_init(&lock->l_cp_ast); LASSERT(lock->l_flags & LDLM_FL_CP_REQD); /* save l_completion_ast since it can be changed by - * mds_intent_policy(), see bug 14225 */ + * mds_intent_policy(), see bug 14225 + */ completion_callback = lock->l_completion_ast; lock->l_flags &= ~LDLM_FL_CP_REQD; unlock_res_and_lock(lock); - if (completion_callback != NULL) + if (completion_callback) rc = completion_callback(lock, 0, (void *)arg); LDLM_LOCK_RELEASE(lock); @@ -1749,10 +1752,11 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, /* We create a ptlrpc request set with flow control extension. * This request set will use the work_ast_lock function to produce new * requests and will send a new request each time one completes in order - * to keep the number of requests in flight to ns_max_parallel_ast */ + * to keep the number of requests in flight to ns_max_parallel_ast + */ arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX, work_ast_lock, arg); - if (arg->set == NULL) { + if (!arg->set) { rc = -ENOMEM; goto out; } @@ -1815,7 +1819,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock) ns = ldlm_res_to_ns(res); /* Please do not, no matter how tempting, remove this LBUG without - * talking to me first. -phik */ + * talking to me first. -phik + */ if (lock->l_readers || lock->l_writers) { LDLM_ERROR(lock, "lock still has references"); LBUG(); @@ -1831,7 +1836,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock) ldlm_pool_del(&ns->ns_pool, lock); /* Make sure we will not be called again for same lock what is possible - * if not to zero out lock->l_granted_mode */ + * if not to zero out lock->l_granted_mode + */ lock->l_granted_mode = LCK_MINMODE; unlock_res_and_lock(lock); } @@ -1846,7 +1852,7 @@ int ldlm_lock_set_data(struct lustre_handle *lockh, void *data) int rc = -EINVAL; if (lock) { - if (lock->l_ast_data == NULL) + if (!lock->l_ast_data) lock->l_ast_data = data; if (lock->l_ast_data == data) rc = 0; @@ -1874,7 +1880,7 @@ void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh) return; lock = ldlm_handle2lock(lockh); - if (lock == NULL) + if (!lock) return; LDLM_DEBUG_LIMIT(level, lock, "###"); @@ -1900,13 +1906,13 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, if (exp && exp->exp_connection) { nid = libcfs_nid2str(exp->exp_connection->c_peer.nid); - } else if (exp && exp->exp_obd != NULL) { + } else if (exp && exp->exp_obd) { struct obd_import *imp = exp->exp_obd->u.cli.cl_import; nid = libcfs_nid2str(imp->imp_connection->c_peer.nid); } - if (resource == NULL) { + if (!resource) { libcfs_debug_vmsg2(msgdata, fmt, args, " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? 
flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", lock, diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c index 79aeb2bf6c8e..ebe9042adb25 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c @@ -107,7 +107,7 @@ struct ldlm_bl_work_item { struct list_head blwi_head; int blwi_count; struct completion blwi_comp; - ldlm_cancel_flags_t blwi_flags; + enum ldlm_cancel_flags blwi_flags; int blwi_mem_pressure; }; @@ -136,7 +136,7 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns, CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n", lock, lock->l_blocking_ast); - if (lock->l_blocking_ast != NULL) + if (lock->l_blocking_ast) lock->l_blocking_ast(lock, ld, lock->l_ast_data, LDLM_CB_BLOCKING); } else { @@ -185,7 +185,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } else if (lvb_len > 0) { if (lock->l_lvb_len > 0) { /* for extent lock, lvb contains ost_lvb{}. */ - LASSERT(lock->l_lvb_data != NULL); + LASSERT(lock->l_lvb_data); if (unlikely(lock->l_lvb_len < lvb_len)) { LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d", @@ -194,7 +194,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, goto out; } } else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has - * variable length */ + * variable length + */ void *lvb_data; lvb_data = kzalloc(lvb_len, GFP_NOFS); @@ -205,7 +206,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } lock_res_and_lock(lock); - LASSERT(lock->l_lvb_data == NULL); + LASSERT(!lock->l_lvb_data); lock->l_lvb_type = LVB_T_LAYOUT; lock->l_lvb_data = lvb_data; lock->l_lvb_len = lvb_len; @@ -224,7 +225,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } /* If we receive the completion AST before the actual enqueue returned, - * then we might need to switch lock modes, resources, or extents. */ + * then we might need to switch lock modes, resources, or extents. + */ if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) { lock->l_req_mode = dlm_req->lock_desc.l_granted_mode; LDLM_DEBUG(lock, "completion AST, new lock mode"); @@ -256,7 +258,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, if (dlm_req->lock_flags & LDLM_FL_AST_SENT) { /* BL_AST locks are not needed in LRU. - * Let ldlm_cancel_lru() be fast. */ + * Let ldlm_cancel_lru() be fast. 
+ */ ldlm_lock_remove_from_lru(lock); lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; LDLM_DEBUG(lock, "completion AST includes blocking AST"); @@ -276,8 +279,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work"); - /* Let Enqueue to call osc_lock_upcall() and initialize - * l_ast_data */ + /* Let Enqueue to call osc_lock_upcall() and initialize l_ast_data */ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2); ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST); @@ -312,10 +314,10 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req, LDLM_DEBUG(lock, "client glimpse AST callback handler"); - if (lock->l_glimpse_ast != NULL) + if (lock->l_glimpse_ast) rc = lock->l_glimpse_ast(lock, req); - if (req->rq_repmsg != NULL) { + if (req->rq_repmsg) { ptlrpc_reply(req); } else { req->rq_status = rc; @@ -353,7 +355,7 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc) } static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; @@ -371,7 +373,8 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, wake_up(&blp->blp_waitq); /* can not check blwi->blwi_flags as blwi could be already freed in - LCF_ASYNC mode */ + * LCF_ASYNC mode + */ if (!(cancel_flags & LCF_ASYNC)) wait_for_completion(&blwi->blwi_comp); @@ -383,7 +386,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, struct ldlm_lock *lock, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { init_completion(&blwi->blwi_comp); INIT_LIST_HEAD(&blwi->blwi_head); @@ -393,7 +396,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi, blwi->blwi_ns = ns; blwi->blwi_flags = cancel_flags; - if (ld != NULL) + if (ld) blwi->blwi_ld = *ld; if (count) { list_add(&blwi->blwi_head, cancels); @@ -417,7 +420,7 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { if (cancels && count == 0) return 0; @@ -451,7 +454,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags); } @@ -470,14 +473,14 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req) req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO); key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); - if (key == NULL) { + if (!key) { DEBUG_REQ(D_IOCTL, req, "no set_info key"); return -EFAULT; } keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY, RCL_CLIENT); val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL); - if (val == NULL) { + if (!val) { DEBUG_REQ(D_IOCTL, req, "no set_info val"); return -EFAULT; } @@ -519,7 +522,7 @@ static int ldlm_handle_qc_callback(struct ptlrpc_request *req) struct client_obd *cli = &req->rq_export->exp_obd->u.cli; oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - if (oqctl == NULL) { + if (!oqctl) { CERROR("Can't unpack obd_quotactl\n"); return -EPROTO; } @@ -541,7 +544,8 @@ static int 
ldlm_callback_handler(struct ptlrpc_request *req) /* Requests arrive in sender's byte order. The ptlrpc service * handler has already checked and, if necessary, byte-swapped the * incoming request message body, but I am responsible for the - * message buffers. */ + * message buffers. + */ /* do nothing for sec context finalize */ if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI) @@ -549,15 +553,14 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) req_capsule_init(&req->rq_pill, req, RCL_SERVER); - if (req->rq_export == NULL) { + if (!req->rq_export) { rc = ldlm_callback_reply(req, -ENOTCONN); ldlm_callback_errmsg(req, "Operate on unconnected server", rc, NULL); return 0; } - LASSERT(req->rq_export != NULL); - LASSERT(req->rq_export->exp_obd != NULL); + LASSERT(req->rq_export->exp_obd); switch (lustre_msg_get_opc(req->rq_reqmsg)) { case LDLM_BL_CALLBACK: @@ -591,12 +594,12 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) } ns = req->rq_export->exp_obd->obd_namespace; - LASSERT(ns != NULL); + LASSERT(ns); req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK); dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - if (dlm_req == NULL) { + if (!dlm_req) { rc = ldlm_callback_reply(req, -EPROTO); ldlm_callback_errmsg(req, "Operate without parameter", rc, NULL); @@ -604,7 +607,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) } /* Force a known safe race, send a cancel to the server for a lock - * which the server has already started a blocking callback on. */ + * which the server has already started a blocking callback on. + */ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) && lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) { rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0); @@ -634,7 +638,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) /* If somebody cancels lock and cache is already dropped, * or lock is failed before cp_ast received on client, * we can tell the server we have no lock. Otherwise, we - * should send cancel after dropping the cache. */ + * should send cancel after dropping the cache. + */ if (((lock->l_flags & LDLM_FL_CANCELING) && (lock->l_flags & LDLM_FL_BL_DONE)) || (lock->l_flags & LDLM_FL_FAILED)) { @@ -648,7 +653,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) return 0; } /* BL_AST locks are not needed in LRU. - * Let ldlm_cancel_lru() be fast. */ + * Let ldlm_cancel_lru() be fast. + */ ldlm_lock_remove_from_lru(lock); lock->l_flags |= LDLM_FL_BL_AST; } @@ -661,7 +667,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) * But we'd also like to be able to indicate in the reply that we're * cancelling right now, because it's unused, or have an intent result * in the reply, so we might have to push the responsibility for sending - * the reply down into the AST handlers, alas. */ + * the reply down into the AST handlers, alas. 
+ */ switch (lustre_msg_get_opc(req->rq_reqmsg)) { case LDLM_BL_CALLBACK: @@ -781,17 +788,17 @@ static int ldlm_bl_thread_main(void *arg) blwi = ldlm_bl_get_work(blp); - if (blwi == NULL) { + if (!blwi) { atomic_dec(&blp->blp_busy_threads); l_wait_event_exclusive(blp->blp_waitq, - (blwi = ldlm_bl_get_work(blp)) != NULL, + (blwi = ldlm_bl_get_work(blp)), &lwi); busy = atomic_inc_return(&blp->blp_busy_threads); } else { busy = atomic_read(&blp->blp_busy_threads); } - if (blwi->blwi_ns == NULL) + if (!blwi->blwi_ns) /* added by ldlm_cleanup() */ break; @@ -810,7 +817,8 @@ static int ldlm_bl_thread_main(void *arg) /* The special case when we cancel locks in LRU * asynchronously, we pass the list of locks here. * Thus locks are marked LDLM_FL_CANCELING, but NOT - * canceled locally yet. */ + * canceled locally yet. + */ count = ldlm_cli_cancel_list_local(&blwi->blwi_head, blwi->blwi_count, LCF_BL_AST); @@ -915,7 +923,7 @@ static int ldlm_setup(void) int rc = 0; int i; - if (ldlm_state != NULL) + if (ldlm_state) return -EALREADY; ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS); @@ -1040,7 +1048,7 @@ static int ldlm_cleanup(void) ldlm_pools_fini(); - if (ldlm_state->ldlm_bl_pool != NULL) { + if (ldlm_state->ldlm_bl_pool) { struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; while (atomic_read(&blp->blp_num_threads) > 0) { @@ -1059,7 +1067,7 @@ static int ldlm_cleanup(void) kfree(blp); } - if (ldlm_state->ldlm_cb_service != NULL) + if (ldlm_state->ldlm_cb_service) ptlrpc_unregister_service(ldlm_state->ldlm_cb_service); if (ldlm_ns_kset) @@ -1085,13 +1093,13 @@ int ldlm_init(void) ldlm_resource_slab = kmem_cache_create("ldlm_resources", sizeof(struct ldlm_resource), 0, SLAB_HWCACHE_ALIGN, NULL); - if (ldlm_resource_slab == NULL) + if (!ldlm_resource_slab) return -ENOMEM; ldlm_lock_slab = kmem_cache_create("ldlm_locks", sizeof(struct ldlm_lock), 0, SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL); - if (ldlm_lock_slab == NULL) { + if (!ldlm_lock_slab) { kmem_cache_destroy(ldlm_resource_slab); return -ENOMEM; } @@ -1099,7 +1107,7 @@ int ldlm_init(void) ldlm_interval_slab = kmem_cache_create("interval_node", sizeof(struct ldlm_interval), 0, SLAB_HWCACHE_ALIGN, NULL); - if (ldlm_interval_slab == NULL) { + if (!ldlm_interval_slab) { kmem_cache_destroy(ldlm_resource_slab); kmem_cache_destroy(ldlm_lock_slab); return -ENOMEM; @@ -1117,7 +1125,8 @@ void ldlm_exit(void) kmem_cache_destroy(ldlm_resource_slab); /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call * synchronize_rcu() to wait a grace period elapsed, so that - * ldlm_lock_free() get a chance to be called. */ + * ldlm_lock_free() get a chance to be called. 
+ */ synchronize_rcu(); kmem_cache_destroy(ldlm_lock_slab); kmem_cache_destroy(ldlm_interval_slab); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c index 3d7c137d223a..3e937b050203 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c @@ -246,7 +246,6 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl) */ obd = container_of(pl, struct ldlm_namespace, ns_pool)->ns_obd; - LASSERT(obd != NULL); read_lock(&obd->obd_pool_lock); pl->pl_server_lock_volume = obd->obd_pool_slv; atomic_set(&pl->pl_limit, obd->obd_pool_limit); @@ -381,7 +380,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl) spin_unlock(&pl->pl_lock); recalc: - if (pl->pl_ops->po_recalc != NULL) { + if (pl->pl_ops->po_recalc) { count = pl->pl_ops->po_recalc(pl); lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT, count); @@ -409,7 +408,7 @@ static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask) { int cancel = 0; - if (pl->pl_ops->po_shrink != NULL) { + if (pl->pl_ops->po_shrink) { cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask); if (nr > 0) { lprocfs_counter_add(pl->pl_stats, @@ -643,11 +642,11 @@ static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl) static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl) { - if (pl->pl_stats != NULL) { + if (pl->pl_stats) { lprocfs_free_stats(&pl->pl_stats); pl->pl_stats = NULL; } - if (pl->pl_debugfs_entry != NULL) { + if (pl->pl_debugfs_entry) { ldebugfs_remove(&pl->pl_debugfs_entry); pl->pl_debugfs_entry = NULL; } @@ -834,7 +833,7 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) continue; } - if (ns_old == NULL) + if (!ns_old) ns_old = ns; ldlm_namespace_get(ns); @@ -957,7 +956,7 @@ static int ldlm_pools_recalc(ldlm_side_t client) continue; } - if (ns_old == NULL) + if (!ns_old) ns_old = ns; spin_lock(&ns->ns_lock); @@ -1040,7 +1039,7 @@ static int ldlm_pools_thread_start(void) struct l_wait_info lwi = { 0 }; struct task_struct *task; - if (ldlm_pools_thread != NULL) + if (ldlm_pools_thread) return -EALREADY; ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS); @@ -1065,7 +1064,7 @@ static int ldlm_pools_thread_start(void) static void ldlm_pools_thread_stop(void) { - if (ldlm_pools_thread == NULL) + if (!ldlm_pools_thread) return; thread_set_flags(ldlm_pools_thread, SVC_STOPPING); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c index b9eb37762434..c7904a96f9af 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c @@ -94,7 +94,7 @@ static int ldlm_expired_completion_wait(void *data) struct obd_import *imp; struct obd_device *obd; - if (lock->l_conn_export == NULL) { + if (!lock->l_conn_export) { static unsigned long next_dump, last_dump; LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n", @@ -128,7 +128,8 @@ static int ldlm_expired_completion_wait(void *data) } /* We use the same basis for both server side and client side functions - from a single node. */ + * from a single node. + */ static int ldlm_get_enq_timeout(struct ldlm_lock *lock) { int timeout = at_get(ldlm_lock_to_ns_at(lock)); @@ -136,8 +137,9 @@ static int ldlm_get_enq_timeout(struct ldlm_lock *lock) if (AT_OFF) return obd_timeout / 2; /* Since these are non-updating timeouts, we should be conservative. - It would be nice to have some kind of "early reply" mechanism for - lock callbacks too... 
*/ + * It would be nice to have some kind of "early reply" mechanism for + * lock callbacks too... + */ timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */ return max(timeout, ldlm_enqueue_min); } @@ -239,12 +241,13 @@ noreproc: obd = class_exp2obd(lock->l_conn_export); /* if this is a local lock, then there is no import */ - if (obd != NULL) + if (obd) imp = obd->u.cli.cl_import; /* Wait a long time for enqueue - server may have to callback a - lock from another client. Server will evict the other client if it - doesn't respond reasonably, and then give us the lock. */ + * lock from another client. Server will evict the other client if it + * doesn't respond reasonably, and then give us the lock. + */ timeout = ldlm_get_enq_timeout(lock) * 2; lwd.lwd_lock = lock; @@ -258,7 +261,7 @@ noreproc: interrupted_completion_wait, &lwd); } - if (imp != NULL) { + if (imp) { spin_lock(&imp->imp_lock); lwd.lwd_conn_cnt = imp->imp_conn_cnt; spin_unlock(&imp->imp_lock); @@ -296,7 +299,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, !(lock->l_flags & LDLM_FL_FAILED)) { /* Make sure that this lock will not be found by raced * bl_ast and -EINVAL reply is sent to server anyways. - * bug 17645 */ + * bug 17645 + */ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING; need_cancel = 1; @@ -312,11 +316,13 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, ldlm_lock_decref_internal(lock, mode); /* XXX - HACK because we shouldn't call ldlm_lock_destroy() - * from llite/file.c/ll_file_flock(). */ + * from llite/file.c/ll_file_flock(). + */ /* This code makes for the fact that we do not have blocking handler on * a client for flock locks. As such this is the place where we must * completely kill failed locks. (interrupted and those that - * were waiting to be granted when server evicted us. */ + * were waiting to be granted when server evicted us. + */ if (lock->l_resource->lr_type == LDLM_FLOCK) { lock_res_and_lock(lock); ldlm_resource_unlink_lock(lock); @@ -331,7 +337,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, * Called after receiving reply from server. 
*/ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode, + enum ldlm_type type, __u8 with_policy, + enum ldlm_mode mode, __u64 *flags, void *lvb, __u32 lvb_len, struct lustre_handle *lockh, int rc) { @@ -363,13 +370,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* Before we return, swab the reply */ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto cleanup; } if (lvb_len != 0) { - LASSERT(lvb != NULL); + LASSERT(lvb); size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER); @@ -401,7 +408,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* Key change rehash lock in per-export hash with new key */ if (exp->exp_lock_hash) { /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_rehash_key(exp->exp_lock_hash, &lock->l_remote_handle, @@ -415,7 +423,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & LDLM_INHERIT_FLAGS); /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match() - * to wait with no timeout as well */ + * to wait with no timeout as well + */ lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & LDLM_FL_NO_TIMEOUT); unlock_res_and_lock(lock); @@ -425,7 +434,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* If enqueue returned a blocked lock but the completion handler has * already run, then it fixed up the resource and we don't need to do it - * again. */ + * again. + */ if ((*flags) & LDLM_FL_LOCK_CHANGED) { int newmode = reply->lock_desc.l_req_mode; @@ -445,7 +455,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, rc = ldlm_lock_change_resource(ns, lock, &reply->lock_desc.l_resource.lr_name); - if (rc || lock->l_resource == NULL) { + if (rc || !lock->l_resource) { rc = -ENOMEM; goto cleanup; } @@ -467,7 +477,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, if ((*flags) & LDLM_FL_AST_SENT || /* Cancel extent locks as soon as possible on a liblustre client, * because it cannot handle asynchronous ASTs robustly (see - * bug 7311). */ + * bug 7311). + */ (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) { lock_res_and_lock(lock); lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; @@ -476,12 +487,14 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, } /* If the lock has already been granted by a completion AST, don't - * clobber the LVB with an older one. */ + * clobber the LVB with an older one. + */ if (lvb_len != 0) { /* We must lock or a racing completion might update lvb without * letting us know and we'll clobber the correct value. 
- * Cannot unlock after the check either, a that still leaves - * a tiny window for completion to get in */ + * Cannot unlock after the check either, as that still leaves + * a tiny window for completion to get in + */ lock_res_and_lock(lock); if (lock->l_req_mode != lock->l_granted_mode) rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, @@ -495,7 +508,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, if (!is_replay) { rc = ldlm_lock_enqueue(ns, &lock, NULL, flags); - if (lock->l_completion_ast != NULL) { + if (lock->l_completion_ast) { int err = lock->l_completion_ast(lock, *flags, NULL); if (!rc) @@ -505,9 +518,10 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, } } - if (lvb_len && lvb != NULL) { + if (lvb_len && lvb) { /* Copy the LVB here, and not earlier, because the completion - * AST (if any) can override what we got in the reply */ + * AST (if any) can override what we got in the reply + */ memcpy(lvb, lock->l_lvb_data, lvb_len); } @@ -579,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, LIST_HEAD(head); int rc; - if (cancels == NULL) + if (!cancels) cancels = &head; if (ns_connect_cancelset(ns)) { /* Estimate the amount of available space in the request. */ @@ -593,7 +607,8 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, /* Cancel LRU locks here _only_ if the server supports * EARLY_CANCEL. Otherwise we have to send extra CANCEL - * RPC, which will make us slower. */ + * RPC, which will make us slower. + */ if (avail > count) count += ldlm_cancel_lru_local(ns, cancels, to_free, avail - count, 0, flags); @@ -618,7 +633,8 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, /* Skip first lock handler in ldlm_request_pack(), * this method will increment @lock_count according * to the lock handle amount actually written to - * the buffer. */ + * the buffer. + */ dlm->lock_count = canceloff; } /* Pack into the request @pack lock handles. */ @@ -665,15 +681,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, int rc, err; struct ptlrpc_request *req; - LASSERT(exp != NULL); - ns = exp->exp_obd->obd_namespace; /* If we're replaying this lock, just check some invariants. - * If we're creating a new lock, get everything all setup nice. */ + * If we're creating a new lock, get everything all setup nicely. 
+ */ if (is_replay) { lock = ldlm_handle2lock_long(lockh, 0); - LASSERT(lock != NULL); + LASSERT(lock); LDLM_DEBUG(lock, "client-side enqueue START"); LASSERT(exp == lock->l_conn_export); } else { @@ -685,16 +700,21 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, lock = ldlm_lock_create(ns, res_id, einfo->ei_type, einfo->ei_mode, &cbs, einfo->ei_cbdata, lvb_len, lvb_type); - if (lock == NULL) + if (!lock) return -ENOMEM; /* for the local lock, add the reference */ ldlm_lock_addref_internal(lock, einfo->ei_mode); ldlm_lock2handle(lock, lockh); - if (policy != NULL) - lock->l_policy_data = *policy; + if (policy) + lock->l_policy_data = *policy; + + if (einfo->ei_type == LDLM_EXTENT) { + /* extent lock without policy is a bug */ + if (!policy) + LBUG(); - if (einfo->ei_type == LDLM_EXTENT) lock->l_req_extent = policy->l_extent; + } LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n", *flags); } @@ -706,12 +726,12 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, /* lock not sent to server yet */ - if (reqp == NULL || *reqp == NULL) { + if (!reqp || !*reqp) { req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) { + if (!req) { failed_lock_cleanup(ns, lock, einfo->ei_mode); LDLM_LOCK_RELEASE(lock); return -ENOMEM; @@ -754,7 +774,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, policy->l_extent.end == OBD_OBJECT_EOF)); if (async) { - LASSERT(reqp != NULL); + LASSERT(reqp); return 0; } @@ -767,13 +787,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, lockh, rc); /* If ldlm_cli_enqueue_fini did not find the lock, we need to free - * one reference that we took */ + * one reference that we took + */ if (err == -ENOLCK) LDLM_LOCK_RELEASE(lock); else rc = err; - if (!req_passed_in && req != NULL) { + if (!req_passed_in && req) { ptlrpc_req_finished(req); if (reqp) *reqp = NULL; @@ -832,7 +853,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, int max, packed = 0; dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - LASSERT(dlm != NULL); + LASSERT(dlm); /* Check the room in the request buffer. */ max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) - @@ -843,7 +864,8 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, /* XXX: it would be better to pack lock handles grouped by resource. * so that the server cancel would call filter_lvbo_update() less - * frequently. */ + * frequently. + */ list_for_each_entry(lock, head, l_bl_ast) { if (!count--) break; @@ -858,17 +880,18 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, /** * Prepare and send a batched cancel RPC. It will include \a count lock - * handles of locks given in \a cancels list. */ + * handles of locks given in \a cancels list. 
+ */ static int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels, - int count, ldlm_cancel_flags_t flags) + int count, enum ldlm_cancel_flags flags) { struct ptlrpc_request *req = NULL; struct obd_import *imp; int free, sent = 0; int rc = 0; - LASSERT(exp != NULL); + LASSERT(exp); LASSERT(count > 0); CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val); @@ -883,14 +906,14 @@ static int ldlm_cli_cancel_req(struct obd_export *exp, while (1) { imp = class_exp2cliimp(exp); - if (imp == NULL || imp->imp_invalid) { + if (!imp || imp->imp_invalid) { CDEBUG(D_DLMTRACE, "skipping cancel on invalid import %p\n", imp); return count; } req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -946,7 +969,6 @@ out: static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp) { - LASSERT(imp != NULL); return &imp->imp_obd->obd_namespace->ns_pool; } @@ -971,7 +993,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req) * is the case when server does not support LRU resize feature. * This is also possible in some recovery cases when server-side * reqs have no reference to the OBD export and thus access to - * server-side namespace is not possible. */ + * server-side namespace is not possible. + */ if (lustre_msg_get_slv(req->rq_repmsg) == 0 || lustre_msg_get_limit(req->rq_repmsg) == 0) { DEBUG_REQ(D_HA, req, @@ -989,7 +1012,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req) * to the pool thread. We do not access obd_namespace and pool * directly here as there is no reliable way to make sure that * they are still alive at cleanup time. Evil races are possible - * which may cause Oops at that time. */ + * which may cause Oops at that time. + */ write_lock(&obd->obd_pool_lock); obd->obd_pool_slv = new_slv; obd->obd_pool_limit = new_limit; @@ -1005,7 +1029,7 @@ EXPORT_SYMBOL(ldlm_cli_update_pool); * Lock must not have any readers or writers by this time. */ int ldlm_cli_cancel(struct lustre_handle *lockh, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { struct obd_export *exp; int avail, flags, count = 1; @@ -1016,8 +1040,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh, /* concurrent cancels on the same handle can happen */ lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING); - if (lock == NULL) { - LDLM_DEBUG_NOLOCK("lock is already being destroyed\n"); + if (!lock) { + LDLM_DEBUG_NOLOCK("lock is already being destroyed"); return 0; } @@ -1028,7 +1052,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh, } /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL * RPC which goes to canceld portal, so we can cancel other LRU locks - * here and send them all as one LDLM_CANCEL RPC. */ + * here and send them all as one LDLM_CANCEL RPC. + */ LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, &cancels); @@ -1055,7 +1080,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel); * Return the number of cancelled locks. */ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - ldlm_cancel_flags_t flags) + enum ldlm_cancel_flags flags) { LIST_HEAD(head); struct ldlm_lock *lock, *next; @@ -1076,7 +1101,8 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, /* Until we have compound requests and can send LDLM_CANCEL * requests batched with generic RPCs, we need to send cancels * with the LDLM_FL_BL_AST flag in a separate RPC from - * the one being generated now. */ + * the one being generated now. 
+ */ if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) { LDLM_DEBUG(lock, "Cancel lock separately"); list_del_init(&lock->l_bl_ast); @@ -1116,7 +1142,8 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, lock_res_and_lock(lock); /* don't check added & count since we want to process all locks - * from unused list */ + * from unused list + */ switch (lock->l_resource->lr_type) { case LDLM_EXTENT: case LDLM_IBITS: @@ -1152,7 +1179,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, unsigned long la; /* Stop LRU processing when we reach past @count or have checked all - * locks in LRU. */ + * locks in LRU. + */ if (count && added >= count) return LDLM_POLICY_KEEP_LOCK; @@ -1166,7 +1194,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, ldlm_pool_set_clv(pl, lv); /* Stop when SLV is not yet come from server or lv is smaller than - * it is. */ + * it is. + */ return (slv == 0 || lv < slv) ? LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1186,7 +1215,8 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns, int count) { /* Stop LRU processing when we reach past @count or have checked all - * locks in LRU. */ + * locks in LRU. + */ return (added >= count) ? LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1227,7 +1257,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns, int count) { /* Stop LRU processing when we reach past count or have checked all - * locks in LRU. */ + * locks in LRU. + */ return (added >= count) ? LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1307,7 +1338,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, count += unused - ns->ns_max_unused; pf = ldlm_cancel_lru_policy(ns, flags); - LASSERT(pf != NULL); + LASSERT(pf); while (!list_empty(&ns->ns_unused_list)) { ldlm_policy_res_t result; @@ -1331,7 +1362,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, continue; /* Somebody is already doing CANCEL. No need for this - * lock in LRU, do not traverse it again. */ + * lock in LRU, do not traverse it again. + */ if (!(lock->l_flags & LDLM_FL_CANCELING)) break; @@ -1380,7 +1412,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, /* Another thread is removing lock from LRU, or * somebody is already doing CANCEL, or there * is a blocking request which will send cancel - * by itself, or the lock is no longer unused. */ + * by itself, or the lock is no longer unused. + */ unlock_res_and_lock(lock); lu_ref_del(&lock->l_reference, __func__, current); @@ -1394,7 +1427,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * better send cancel notification to server, so that it * frees appropriate state. This might lead to a race * where while we are doing cancel here, server is also - * silently cancelling this lock. */ + * silently cancelling this lock. + */ lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK; /* Setting the CBPENDING flag is a little misleading, @@ -1402,7 +1436,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * CBPENDING is set, the lock can accumulate no more * readers/writers. 
Since readers and writers are * already zero here, ldlm_lock_decref() won't see - * this flag and call l_blocking_ast */ + * this flag and call l_blocking_ast + */ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING; /* We can't re-add to l_lru as it confuses the @@ -1410,7 +1445,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * arrives after we drop lr_lock below. We use l_bl_ast * and can't use l_pending_chain as it is used both on * server and client nevertheless bug 5666 says it is - * used only on server */ + * used only on server + */ LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, cancels); unlock_res_and_lock(lock); @@ -1425,7 +1461,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels, int count, int max, - ldlm_cancel_flags_t cancel_flags, int flags) + enum ldlm_cancel_flags cancel_flags, int flags) { int added; @@ -1444,14 +1480,15 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, * callback will be performed in this function. */ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, - ldlm_cancel_flags_t cancel_flags, + enum ldlm_cancel_flags cancel_flags, int flags) { LIST_HEAD(cancels); int count, rc; /* Just prepare the list of locks, do not actually cancel them yet. - * Locks are cancelled later in a separate thread. */ + * Locks are cancelled later in a separate thread. + */ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags); rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags); if (rc == 0) @@ -1468,15 +1505,16 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, ldlm_policy_data_t *policy, - ldlm_mode_t mode, __u64 lock_flags, - ldlm_cancel_flags_t cancel_flags, void *opaque) + enum ldlm_mode mode, __u64 lock_flags, + enum ldlm_cancel_flags cancel_flags, + void *opaque) { struct ldlm_lock *lock; int count = 0; lock_res(res); list_for_each_entry(lock, &res->lr_granted, l_res_link) { - if (opaque != NULL && lock->l_ast_data != opaque) { + if (opaque && lock->l_ast_data != opaque) { LDLM_ERROR(lock, "data %p doesn't match opaque %p", lock->l_ast_data, opaque); continue; @@ -1486,7 +1524,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res, continue; /* If somebody is already doing CANCEL, or blocking AST came, - * skip this lock. */ + * skip this lock. + */ if (lock->l_flags & LDLM_FL_BL_AST || lock->l_flags & LDLM_FL_CANCELING) continue; @@ -1495,7 +1534,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res, continue; /* If policy is given and this is IBITS lock, add to list only - * those locks that match by policy. */ + * those locks that match by policy. + */ if (policy && (lock->l_resource->lr_type == LDLM_IBITS) && !(lock->l_policy_data.l_inodebits.bits & policy->l_inodebits.bits)) @@ -1527,7 +1567,8 @@ EXPORT_SYMBOL(ldlm_cancel_resource_local); * Destroy \a cancels at the end. */ int ldlm_cli_cancel_list(struct list_head *cancels, int count, - struct ptlrpc_request *req, ldlm_cancel_flags_t flags) + struct ptlrpc_request *req, + enum ldlm_cancel_flags flags) { struct ldlm_lock *lock; int res = 0; @@ -1539,7 +1580,8 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count, * Usually it is enough to have just 1 RPC, but it is possible that * there are too many locks to be cancelled in LRU or on a resource. * It would also speed up the case when the server does not support - * the feature. 
*/ + * the feature. + */ while (count > 0) { LASSERT(!list_empty(cancels)); lock = list_entry(cancels->next, struct ldlm_lock, @@ -1577,12 +1619,13 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list); * Cancel all locks on a resource that have 0 readers/writers. * * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying - * to notify the server. */ + * to notify the server. + */ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_resource *res; @@ -1591,7 +1634,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, int rc; res = ldlm_resource_get(ns, NULL, res_id, 0, 0); - if (res == NULL) { + if (!res) { /* This is not a problem. */ CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]); return 0; @@ -1638,17 +1681,17 @@ static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, * to notify the server. */ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, - ldlm_cancel_flags_t flags, void *opaque) + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_cli_cancel_arg arg = { .lc_flags = flags, .lc_opaque = opaque, }; - if (ns == NULL) + if (!ns) return ELDLM_OK; - if (res_id != NULL) { + if (res_id) { return ldlm_cli_cancel_unused_resource(ns, res_id, NULL, LCK_MINMODE, flags, opaque); @@ -1743,13 +1786,13 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns, struct ldlm_resource *res; int rc; - if (ns == NULL) { + if (!ns) { CERROR("must pass in namespace\n"); LBUG(); } res = ldlm_resource_get(ns, NULL, res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); @@ -1796,7 +1839,7 @@ static int replay_lock_interpret(const struct lu_env *env, goto out; reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto out; } @@ -1815,7 +1858,8 @@ static int replay_lock_interpret(const struct lu_env *env, exp = req->rq_export; if (exp && exp->exp_lock_hash) { /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_rehash_key(exp->exp_lock_hash, &lock->l_remote_handle, @@ -1850,7 +1894,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) /* If this is reply-less callback lock, we cannot replay it, since * server might have long dropped it, but notification of that event was - * lost by network. (and server granted conflicting lock already) */ + * lost by network. (and server granted conflicting lock already) + */ if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) { LDLM_DEBUG(lock, "Not replaying reply-less lock:"); ldlm_lock_cancel(lock); @@ -1882,7 +1927,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) + if (!req) return -ENOMEM; /* We're part of recovery, so don't wait for it. */ @@ -1901,7 +1946,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) /* notify the server we've replayed all requests. * also, we mark the request to be put on a dedicated * queue to be processed after all request replayes. 
- * bug 6063 */ + * bug 6063 + */ lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE); LDLM_DEBUG(lock, "replaying lock:"); @@ -1936,7 +1982,8 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns) /* We don't need to care whether or not LRU resize is enabled * because the LDLM_CANCEL_NO_WAIT policy doesn't use the - * count parameter */ + * count parameter + */ canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0, LCF_LOCAL, LDLM_CANCEL_NO_WAIT); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c index 0ae610015b7c..9dede87ad0a3 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c @@ -56,7 +56,8 @@ LIST_HEAD(ldlm_srv_namespace_list); struct mutex ldlm_cli_namespace_lock; /* Client Namespaces that have active resources in them. * Once all resources go away, ldlm_poold moves such namespaces to the - * inactive list */ + * inactive list + */ LIST_HEAD(ldlm_cli_active_namespace_list); /* Client namespaces that don't have any locks in them */ static LIST_HEAD(ldlm_cli_inactive_namespace_list); @@ -66,7 +67,8 @@ static struct dentry *ldlm_ns_debugfs_dir; struct dentry *ldlm_svc_debugfs_dir; /* during debug dump certain amount of granted locks for one resource to avoid - * DDOS. */ + * DDOS. + */ static unsigned int ldlm_dump_granted_max = 256; static ssize_t @@ -275,7 +277,8 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED); /* Make sure that LRU resize was originally supported before - * turning it on here. */ + * turning it on here. + */ if (lru_resize && (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) { CDEBUG(D_DLMTRACE, @@ -380,7 +383,7 @@ static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns) else ldebugfs_remove(&ns->ns_debugfs_entry); - if (ns->ns_stats != NULL) + if (ns->ns_stats) lprocfs_free_stats(&ns->ns_stats); } @@ -400,7 +403,7 @@ static int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns) "%s", ldlm_ns_name(ns)); ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0); - if (ns->ns_stats == NULL) { + if (!ns->ns_stats) { kobject_put(&ns->ns_kobj); return -ENOMEM; } @@ -420,7 +423,7 @@ static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns) } else { ns_entry = debugfs_create_dir(ldlm_ns_name(ns), ldlm_ns_debugfs_dir); - if (ns_entry == NULL) + if (!ns_entry) return -ENOMEM; ns->ns_debugfs_entry = ns_entry; } @@ -554,7 +557,7 @@ static struct cfs_hash_ops ldlm_ns_fid_hash_ops = { }; struct ldlm_ns_hash_def { - ldlm_ns_type_t nsd_type; + enum ldlm_ns_type nsd_type; /** hash bucket bits */ unsigned nsd_bkt_bits; /** hash bits */ @@ -621,8 +624,8 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns, */ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, ldlm_side_t client, - ldlm_appetite_t apt, - ldlm_ns_type_t ns_type) + enum ldlm_appetite apt, + enum ldlm_ns_type ns_type) { struct ldlm_namespace *ns = NULL; struct ldlm_ns_bucket *nsb; @@ -631,7 +634,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, int idx; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = ldlm_get_ref(); if (rc) { @@ -664,7 +667,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, CFS_HASH_BIGNAME | CFS_HASH_SPIN_BKTLOCK | CFS_HASH_NO_ITEMREF); - if (ns->ns_rs_hash == NULL) + if (!ns->ns_rs_hash) goto out_ns; 
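The namespace setup above bails out through a goto label (goto out_ns) when the resource hash cannot be created, releasing only what was already allocated. A minimal userspace sketch of that unwind pattern follows; the names (ns_new, out_hash, the struct members) are hypothetical stand-ins for the Lustre structures, not code from this patch.

#include <stdlib.h>

struct ns { void *hash; void *stats; };

static struct ns *ns_new(void)
{
	struct ns *ns = calloc(1, sizeof(*ns));

	if (!ns)
		return NULL;
	ns->hash = malloc(64);
	if (!ns->hash)
		goto out_ns;		/* nothing else to undo yet */
	ns->stats = malloc(64);
	if (!ns->stats)
		goto out_hash;		/* undo the hash allocation too */
	return ns;

out_hash:
	free(ns->hash);
out_ns:
	free(ns);
	return NULL;
}

Each later acquisition gets a label earlier in the unwind chain, so a failure at any step frees exactly the resources taken so far.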
cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) { @@ -749,7 +752,8 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, struct lustre_handle lockh; /* First, we look for non-cleaned-yet lock - * all cleaned locks are marked by CLEANED flag. */ + * all cleaned locks are marked by CLEANED flag. + */ lock_res(res); list_for_each(tmp, q) { lock = list_entry(tmp, struct ldlm_lock, @@ -763,13 +767,14 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, break; } - if (lock == NULL) { + if (!lock) { unlock_res(res); break; } /* Set CBPENDING so nothing in the cancellation path - * can match this lock. */ + * can match this lock. + */ lock->l_flags |= LDLM_FL_CBPENDING; lock->l_flags |= LDLM_FL_FAILED; lock->l_flags |= flags; @@ -782,7 +787,8 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, /* This is a little bit gross, but much better than the * alternative: pretend that we got a blocking AST from * the server, so that when the lock is decref'd, it - * will go away ... */ + * will go away ... + */ unlock_res(res); LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY"); if (lock->l_completion_ast) @@ -837,7 +843,7 @@ static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd, */ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags) { - if (ns == NULL) { + if (!ns) { CDEBUG(D_INFO, "NULL ns, skipping cleanup\n"); return ELDLM_OK; } @@ -873,7 +879,8 @@ force_wait: atomic_read(&ns->ns_bref) == 0, &lwi); /* Forced cleanups should be able to reclaim all references, - * so it's safe to wait forever... we can't leak locks... */ + * so it's safe to wait forever... we can't leak locks... + */ if (force && rc == -ETIMEDOUT) { LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n", ldlm_ns_name(ns), @@ -943,7 +950,8 @@ static void ldlm_namespace_unregister(struct ldlm_namespace *ns, LASSERT(!list_empty(&ns->ns_list_chain)); /* Some asserts and possibly other parts of the code are still * using list_empty(&ns->ns_list_chain). This is why it is - * important to use list_del_init() here. */ + * important to use list_del_init() here. + */ list_del_init(&ns->ns_list_chain); ldlm_namespace_nr_dec(client); mutex_unlock(ldlm_namespace_lock(client)); @@ -963,7 +971,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns) ldlm_namespace_unregister(ns, ns->ns_client); /* Fini pool _before_ parent proc dir is removed. This is important as * ldlm_pool_fini() removes own proc dir which is child to @dir. - * Removing it after @dir may cause oops. */ + * Removing it after @dir may cause oops. + */ ldlm_pool_fini(&ns->ns_pool); ldlm_namespace_debugfs_unregister(ns); @@ -971,7 +980,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns) cfs_hash_putref(ns->ns_rs_hash); /* Namespace \a ns should be not on list at this time, otherwise * this will cause issues related to using freed \a ns in poold - * thread. */ + * thread. 
+ */ LASSERT(list_empty(&ns->ns_list_chain)); kfree(ns); ldlm_put_ref(); @@ -1031,8 +1041,8 @@ static struct ldlm_resource *ldlm_resource_new(void) struct ldlm_resource *res; int idx; - res = kmem_cache_alloc(ldlm_resource_slab, GFP_NOFS | __GFP_ZERO); - if (res == NULL) + res = kmem_cache_zalloc(ldlm_resource_slab, GFP_NOFS); + if (!res) return NULL; INIT_LIST_HEAD(&res->lr_granted); @@ -1050,7 +1060,8 @@ static struct ldlm_resource *ldlm_resource_new(void) lu_ref_init(&res->lr_reference); /* The creator of the resource must unlock the mutex after LVB - * initialization. */ + * initialization. + */ mutex_init(&res->lr_lvb_mutex); mutex_lock(&res->lr_lvb_mutex); @@ -1065,7 +1076,8 @@ static struct ldlm_resource *ldlm_resource_new(void) */ struct ldlm_resource * ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, - const struct ldlm_res_id *name, ldlm_type_t type, int create) + const struct ldlm_res_id *name, enum ldlm_type type, + int create) { struct hlist_node *hnode; struct ldlm_resource *res; @@ -1073,14 +1085,13 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, __u64 version; int ns_refcount = 0; - LASSERT(ns != NULL); - LASSERT(parent == NULL); - LASSERT(ns->ns_rs_hash != NULL); + LASSERT(!parent); + LASSERT(ns->ns_rs_hash); LASSERT(name->name[0] != 0); cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0); hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name); - if (hnode != NULL) { + if (hnode) { cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0); res = hlist_entry(hnode, struct ldlm_resource, lr_hash); /* Synchronize with regard to resource creation. */ @@ -1111,13 +1122,12 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd); res->lr_name = *name; res->lr_type = type; - res->lr_most_restr = LCK_NL; cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1); hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL : cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name); - if (hnode != NULL) { + if (hnode) { /* Someone won the race and already added the resource. */ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1); /* Clean lu_ref for failed resource. */ @@ -1167,7 +1177,8 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, /* Let's see if we happened to be the very first resource in this * namespace. If so, and this is a client namespace, we need to move * the namespace into the active namespaces list to be patrolled by - * the ldlm_poold. */ + * the ldlm_poold. + */ if (ns_refcount == 1) { mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT)); ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT); diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c index 3d6745e63fe3..dd1c827013b9 100644 --- a/drivers/staging/lustre/lustre/llite/dcache.c +++ b/drivers/staging/lustre/lustre/llite/dcache.c @@ -60,9 +60,9 @@ static void ll_release(struct dentry *de) { struct ll_dentry_data *lld; - LASSERT(de != NULL); + LASSERT(de); lld = ll_d2d(de); - if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */ + if (!lld) /* NFS copies the de->d_op methods (bug 4655) */ return; if (lld->lld_it) { @@ -80,7 +80,8 @@ static void ll_release(struct dentry *de) * This avoids a race where ll_lookup_it() instantiates a dentry, but we get * an AST before calling d_revalidate_it(). 
The dentry still exists (marked * INVALID) so d_lookup() matches it, but we have no lock on it (so - * lock_match() fails) and we spin around real_lookup(). */ + * lock_match() fails) and we spin around real_lookup(). + */ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) @@ -117,7 +118,8 @@ static inline int return_if_equal(struct ldlm_lock *lock, void *data) /* find any ldlm lock of the inode in mdc and lov * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int find_cbdata(struct inode *inode) { struct ll_sb_info *sbi = ll_i2sbi(inode); @@ -131,7 +133,7 @@ static int find_cbdata(struct inode *inode) return rc; lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) + if (!lsm) return rc; rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL); @@ -163,10 +165,12 @@ static int ll_ddelete(const struct dentry *de) /* Disable this piece of code temporarily because this is called * inside dcache_lock so it's not appropriate to do lots of work * here. ATTENTION: Before this piece of code enabling, LU-2487 must be - * resolved. */ + * resolved. + */ #if 0 /* if not ldlm lock for this inode, set i_nlink to 0 so that - * this inode can be recycled later b=20433 */ + * this inode can be recycled later b=20433 + */ if (d_really_is_positive(de) && !find_cbdata(d_inode(de))) clear_nlink(d_inode(de)); #endif @@ -178,19 +182,16 @@ static int ll_ddelete(const struct dentry *de) int ll_d_init(struct dentry *de) { - LASSERT(de != NULL); - CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n", - de, de, de->d_parent, d_inode(de), - d_count(de)); + de, de, de->d_parent, d_inode(de), d_count(de)); - if (de->d_fsdata == NULL) { + if (!de->d_fsdata) { struct ll_dentry_data *lld; lld = kzalloc(sizeof(*lld), GFP_NOFS); if (likely(lld)) { spin_lock(&de->d_lock); - if (likely(de->d_fsdata == NULL)) { + if (likely(!de->d_fsdata)) { de->d_fsdata = lld; __d_lustre_invalidate(de); } else { @@ -218,7 +219,8 @@ void ll_intent_drop_lock(struct lookup_intent *it) ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode); /* bug 494: intent_release may be called multiple times, from - * this thread and we don't want to double-decref this lock */ + * this thread and we don't want to double-decref this lock + */ it->d.lustre.it_lock_mode = 0; if (it->d.lustre.it_remote_lock_mode != 0) { handle.cookie = it->d.lustre.it_remote_lock_handle; @@ -251,8 +253,6 @@ void ll_invalidate_aliases(struct inode *inode) { struct dentry *dentry; - LASSERT(inode != NULL); - CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n", inode->i_ino, inode->i_generation, inode); @@ -286,9 +286,7 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request, void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) { - LASSERT(it != NULL); - - if (it->d.lustre.it_lock_mode && inode != NULL) { + if (it->d.lustre.it_lock_mode && inode) { struct ll_sb_info *sbi = ll_i2sbi(inode); CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n", @@ -300,7 +298,8 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) { /* on 2.6 there are situation when several lookups and * revalidations may be requested during single operation. 
- * therefore, we don't release intent here -bzzz */ + * therefore, we don't release intent here -bzzz + */ ll_intent_drop_lock(it); } } @@ -328,7 +327,7 @@ static int ll_revalidate_dentry(struct dentry *dentry, if (lookup_flags & LOOKUP_RCU) return -ECHILD; - do_statahead_enter(dir, &dentry, d_inode(dentry) == NULL); + do_statahead_enter(dir, &dentry, !d_inode(dentry)); ll_statahead_mark(dir, dentry); return 1; } diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 8982f7d1b374..4e0a3e583330 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -55,6 +55,7 @@ #include "../include/lustre_lite.h" #include "../include/lustre_dlm.h" #include "../include/lustre_fid.h" +#include "../include/lustre_kernelcomm.h" #include "llite_internal.h" /* @@ -189,8 +190,6 @@ static int ll_dir_filler(void *_hash, struct page *page0) } else if (rc == 0) { body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); /* Checked by mdc_readpage() */ - LASSERT(body != NULL); - if (body->valid & OBD_MD_FLSIZE) cl_isize_write(inode, body->size); @@ -244,7 +243,7 @@ void ll_release_page(struct page *page, int remove) kunmap(page); if (remove) { lock_page(page); - if (likely(page->mapping != NULL)) + if (likely(page->mapping)) truncate_complete_page(page->mapping, page); unlock_page(page); } @@ -333,7 +332,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, struct lustre_handle lockh; struct lu_dirpage *dp; struct page *page; - ldlm_mode_t mode; + enum ldlm_mode mode; int rc; __u64 start = 0; __u64 end = 0; @@ -356,7 +355,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, struct md_op_data *op_data; op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); + LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) return (void *)op_data; @@ -369,8 +368,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, if (request) ptlrpc_req_finished(request); if (rc < 0) { - CERROR("lock enqueue: "DFID" at %llu: rc %d\n", - PFID(ll_inode2fid(dir)), hash, rc); + CERROR("lock enqueue: " DFID " at %llu: rc %d\n", + PFID(ll_inode2fid(dir)), hash, rc); return ERR_PTR(rc); } @@ -380,7 +379,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, &it.d.lustre.it_lock_handle, dir, NULL); } else { /* for cross-ref object, l_ast_data of the lock may not be set, - * we reset it here */ + * we reset it here + */ md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie, dir, NULL); } @@ -392,7 +392,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, CERROR("dir page locate: "DFID" at %llu: rc %ld\n", PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page)); goto out_unlock; - } else if (page != NULL) { + } else if (page) { /* * XXX nikita: not entirely correct handling of a corner case: * suppose hash chain of entries with hash value HASH crosses @@ -498,7 +498,7 @@ int ll_dir_read(struct inode *inode, struct dir_context *ctx) __u64 next; dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL && !done; + for (ent = lu_dirent_start(dp); ent && !done; ent = lu_dirent_next(ent)) { __u16 type; int namelen; @@ -688,7 +688,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, struct obd_device *mgc = lsi->lsi_mgc; int lum_size; - if (lump != NULL) { + if (lump) { /* * This is coming from userspace, so should be in * local endian. 
But the MDS would like it in little @@ -724,7 +724,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, if (IS_ERR(op_data)) return PTR_ERR(op_data); - if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC)) + if (lump && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC)) op_data->op_cli_flags |= CLI_SET_MEA; /* swabbing is done in lov_setstripe() on server side */ @@ -738,8 +738,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, } /* In the following we use the fact that LOV_USER_MAGIC_V1 and - LOV_USER_MAGIC_V3 have the same initial fields so we do not - need to make the distinction between the 2 versions */ + * LOV_USER_MAGIC_V3 have the same initial fields so we do not + * need to make the distinction between the 2 versions + */ if (set_default && mgc->u.cli.cl_mgc_mgsexp) { char *param = NULL; char *buf; @@ -811,7 +812,6 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp, } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); lmmsize = body->eadatasize; @@ -823,7 +823,6 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp, lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); - LASSERT(lmm != NULL); /* * This is coming from the MDS, so is probably in @@ -879,7 +878,7 @@ int ll_get_mdt_idx(struct inode *inode) /** * Generic handler to do any pre-copy work. * - * It send a first hsm_progress (with extent length == 0) to coordinator as a + * It sends a first hsm_progress (with extent length == 0) to coordinator as a * first information for it that real work has started. * * Moreover, for a ARCHIVE request, it will sample the file data version and @@ -931,8 +930,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy) goto progress; } - /* Store it the hsm_copy for later copytool use. - * Always modified even if no lsm. */ + /* Store in the hsm_copy for later copytool use. + * Always modified even if no lsm. + */ copy->hc_data_version = data_version; } @@ -1008,12 +1008,14 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) goto progress; } - /* Store it the hsm_copy for later copytool use. - * Always modified even if no lsm. */ + /* Store in the hsm_copy for later copytool use. + * Always modified even if no lsm. + */ hpk.hpk_data_version = data_version; /* File could have been stripped during archiving, so we need - * to check anyway. */ + * to check anyway. + */ if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) && (copy->hc_data_version != data_version)) { CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. " @@ -1025,7 +1027,8 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) * the cdt will loop on retried archive requests. * The policy engine will ask for a new archive later * when the file will not be modified for some tunable - * time */ + * time + */ /* we do not notify caller */ hpk.hpk_flags &= ~HP_FLAG_RETRY; /* hpk_errval must be >= 0 */ @@ -1153,7 +1156,8 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) return rc; } /* If QIF_SPACE is not set, client should collect the - * space usage from OSSs by itself */ + * space usage from OSSs by itself + */ if (cmd == Q_GETQUOTA && !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) && !oqctl->qc_dqblk.dqb_curspace) { @@ -1204,7 +1208,8 @@ out: /* This function tries to get a single name component, * to send to the server. 
No actual path traversal involved, - * so we limit to NAME_MAX */ + * so we limit to NAME_MAX + */ static char *ll_getname(const char __user *filename) { int ret = 0, len; @@ -1252,7 +1257,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ll_iocontrol(inode, file, cmd, arg); case FSFILT_IOC_GETVERSION_OLD: case FSFILT_IOC_GETVERSION: - return put_user(inode->i_generation, (int *)arg); + return put_user(inode->i_generation, (int __user *)arg); /* We need to special case any other ioctls we want to handle, * to send them to the MDS/OST as appropriate and to properly * network encode the arg field. @@ -1266,7 +1271,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (mdtidx < 0) return mdtidx; - if (put_user((int)mdtidx, (int *)arg)) + if (put_user((int)mdtidx, (int __user *)arg)) return -EFAULT; return 0; @@ -1278,7 +1283,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) char *filename; struct md_op_data *op_data; - rc = obd_ioctl_getdata(&buf, &len, (void *)arg); + rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); if (rc) return rc; data = (void *)buf; @@ -1320,12 +1325,12 @@ out_free: int len; int rc; - rc = obd_ioctl_getdata(&buf, &len, (void *)arg); + rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); if (rc) return rc; data = (void *)buf; - if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL || + if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 || data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) { rc = -EINVAL; goto lmv_out_free; @@ -1363,8 +1368,8 @@ lmv_out_free: case LL_IOC_LOV_SETSTRIPE: { struct lov_user_md_v3 lumv3; struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; - struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg; - struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg; + struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; + struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; int set_default = 0; @@ -1389,7 +1394,7 @@ lmv_out_free: return rc; } case LL_IOC_LMV_GETSTRIPE: { - struct lmv_user_md *lump = (struct lmv_user_md *)arg; + struct lmv_user_md __user *lump = (void __user *)arg; struct lmv_user_md lum; struct lmv_user_md *tmp; int lum_size; @@ -1422,7 +1427,7 @@ lmv_out_free: tmp->lum_objects[0].lum_mds = mdtindex; memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode), sizeof(struct lu_fid)); - if (copy_to_user((void *)arg, tmp, lum_size)) { + if (copy_to_user((void __user *)arg, tmp, lum_size)) { rc = -EFAULT; goto free_lmv; } @@ -1433,13 +1438,13 @@ free_lmv: case LL_IOC_LOV_SWAP_LAYOUTS: return -EPERM; case LL_IOC_OBD_STATFS: - return ll_obd_statfs(inode, (void *)arg); + return ll_obd_statfs(inode, (void __user *)arg); case LL_IOC_LOV_GETSTRIPE: case LL_IOC_MDC_GETINFO: case IOC_MDC_GETFILEINFO: case IOC_MDC_GETFILESTRIPE: { struct ptlrpc_request *request = NULL; - struct lov_user_md *lump; + struct lov_user_md __user *lump; struct lov_mds_md *lmm = NULL; struct mdt_body *body; char *filename = NULL; @@ -1447,7 +1452,7 @@ free_lmv: if (cmd == IOC_MDC_GETFILEINFO || cmd == IOC_MDC_GETFILESTRIPE) { - filename = ll_getname((const char *)arg); + filename = ll_getname((const char __user *)arg); if (IS_ERR(filename)) return PTR_ERR(filename); @@ -1460,7 +1465,7 @@ free_lmv: if (request) { body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); + LASSERT(body); } else { goto out_req; } @@ -1476,11 +1481,11 @@ free_lmv: if (cmd == IOC_MDC_GETFILESTRIPE || cmd == 
LL_IOC_LOV_GETSTRIPE) { - lump = (struct lov_user_md *)arg; + lump = (struct lov_user_md __user *)arg; } else { - struct lov_user_mds_data *lmdp; + struct lov_user_mds_data __user *lmdp; - lmdp = (struct lov_user_mds_data *)arg; + lmdp = (struct lov_user_mds_data __user *)arg; lump = &lmdp->lmd_lmm; } if (copy_to_user(lump, lmm, lmmsize)) { @@ -1492,7 +1497,7 @@ free_lmv: } skip_lmm: if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) { - struct lov_user_mds_data *lmdp; + struct lov_user_mds_data __user *lmdp; lstat_t st = { 0 }; st.st_dev = inode->i_sb->s_dev; @@ -1509,7 +1514,7 @@ skip_lmm: st.st_ctime = body->ctime; st.st_ino = inode->i_ino; - lmdp = (struct lov_user_mds_data *)arg; + lmdp = (struct lov_user_mds_data __user *)arg; if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) { rc = -EFAULT; goto out_req; @@ -1523,14 +1528,14 @@ out_req: return rc; } case IOC_LOV_GETINFO: { - struct lov_user_mds_data *lumd; + struct lov_user_mds_data __user *lumd; struct lov_stripe_md *lsm; - struct lov_user_md *lum; + struct lov_user_md __user *lum; struct lov_mds_md *lmm; int lmmsize; lstat_t st; - lumd = (struct lov_user_mds_data *)arg; + lumd = (struct lov_user_mds_data __user *)arg; lum = &lumd->lmd_lmm; rc = ll_get_max_mdsize(sbi, &lmmsize); @@ -1538,7 +1543,7 @@ out_req: return rc; lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS); - if (lmm == NULL) + if (!lmm) return -ENOMEM; if (copy_from_user(lmm, lum, lmmsize)) { rc = -EFAULT; @@ -1636,8 +1641,8 @@ free_lmm: NULL); if (rc) { CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc); - if (copy_to_user((void *)arg, check, - sizeof(*check))) + if (copy_to_user((void __user *)arg, check, + sizeof(*check))) CDEBUG(D_QUOTA, "copy_to_user failed\n"); goto out_poll; } @@ -1646,8 +1651,8 @@ free_lmm: NULL); if (rc) { CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc); - if (copy_to_user((void *)arg, check, - sizeof(*check))) + if (copy_to_user((void __user *)arg, check, + sizeof(*check))) CDEBUG(D_QUOTA, "copy_to_user failed\n"); goto out_poll; } @@ -1662,14 +1667,15 @@ out_poll: if (!qctl) return -ENOMEM; - if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) { + if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) { rc = -EFAULT; goto out_quotactl; } rc = quotactl_ioctl(sbi, qctl); - if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl))) + if (rc == 0 && copy_to_user((void __user *)arg, qctl, + sizeof(*qctl))) rc = -EFAULT; out_quotactl: @@ -1686,7 +1692,6 @@ out_quotactl: if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); rc = rct_add(&sbi->ll_rct, current_pid(), arg); if (!rc) fd->fd_flags |= LL_FILE_RMTACL; @@ -1699,7 +1704,7 @@ out_quotactl: int count, vallen; struct obd_export *exp; - if (copy_from_user(&count, (int *)arg, sizeof(int))) + if (copy_from_user(&count, (int __user *)arg, sizeof(int))) return -EFAULT; /* get ost count when count is zero, get mdt count otherwise */ @@ -1712,34 +1717,35 @@ out_quotactl: return rc; } - if (copy_to_user((int *)arg, &count, sizeof(int))) + if (copy_to_user((int __user *)arg, &count, sizeof(int))) return -EFAULT; return 0; } case LL_IOC_PATH2FID: - if (copy_to_user((void *)arg, ll_inode2fid(inode), - sizeof(struct lu_fid))) + if (copy_to_user((void __user *)arg, ll_inode2fid(inode), + sizeof(struct lu_fid))) return -EFAULT; return 0; case LL_IOC_GET_CONNECT_FLAGS: { - return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg); + return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, + (void __user 
*)arg); } case OBD_IOC_CHANGELOG_SEND: case OBD_IOC_CHANGELOG_CLEAR: if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg, + rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, sizeof(struct ioc_changelog)); return rc; case OBD_IOC_FID2PATH: - return ll_fid2path(inode, (void *)arg); + return ll_fid2path(inode, (void __user *)arg); case LL_IOC_HSM_REQUEST: { struct hsm_user_request *hur; ssize_t totalsize; - hur = memdup_user((void *)arg, sizeof(*hur)); + hur = memdup_user((void __user *)arg, sizeof(*hur)); if (IS_ERR(hur)) return PTR_ERR(hur); @@ -1754,11 +1760,11 @@ out_quotactl: return -E2BIG; hur = libcfs_kvzalloc(totalsize, GFP_NOFS); - if (hur == NULL) + if (!hur) return -ENOMEM; /* Copy the whole struct */ - if (copy_from_user(hur, (void *)arg, totalsize)) { + if (copy_from_user(hur, (void __user *)arg, totalsize)) { kvfree(hur); return -EFAULT; } @@ -1794,7 +1800,7 @@ out_quotactl: struct hsm_progress_kernel hpk; struct hsm_progress hp; - if (copy_from_user(&hp, (void *)arg, sizeof(hp))) + if (copy_from_user(&hp, (void __user *)arg, sizeof(hp))) return -EFAULT; hpk.hpk_fid = hp.hp_fid; @@ -1805,13 +1811,14 @@ out_quotactl: hpk.hpk_data_version = 0; /* File may not exist in Lustre; all progress - * reported to Lustre root */ + * reported to Lustre root + */ rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk, NULL); return rc; } case LL_IOC_HSM_CT_START: - rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg, + rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, sizeof(struct lustre_kernelcomm)); return rc; @@ -1819,12 +1826,12 @@ out_quotactl: struct hsm_copy *copy; int rc; - copy = memdup_user((char *)arg, sizeof(*copy)); + copy = memdup_user((char __user *)arg, sizeof(*copy)); if (IS_ERR(copy)) return PTR_ERR(copy); rc = ll_ioc_copy_start(inode->i_sb, copy); - if (copy_to_user((char *)arg, copy, sizeof(*copy))) + if (copy_to_user((char __user *)arg, copy, sizeof(*copy))) rc = -EFAULT; kfree(copy); @@ -1834,19 +1841,20 @@ out_quotactl: struct hsm_copy *copy; int rc; - copy = memdup_user((char *)arg, sizeof(*copy)); + copy = memdup_user((char __user *)arg, sizeof(*copy)); if (IS_ERR(copy)) return PTR_ERR(copy); rc = ll_ioc_copy_end(inode->i_sb, copy); - if (copy_to_user((char *)arg, copy, sizeof(*copy))) + if (copy_to_user((char __user *)arg, copy, sizeof(*copy))) rc = -EFAULT; kfree(copy); return rc; } default: - return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg); + return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, + (void __user *)arg); } } diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index 39e2ffd5f97f..cf619af3caf5 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -64,8 +64,8 @@ static struct ll_file_data *ll_file_data_get(void) { struct ll_file_data *fd; - fd = kmem_cache_alloc(ll_file_data_slab, GFP_NOFS | __GFP_ZERO); - if (fd == NULL) + fd = kmem_cache_zalloc(ll_file_data_slab, GFP_NOFS); + if (!fd) return NULL; fd->fd_write_failed = false; return fd; @@ -73,7 +73,7 @@ static struct ll_file_data *ll_file_data_get(void) static void ll_file_data_put(struct ll_file_data *fd) { - if (fd != NULL) + if (fd) kmem_cache_free(ll_file_data_slab, fd); } @@ -134,7 +134,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, int epoch_close = 1; int rc; - if (obd == NULL) { + if (!obd) { /* * XXX: in case of LMV, is this correct to access * ->exp_handle? 
@@ -153,7 +153,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, } ll_prepare_close(inode, op_data, och); - if (data_version != NULL) { + if (data_version) { /* Pass in data_version implies release. */ op_data->op_bias |= MDS_HSM_RELEASE; op_data->op_data_version = *data_version; @@ -166,7 +166,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, /* This close must have the epoch closed. */ LASSERT(epoch_close); /* MDS has instructed us to obtain Size-on-MDS attribute from - * OSTs and send setattr to back to MDS. */ + * OSTs and send setattr to back to MDS. + */ rc = ll_som_update(inode, op_data); if (rc) { CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n", @@ -179,7 +180,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, } /* DATA_MODIFIED flag was successfully sent on close, cancel data - * modification flag. */ + * modification flag. + */ if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) { struct ll_inode_info *lli = ll_i2info(inode); @@ -242,7 +244,8 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode) mutex_lock(&lli->lli_och_mutex); if (*och_usecount > 0) { /* There are still users of this handle, so skip - * freeing it. */ + * freeing it. + */ mutex_unlock(&lli->lli_och_mutex); return 0; } @@ -251,9 +254,10 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode) *och_p = NULL; mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { /* There might be a race and this handle may already - be closed. */ + * be closed. + */ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, NULL); } @@ -276,26 +280,29 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode, if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED)) ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { bool lease_broken; /* Usually the lease is not released when the - * application crashed, we need to release here. */ + * application crashed, we need to release here. + */ rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken); - CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n", - PFID(&lli->lli_fid), rc, lease_broken); + CDEBUG(rc ? D_ERROR : D_INODE, + "Clean up lease " DFID " %d/%d\n", + PFID(&lli->lli_fid), rc, lease_broken); fd->fd_lease_och = NULL; } - if (fd->fd_och != NULL) { + if (fd->fd_och) { rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL); fd->fd_och = NULL; goto out; } /* Let's see if we have good enough OPEN lock on the file and if - we can skip talking to MDS */ + * we can skip talking to MDS + */ mutex_lock(&lli->lli_och_mutex); if (fd->fd_omode & FMODE_WRITE) { @@ -343,7 +350,6 @@ int ll_file_release(struct inode *inode, struct file *file) if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) { fd->fd_flags &= ~LL_FILE_RMTACL; rct_del(&sbi->ll_rct, current_pid()); @@ -355,11 +361,12 @@ int ll_file_release(struct inode *inode, struct file *file) if (!is_root_inode(inode)) ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1); fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); + LASSERT(fd); - /* The last ref on @file, maybe not the owner pid of statahead. + /* The last ref on @file, maybe not be the owner pid of statahead. * Different processes can open the same dir, "ll_opendir_key" means: - * it is me that should stop the statahead thread. 
*/ + * it is me that should stop the statahead thread. + */ if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd && lli->lli_opendir_pid != 0) ll_stop_statahead(inode, lli->lli_opendir_key); @@ -396,16 +403,16 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm, __u32 opc = LUSTRE_OPC_ANY; int rc; - /* Usually we come here only for NFSD, and we want open lock. - But we can also get here with pre 2.6.15 patchless kernels, and in - that case that lock is also ok */ + /* Usually we come here only for NFSD, and we want open lock. */ /* We can also get here if there was cached open handle in revalidate_it * but it disappeared while we were getting from there to ll_file_open. * But this means this file was closed and immediately opened which - * makes a good candidate for using OPEN lock */ + * makes a good candidate for using OPEN lock + */ /* If lmmsize & lmm are not 0, we are just setting stripe info - * parameters. No need for the open lock */ - if (lmm == NULL && lmmsize == 0) { + * parameters. No need for the open lock + */ + if (!lmm && lmmsize == 0) { itp->it_flags |= MDS_OPEN_LOCK; if (itp->it_flags & FMODE_WRITE) opc = LUSTRE_OPC_CREATE; @@ -426,7 +433,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm, * with messages with -ESTALE errors. */ if (!it_disposition(itp, DISP_OPEN_OPEN) || - it_open_error(DISP_OPEN_OPEN, itp)) + it_open_error(DISP_OPEN_OPEN, itp)) goto out; ll_release_openhandle(inode, itp); goto out; @@ -492,7 +499,7 @@ static int ll_local_open(struct file *file, struct lookup_intent *it, LASSERT(!LUSTRE_FPRIVATE(file)); - LASSERT(fd != NULL); + LASSERT(fd); if (och) { struct ptlrpc_request *req = it->d.lustre.it_data; @@ -543,7 +550,7 @@ int ll_file_open(struct inode *inode, struct file *file) file->private_data = NULL; /* prevent ll_local_open assertion */ fd = ll_file_data_get(); - if (fd == NULL) { + if (!fd) { rc = -ENOMEM; goto out_openerr; } @@ -551,7 +558,7 @@ int ll_file_open(struct inode *inode, struct file *file) fd->fd_file = file; if (S_ISDIR(inode->i_mode)) { spin_lock(&lli->lli_sa_lock); - if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL && + if (!lli->lli_opendir_key && !lli->lli_sai && lli->lli_opendir_pid == 0) { lli->lli_opendir_key = fd; lli->lli_opendir_pid = current_pid(); @@ -568,7 +575,8 @@ int ll_file_open(struct inode *inode, struct file *file) if (!it || !it->d.lustre.it_disposition) { /* Convert f_flags into access mode. We cannot use file->f_mode, * because everything but O_ACCMODE mask was stripped from - * there */ + * there + */ if ((oit.it_flags + 1) & O_ACCMODE) oit.it_flags++; if (file->f_flags & O_TRUNC) @@ -577,17 +585,20 @@ int ll_file_open(struct inode *inode, struct file *file) /* kernel only call f_op->open in dentry_open. filp_open calls * dentry_open after call to open_namei that checks permissions. * Only nfsd_open call dentry_open directly without checking - * permissions and because of that this code below is safe. */ + * permissions and because of that this code below is safe. + */ if (oit.it_flags & (FMODE_WRITE | FMODE_READ)) oit.it_flags |= MDS_OPEN_OWNEROVERRIDE; /* We do not want O_EXCL here, presumably we opened the file - * already? XXX - NFS implications? */ + * already? XXX - NFS implications? 
+ */ oit.it_flags &= ~O_EXCL; /* bug20584, if "it_flags" contains O_CREAT, the file will be * created if necessary, then "IT_CREAT" should be set to keep - * consistent with it */ + * consistent with it + */ if (oit.it_flags & O_CREAT) oit.it_op |= IT_CREAT; @@ -611,7 +622,8 @@ restart: if (*och_p) { /* Open handle is present */ if (it_disposition(it, DISP_OPEN_OPEN)) { /* Well, there's extra open request that we do not need, - let's close it somehow. This will decref request. */ + * let's close it somehow. This will decref request. + */ rc = it_open_error(DISP_OPEN_OPEN, it); if (rc) { mutex_unlock(&lli->lli_och_mutex); @@ -632,10 +644,11 @@ restart: LASSERT(*och_usecount == 0); if (!it->d.lustre.it_disposition) { /* We cannot just request lock handle now, new ELC code - means that one of other OPEN locks for this file - could be cancelled, and since blocking ast handler - would attempt to grab och_mutex as well, that would - result in a deadlock */ + * means that one of other OPEN locks for this file + * could be cancelled, and since blocking ast handler + * would attempt to grab och_mutex as well, that would + * result in a deadlock + */ mutex_unlock(&lli->lli_och_mutex); it->it_create_mode |= M_CHECK_STALE; rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it); @@ -655,9 +668,11 @@ restart: /* md_intent_lock() didn't get a request ref if there was an * open error, so don't do cleanup on the request here - * (bug 3430) */ + * (bug 3430) + */ /* XXX (green): Should not we bail out on any error here, not - * just open error? */ + * just open error? + */ rc = it_open_error(DISP_OPEN_OPEN, it); if (rc) goto out_och_free; @@ -672,8 +687,9 @@ restart: fd = NULL; /* Must do this outside lli_och_mutex lock to prevent deadlock where - different kind of OPEN lock for this same inode gets cancelled - by ldlm_cancel_lru */ + * different kind of OPEN lock for this same inode gets cancelled + * by ldlm_cancel_lru + */ if (!S_ISREG(inode->i_mode)) goto out_och_free; @@ -712,7 +728,8 @@ out_openerr: } static int ll_md_blocking_lease_ast(struct ldlm_lock *lock, - struct ldlm_lock_desc *desc, void *data, int flag) + struct ldlm_lock_desc *desc, + void *data, int flag) { int rc; struct lustre_handle lockh; @@ -752,7 +769,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, if (fmode != FMODE_WRITE && fmode != FMODE_READ) return ERR_PTR(-EINVAL); - if (file != NULL) { + if (file) { struct ll_inode_info *lli = ll_i2info(inode); struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct obd_client_handle **och_p; @@ -764,18 +781,18 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, /* Get the openhandle of the file */ rc = -EBUSY; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { mutex_unlock(&lli->lli_och_mutex); return ERR_PTR(rc); } - if (fd->fd_och == NULL) { + if (!fd->fd_och) { if (file->f_mode & FMODE_WRITE) { - LASSERT(lli->lli_mds_write_och != NULL); + LASSERT(lli->lli_mds_write_och); och_p = &lli->lli_mds_write_och; och_usecount = &lli->lli_open_fd_write_count; } else { - LASSERT(lli->lli_mds_read_och != NULL); + LASSERT(lli->lli_mds_read_och); och_p = &lli->lli_mds_read_och; och_usecount = &lli->lli_open_fd_read_count; } @@ -790,7 +807,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, if (rc < 0) /* more than 1 opener */ return ERR_PTR(rc); - LASSERT(fd->fd_och != NULL); + LASSERT(fd->fd_och); old_handle = fd->fd_och->och_fh; } @@ -799,7 +816,7 @@ ll_lease_open(struct inode *inode, struct 
file *file, fmode_t fmode, return ERR_PTR(-ENOMEM); op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); + LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) { rc = PTR_ERR(op_data); goto out; @@ -811,13 +828,14 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, it.it_flags = fmode | open_flags; it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE; rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0, &req, - ll_md_blocking_lease_ast, + ll_md_blocking_lease_ast, /* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise * it can be cancelled which may mislead applications that the lease is * broken; * LDLM_FL_EXCL: Set this flag so that it won't be matched by normal * open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast - * doesn't deal with openhandle, so normal openhandle will be leaked. */ + * doesn't deal with openhandle, so normal openhandle will be leaked. + */ LDLM_FL_NO_LRU | LDLM_FL_EXCL); ll_finish_md_op_data(op_data); ptlrpc_req_finished(req); @@ -847,8 +865,8 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) { /* open lock must return for lease */ CERROR(DFID "lease granted but no open lock, %d/%llu.\n", - PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode, - it.d.lustre.it_lock_bits); + PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode, + it.d.lustre.it_lock_bits); rc = -EPROTO; goto out_close; } @@ -864,7 +882,7 @@ out_close: /* cancel open lock */ if (it.d.lustre.it_lock_mode != 0) { ldlm_lock_decref_and_cancel(&och->och_lease_handle, - it.d.lustre.it_lock_mode); + it.d.lustre.it_lock_mode); it.d.lustre.it_lock_mode = 0; } out_release_it: @@ -886,19 +904,19 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode, int rc; lock = ldlm_handle2lock(&och->och_lease_handle); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); cancelled = ldlm_is_cancel(lock); unlock_res_and_lock(lock); ldlm_lock_put(lock); } - CDEBUG(D_INODE, "lease for "DFID" broken? %d\n", - PFID(&ll_i2info(inode)->lli_fid), cancelled); + CDEBUG(D_INODE, "lease for " DFID " broken? 
%d\n", + PFID(&ll_i2info(inode)->lli_fid), cancelled); if (!cancelled) ldlm_cli_cancel(&och->och_lease_handle, 0); - if (lease_broken != NULL) + if (lease_broken) *lease_broken = cancelled; rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, @@ -914,7 +932,7 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, struct obd_info oinfo = { }; int rc; - LASSERT(lsm != NULL); + LASSERT(lsm); oinfo.oi_md = lsm; oinfo.oi_oa = obdo; @@ -933,8 +951,8 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, } set = ptlrpc_prep_set(); - if (set == NULL) { - CERROR("can't allocate ptlrpc set\n"); + if (!set) { + CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); rc = -ENOMEM; } else { rc = obd_getattr_async(exp, &oinfo, set); @@ -986,7 +1004,8 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode) ll_inode_size_lock(inode); /* merge timestamps the most recently obtained from mds with - timestamps obtained from osts */ + * timestamps obtained from osts + */ LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime; LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime; LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime; @@ -1009,8 +1028,8 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode) if (lvb.lvb_mtime < attr->cat_mtime) lvb.lvb_mtime = attr->cat_mtime; - CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n", - PFID(&lli->lli_fid), attr->cat_size); + CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n", + PFID(&lli->lli_fid), attr->cat_size); cl_isize_write_nolock(inode, attr->cat_size); inode->i_blocks = attr->cat_blocks; @@ -1155,12 +1174,13 @@ restart: out: cl_io_fini(env, io); /* If any bit been read/written (result != 0), we just return - * short read/write instead of restart io. */ + * short read/write instead of restart io. + */ if ((result == 0 || result == -ENODATA) && io->ci_need_restart) { CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zd\n", iot == CIT_READ ? 
"read" : "write", file, *ppos, count); - LASSERTF(io->ci_nob == 0, "%zd", io->ci_nob); + LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob); goto restart; } @@ -1221,7 +1241,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from) args->u.normal.via_iocb = iocb; result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE, - &iocb->ki_pos, iov_iter_count(from)); + &iocb->ki_pos, iov_iter_count(from)); cl_env_put(env, &refcheck); return result; } @@ -1260,8 +1280,8 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx) int rc = 0; struct lov_stripe_md *lsm = NULL, *lsm2; - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) return -ENOMEM; lsm = ccc_inode_lsm_get(inode); @@ -1274,7 +1294,7 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx) (lsm->lsm_stripe_count)); lsm2 = libcfs_kvzalloc(lsm_size, GFP_NOFS); - if (lsm2 == NULL) { + if (!lsm2) { rc = -ENOMEM; goto out; } @@ -1307,7 +1327,7 @@ static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg) if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg, + if (copy_from_user(&ucreat, (struct ll_recreate_obj __user *)arg, sizeof(ucreat))) return -EFAULT; @@ -1325,7 +1345,7 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg) if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid))) + if (copy_from_user(&fid, (struct lu_fid __user *)arg, sizeof(fid))) return -EFAULT; fid_to_ostid(&fid, &oi); @@ -1341,7 +1361,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, int rc = 0; lsm = ccc_inode_lsm_get(inode); - if (lsm != NULL) { + if (lsm) { ccc_inode_lsm_put(inode, lsm); CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n", inode->i_ino); @@ -1401,18 +1421,16 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); /* checked by mdc_getattr_name */ lmmsize = body->eadatasize; if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) || - lmmsize == 0) { + lmmsize == 0) { rc = -ENODATA; goto out; } lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); - LASSERT(lmm != NULL); if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) && (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) { @@ -1433,7 +1451,8 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, stripe_count = 0; /* if function called for directory - we should - * avoid swab not existent lsm objects */ + * avoid swab not existent lsm objects + */ if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) { lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm); if (S_ISREG(body->mode)) @@ -1457,7 +1476,7 @@ out: } static int ll_lov_setea(struct inode *inode, struct file *file, - unsigned long arg) + unsigned long arg) { int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE; struct lov_user_md *lump; @@ -1469,16 +1488,16 @@ static int ll_lov_setea(struct inode *inode, struct file *file, return -EPERM; lump = libcfs_kvzalloc(lum_size, GFP_NOFS); - if (lump == NULL) + if (!lump) return -ENOMEM; - if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) { + if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size)) { kvfree(lump); return -EFAULT; } rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lump, - lum_size); + 
lum_size); cl_lov_delay_create_clear(&file->f_flags); kvfree(lump); @@ -1488,12 +1507,12 @@ static int ll_lov_setea(struct inode *inode, struct file *file, static int ll_lov_setstripe(struct inode *inode, struct file *file, unsigned long arg) { - struct lov_user_md_v3 lumv3; - struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; - struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg; - struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg; - int lum_size, rc; - int flags = FMODE_WRITE; + struct lov_user_md_v3 lumv3; + struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; + struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; + struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; + int lum_size, rc; + int flags = FMODE_WRITE; /* first try with v1 which is smaller than v3 */ lum_size = sizeof(struct lov_user_md_v1); @@ -1518,7 +1537,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file, ll_layout_refresh(inode, &gen); lsm = ccc_inode_lsm_get(inode); rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), - 0, lsm, (void *)arg); + 0, lsm, (void __user *)arg); ccc_inode_lsm_put(inode, lsm); } return rc; @@ -1530,9 +1549,9 @@ static int ll_lov_getstripe(struct inode *inode, unsigned long arg) int rc = -ENODATA; lsm = ccc_inode_lsm_get(inode); - if (lsm != NULL) + if (lsm) rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0, - lsm, (void *)arg); + lsm, (void __user *)arg); ccc_inode_lsm_put(inode, lsm); return rc; } @@ -1560,7 +1579,7 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg) spin_unlock(&lli->lli_lock); return -EINVAL; } - LASSERT(fd->fd_grouplock.cg_lock == NULL); + LASSERT(!fd->fd_grouplock.cg_lock); spin_unlock(&lli->lli_lock); rc = cl_get_grouplock(cl_i2info(inode)->lli_clob, @@ -1597,11 +1616,11 @@ static int ll_put_grouplock(struct inode *inode, struct file *file, CWARN("no group lock held\n"); return -EINVAL; } - LASSERT(fd->fd_grouplock.cg_lock != NULL); + LASSERT(fd->fd_grouplock.cg_lock); if (fd->fd_grouplock.cg_gid != arg) { CWARN("group lock %lu doesn't match current id %lu\n", - arg, fd->fd_grouplock.cg_gid); + arg, fd->fd_grouplock.cg_gid); spin_unlock(&lli->lli_lock); return -EINVAL; } @@ -1688,7 +1707,7 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, } lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) + if (!lsm) return -ENOENT; /* If the stripe_count > 1 and the application does not understand @@ -1782,9 +1801,10 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) int rc = 0; /* Get the extent count so we can calculate the size of - * required fiemap buffer */ + * required fiemap buffer + */ if (get_user(extent_count, - &((struct ll_user_fiemap __user *)arg)->fm_extent_count)) + &((struct ll_user_fiemap __user *)arg)->fm_extent_count)) return -EFAULT; if (extent_count >= @@ -1794,7 +1814,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) sizeof(struct ll_fiemap_extent)); fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS); - if (fiemap_s == NULL) + if (!fiemap_s) return -ENOMEM; /* get the fiemap value */ @@ -1806,11 +1826,12 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) /* If fm_extent_count is non-zero, read the first extent since * it is used to calculate end_offset and device from previous - * fiemap call. */ + * fiemap call. 
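The ioctl-path hunks around here (ll_lov_setea(), ll_lov_setstripe(), ll_ioctl_fiemap()) add __user casts to the pointers derived from the raw unsigned long arg, so sparse can verify that user memory is only touched through copy_from_user()/copy_to_user(). A minimal sketch of that pattern, with a hypothetical request structure standing in for lov_user_md and friends:

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* hypothetical request layout, standing in for lov_user_md and friends */
struct demo_req {
	__u32 flags;
	__u64 start;
	__u64 length;
};

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* the cast records that arg points into the user address space */
	struct demo_req __user *ureq = (struct demo_req __user *)arg;
	struct demo_req req;

	if (copy_from_user(&req, ureq, sizeof(req)))
		return -EFAULT;

	req.length += req.start;		/* stand-in for real work */

	if (copy_to_user(ureq, &req, sizeof(req)))
		return -EFAULT;
	return 0;
}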
+ */ if (extent_count) { if (copy_from_user(&fiemap_s->fm_extents[0], - (char __user *)arg + sizeof(*fiemap_s), - sizeof(struct ll_fiemap_extent))) { + (char __user *)arg + sizeof(*fiemap_s), + sizeof(struct ll_fiemap_extent))) { rc = -EFAULT; goto error; } @@ -1826,7 +1847,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) ret_bytes += (fiemap_s->fm_mapped_extents * sizeof(struct ll_fiemap_extent)); - if (copy_to_user((void *)arg, fiemap_s, ret_bytes)) + if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes)) rc = -EFAULT; error: @@ -1917,13 +1938,14 @@ int ll_hsm_release(struct inode *inode) /* Release the file. * NB: lease lock handle is released in mdc_hsm_release_pack() because - * we still need it to pack l_remote_handle to MDT. */ + * we still need it to pack l_remote_handle to MDT. + */ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, &data_version); och = NULL; out: - if (och != NULL && !IS_ERR(och)) /* close the file */ + if (och && !IS_ERR(och)) /* close the file */ ll_lease_close(och, inode, NULL); return rc; @@ -2007,7 +2029,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, } /* to be able to restore mtime and atime after swap - * we need to first save them */ + * we need to first save them + */ if (lsl->sl_flags & (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) { llss->ia1.ia_mtime = llss->inode1->i_mtime; @@ -2019,7 +2042,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, } /* ultimate check, before swapping the layouts we check if - * dataversion has changed (if requested) */ + * dataversion has changed (if requested) + */ if (llss->check_dv1) { rc = ll_data_version(llss->inode1, &dv, 0); if (rc) @@ -2042,9 +2066,11 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, /* struct md_op_data is used to send the swap args to the mdt * only flags is missing, so we use struct mdc_swap_layouts - * through the md_op_data->op_data */ + * through the md_op_data->op_data + */ /* flags from user space have to be converted before they are send to - * server, no flag is sent today, they are only used on the client */ + * server, no flag is sent today, they are only used on the client + */ msl.msl_flags = 0; rc = -ENOMEM; op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0, @@ -2113,7 +2139,8 @@ static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss) return -EINVAL; /* Non-root users are forbidden to set or clear flags which are - * NOT defined in HSM_USER_MASK. */ + * NOT defined in HSM_USER_MASK. + */ if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) && !capable(CFS_CAP_SYS_ADMIN)) return -EPERM; @@ -2211,14 +2238,14 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case LL_IOC_GETFLAGS: /* Get the current value of the file flags */ - return put_user(fd->fd_flags, (int *)arg); + return put_user(fd->fd_flags, (int __user *)arg); case LL_IOC_SETFLAGS: case LL_IOC_CLRFLAGS: /* Set or clear specific file flags */ /* XXX This probably needs checks to ensure the flags are * not abused, and to handle any flag side effects. 
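For single scalar values such as fd->fd_flags or fm_extent_count the hunks use get_user()/put_user() rather than the full copy routines; both return 0 on success or -EFAULT, which is why LL_IOC_GETFLAGS can simply return put_user(...). A small sketch under hypothetical names:

#include <linux/uaccess.h>
#include <linux/errno.h>

static int demo_flags;	/* stands in for fd->fd_flags */

static long demo_getflags(unsigned long arg)
{
	/* put_user() copies one scalar out and returns 0 or -EFAULT */
	return put_user(demo_flags, (int __user *)arg);
}

static long demo_setflags(unsigned long arg)
{
	int flags;

	/* get_user() copies one scalar in, again returning 0 or -EFAULT */
	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	demo_flags = flags;
	return 0;
}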
*/ - if (get_user(flags, (int *) arg)) + if (get_user(flags, (int __user *)arg)) return -EFAULT; if (cmd == LL_IOC_SETFLAGS) { @@ -2242,15 +2269,15 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct file *file2; struct lustre_swap_layouts lsl; - if (copy_from_user(&lsl, (char *)arg, - sizeof(struct lustre_swap_layouts))) + if (copy_from_user(&lsl, (char __user *)arg, + sizeof(struct lustre_swap_layouts))) return -EFAULT; if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */ return -EPERM; file2 = fget(lsl.sl_fd); - if (file2 == NULL) + if (!file2) return -EBADF; rc = -EPERM; @@ -2272,13 +2299,13 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ll_iocontrol(inode, file, cmd, arg); case FSFILT_IOC_GETVERSION_OLD: case FSFILT_IOC_GETVERSION: - return put_user(inode->i_generation, (int *)arg); + return put_user(inode->i_generation, (int __user *)arg); case LL_IOC_GROUP_LOCK: return ll_get_grouplock(inode, file, arg); case LL_IOC_GROUP_UNLOCK: return ll_put_grouplock(inode, file, arg); case IOC_OBD_STATFS: - return ll_obd_statfs(inode, (void *)arg); + return ll_obd_statfs(inode, (void __user *)arg); /* We need to special case any other ioctls we want to handle, * to send them to the MDS/OST as appropriate and to properly @@ -2289,25 +2316,26 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case LL_IOC_FLUSHCTX: return ll_flush_ctx(inode); case LL_IOC_PATH2FID: { - if (copy_to_user((void *)arg, ll_inode2fid(inode), + if (copy_to_user((void __user *)arg, ll_inode2fid(inode), sizeof(struct lu_fid))) return -EFAULT; return 0; } case OBD_IOC_FID2PATH: - return ll_fid2path(inode, (void *)arg); + return ll_fid2path(inode, (void __user *)arg); case LL_IOC_DATA_VERSION: { struct ioc_data_version idv; int rc; - if (copy_from_user(&idv, (char *)arg, sizeof(idv))) + if (copy_from_user(&idv, (char __user *)arg, sizeof(idv))) return -EFAULT; rc = ll_data_version(inode, &idv.idv_version, - !(idv.idv_flags & LL_DV_NOFLUSH)); + !(idv.idv_flags & LL_DV_NOFLUSH)); - if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv))) + if (rc == 0 && copy_to_user((char __user *)arg, &idv, + sizeof(idv))) return -EFAULT; return rc; @@ -2320,7 +2348,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (mdtidx < 0) return mdtidx; - if (put_user((int)mdtidx, (int *)arg)) + if (put_user(mdtidx, (int __user *)arg)) return -EFAULT; return 0; @@ -2347,7 +2375,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), op_data, NULL); - if (copy_to_user((void *)arg, hus, sizeof(*hus))) + if (copy_to_user((void __user *)arg, hus, sizeof(*hus))) rc = -EFAULT; ll_finish_md_op_data(op_data); @@ -2358,7 +2386,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct hsm_state_set *hss; int rc; - hss = memdup_user((char *)arg, sizeof(*hss)); + hss = memdup_user((char __user *)arg, sizeof(*hss)); if (IS_ERR(hss)) return PTR_ERR(hss); @@ -2386,7 +2414,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), op_data, NULL); - if (copy_to_user((char *)arg, hca, sizeof(*hca))) + if (copy_to_user((char __user *)arg, hca, sizeof(*hca))) rc = -EFAULT; ll_finish_md_op_data(op_data); @@ -2412,13 +2440,13 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; case F_UNLCK: mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { 
+ if (fd->fd_lease_och) { och = fd->fd_lease_och; fd->fd_lease_och = NULL; } mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { mode = och->och_flags & (FMODE_READ|FMODE_WRITE); rc = ll_lease_close(och, inode, &lease_broken); @@ -2443,12 +2471,12 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = 0; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och == NULL) { + if (!fd->fd_lease_och) { fd->fd_lease_och = och; och = NULL; } mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { /* impossible now that only excl is supported for now */ ll_lease_close(och, inode, &lease_broken); rc = -EBUSY; @@ -2461,11 +2489,11 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = 0; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { struct obd_client_handle *och = fd->fd_lease_och; lock = ldlm_handle2lock(&och->och_lease_handle); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (!ldlm_is_cancel(lock)) rc = och->och_flags & @@ -2480,7 +2508,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case LL_IOC_HSM_IMPORT: { struct hsm_user_import *hui; - hui = memdup_user((void *)arg, sizeof(*hui)); + hui = memdup_user((void __user *)arg, sizeof(*hui)); if (IS_ERR(hui)) return PTR_ERR(hui); @@ -2497,7 +2525,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return err; return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL, - (void *)arg); + (void __user *)arg); } } } @@ -2536,15 +2564,17 @@ static int ll_flush(struct file *file, fl_owner_t id) LASSERT(!S_ISDIR(inode->i_mode)); /* catch async errors that were recorded back when async writeback - * failed for pages in this mapping. */ + * failed for pages in this mapping. + */ rc = lli->lli_async_rc; lli->lli_async_rc = 0; err = lov_read_and_clear_async_rc(lli->lli_clob); if (rc == 0) rc = err; - /* The application has been told write failure already. - * Do not report failure again. */ + /* The application has been told about write failure already. + * Do not report failure again. + */ if (fd->fd_write_failed) return 0; return rc ? -EIO : 0; @@ -2612,7 +2642,8 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) inode_lock(inode); /* catch async errors that were recorded back when async writeback - * failed for pages in this mapping. */ + * failed for pages in this mapping. + */ if (!S_ISDIR(inode->i_mode)) { err = lli->lli_async_rc; lli->lli_async_rc = 0; @@ -2683,7 +2714,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) * I guess between lockd processes) and then compares pid. * As such we assign pid to the owner field to make it all work, * conflict with normal locks is unlikely since pid space and - * pointer space for current->files are not intersecting */ + * pointer space for current->files are not intersecting + */ if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner) flock.l_flock.owner = (unsigned long)file_lock->fl_pid; @@ -2699,7 +2731,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) * order to process an unlock request we need all of the same * information that is given with a normal read or write record * lock request. To avoid creating another ldlm unlock (cancel) - * message we'll treat a LCK_NL flock request as an unlock. */ + * message we'll treat a LCK_NL flock request as an unlock. 
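The LL_IOC_HSM_STATE_SET and LL_IOC_HSM_IMPORT cases above copy small, fixed-size user structures with memdup_user(), which folds the allocation and copy_from_user() into one call and reports failure through ERR_PTR(). A sketch of the same pattern with a hypothetical structure in place of hsm_user_import:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_import {		/* stands in for struct hsm_user_import */
	__u64 archive_id;
	__u64 size;
};

static long demo_import_ioctl(unsigned long arg)
{
	struct demo_import *imp;
	long rc = 0;

	/* memdup_user() = kmalloc() + copy_from_user(), ERR_PTR on failure */
	imp = memdup_user((void __user *)arg, sizeof(*imp));
	if (IS_ERR(imp))
		return PTR_ERR(imp);

	if (imp->size == 0)	/* act on the private kernel copy */
		rc = -EINVAL;

	kfree(imp);
	return rc;
}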
+ */ einfo.ei_mode = LCK_NL; break; case F_WRLCK: @@ -2707,7 +2740,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) break; default: CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", - file_lock->fl_type); + file_lock->fl_type); return -ENOTSUPP; } @@ -2730,7 +2763,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) #endif flags = LDLM_FL_TEST_LOCK; /* Save the old mode so that if the mode in the lock changes we - * can decrement the appropriate reader or writer refcount. */ + * can decrement the appropriate reader or writer refcount. + */ file_lock->fl_type = einfo.ei_mode; break; default: @@ -2757,7 +2791,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) if (rc2 && file_lock->fl_type != F_UNLCK) { einfo.ei_mode = LCK_NL; md_enqueue(sbi->ll_md_exp, &einfo, NULL, - op_data, &lockh, &flock, 0, NULL /* req */, flags); + op_data, &lockh, &flock, 0, NULL /* req */, flags); rc = rc2; } @@ -2782,11 +2816,12 @@ ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock) * \param l_req_mode [IN] searched lock mode * \retval boolean, true iff all bits are found */ -int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode) +int ll_have_md_lock(struct inode *inode, __u64 *bits, + enum ldlm_mode l_req_mode) { struct lustre_handle lockh; ldlm_policy_data_t policy; - ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ? + enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ? (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode; struct lu_fid *fid; __u64 flags; @@ -2822,13 +2857,13 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode) return *bits == 0; } -ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits, - struct lustre_handle *lockh, __u64 flags, - ldlm_mode_t mode) +enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, + struct lustre_handle *lockh, __u64 flags, + enum ldlm_mode mode) { ldlm_policy_data_t policy = { .l_inodebits = {bits} }; struct lu_fid *fid; - ldlm_mode_t rc; + enum ldlm_mode rc; fid = &ll_i2info(inode)->lli_fid; CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid)); @@ -2866,8 +2901,6 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) struct obd_export *exp; int rc = 0; - LASSERT(inode != NULL); - CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n", inode->i_ino, inode->i_generation, inode, dentry); @@ -2875,7 +2908,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) /* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC. * But under CMD case, it caused some lock issues, should be fixed - * with new CMD ibits lock. See bug 12718 */ + * with new CMD ibits lock. See bug 12718 + */ if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) { struct lookup_intent oit = { .it_op = IT_GETATTR }; struct md_op_data *op_data; @@ -2893,7 +2927,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) oit.it_create_mode |= M_CHECK_STALE; rc = md_intent_lock(exp, op_data, NULL, 0, /* we are not interested in name - based lookup */ + * based lookup + */ &oit, 0, &req, ll_md_blocking_ast, 0); ll_finish_md_op_data(op_data); @@ -2910,9 +2945,10 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) } /* Unlinked? Unhash dentry, so it is not picked up later by - do_lookup() -> ll_revalidate_it(). We cannot use d_drop - here to preserve get_cwd functionality on 2.6. - Bug 10503 */ + * do_lookup() -> ll_revalidate_it(). 
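The ll_have_md_lock()/ll_take_md_lock() prototypes above replace the ldlm_mode_t typedef with the spelled-out enum ldlm_mode; checkpatch.pl discourages typedefs that only hide an enum or struct, since the explicit keyword tells the reader what the type is. An illustrative sketch with hypothetical names, not the Lustre definitions:

#include <linux/types.h>

/* the typedef form hides that the type is an enum */
typedef enum {
	DEMO_LOCK_NONE,
	DEMO_LOCK_READ,
	DEMO_LOCK_WRITE,
} demo_lock_mode_t;

/* the spelled-out form preferred by checkpatch */
enum demo_lock_mode {
	DEMO_MODE_NONE,
	DEMO_MODE_READ,
	DEMO_MODE_WRITE,
};

static bool demo_modes_compatible(enum demo_lock_mode held,
				  enum demo_lock_mode want)
{
	/* only two readers can share a resource in this toy model */
	return held == DEMO_MODE_READ && want == DEMO_MODE_READ;
}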
We cannot use d_drop + * here to preserve get_cwd functionality on 2.6. + * Bug 10503 + */ if (!d_inode(dentry)->i_nlink) d_lustre_invalidate(dentry, 0); @@ -3026,26 +3062,33 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, sizeof(struct ll_fiemap_extent)); fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS); - if (fiemap == NULL) + if (!fiemap) return -ENOMEM; fiemap->fm_flags = fieinfo->fi_flags; fiemap->fm_extent_count = fieinfo->fi_extents_max; fiemap->fm_start = start; fiemap->fm_length = len; - if (extent_count > 0) - memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start, - sizeof(struct ll_fiemap_extent)); + if (extent_count > 0 && + copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start, + sizeof(struct ll_fiemap_extent)) != 0) { + rc = -EFAULT; + goto out; + } rc = ll_do_fiemap(inode, fiemap, num_bytes); fieinfo->fi_flags = fiemap->fm_flags; fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents; - if (extent_count > 0) - memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0], - fiemap->fm_mapped_extents * - sizeof(struct ll_fiemap_extent)); + if (extent_count > 0 && + copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0], + fiemap->fm_mapped_extents * + sizeof(struct ll_fiemap_extent)) != 0) { + rc = -EFAULT; + goto out; + } +out: kvfree(fiemap); return rc; } @@ -3067,13 +3110,12 @@ int ll_inode_permission(struct inode *inode, int mask) { int rc = 0; -#ifdef MAY_NOT_BLOCK if (mask & MAY_NOT_BLOCK) return -ECHILD; -#endif /* as root inode are NOT getting validated in lookup operation, - * need to do it before permission check. */ + * need to do it before permission check. + */ if (is_root_inode(inode)) { rc = __ll_inode_revalidate(inode->i_sb->s_root, @@ -3173,8 +3215,7 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd) unsigned int size; struct llioc_data *in_data = NULL; - if (cb == NULL || cmd == NULL || - count > LLIOC_MAX_CMD || count < 0) + if (!cb || !cmd || count > LLIOC_MAX_CMD || count < 0) return NULL; size = sizeof(*in_data) + count * sizeof(unsigned int); @@ -3200,7 +3241,7 @@ void ll_iocontrol_unregister(void *magic) { struct llioc_data *tmp; - if (magic == NULL) + if (!magic) return; down_write(&llioc.ioc_sem); @@ -3254,7 +3295,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) struct lu_env *env; int result; - if (lli->lli_clob == NULL) + if (!lli->lli_clob) return 0; env = cl_env_nested_get(&nest); @@ -3267,13 +3308,14 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) if (conf->coc_opc == OBJECT_CONF_SET) { struct ldlm_lock *lock = conf->coc_lock; - LASSERT(lock != NULL); + LASSERT(lock); LASSERT(ldlm_has_layout(lock)); if (result == 0) { /* it can only be allowed to match after layout is * applied to inode otherwise false layout would be * seen. Applying layout should happen before dropping - * the intent lock. */ + * the intent lock. + */ ldlm_lock_allow_match(lock); } } @@ -3296,14 +3338,15 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY), lock->l_lvb_data, lock->l_lvb_len); - if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY)) + if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY)) return 0; /* if layout lock was granted right away, the layout is returned * within DLM_LVB of dlm reply; otherwise if the lock was ever * blocked and then granted via completion ast, we have to fetch * layout here. 
Please note that we can't use the LVB buffer in - * completion AST because it doesn't have a large enough buffer */ + * completion AST because it doesn't have a large enough buffer + */ rc = ll_get_default_mdsize(sbi, &lmmsize); if (rc == 0) rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), @@ -3313,7 +3356,7 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) return rc; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -3325,20 +3368,20 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) } lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize); - if (lmm == NULL) { + if (!lmm) { rc = -EFAULT; goto out; } lvbdata = libcfs_kvzalloc(lmmsize, GFP_NOFS); - if (lvbdata == NULL) { + if (!lvbdata) { rc = -ENOMEM; goto out; } memcpy(lvbdata, lmm, lmmsize); lock_res_and_lock(lock); - if (lock->l_lvb_data != NULL) + if (lock->l_lvb_data) kvfree(lock->l_lvb_data); lock->l_lvb_data = lvbdata; @@ -3354,8 +3397,8 @@ out: * Apply the layout to the inode. Layout lock is held and will be released * in this function. */ -static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, - struct inode *inode, __u32 *gen, bool reconf) +static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, + struct inode *inode, __u32 *gen, bool reconf) { struct ll_inode_info *lli = ll_i2info(inode); struct ll_sb_info *sbi = ll_i2sbi(inode); @@ -3369,10 +3412,10 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, LASSERT(lustre_handle_is_used(lockh)); lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); + LASSERT(lock); LASSERT(ldlm_has_layout(lock)); - LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n", + LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d", inode, PFID(&lli->lli_fid), reconf); /* in case this is a caching lock and reinstate with new inode */ @@ -3382,12 +3425,14 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY); unlock_res_and_lock(lock); /* checking lvb_ready is racy but this is okay. The worst case is - * that multi processes may configure the file on the same time. */ + * that multi processes may configure the file on the same time. + */ if (lvb_ready || !reconf) { rc = -ENODATA; if (lvb_ready) { /* layout_gen must be valid if layout lock is not - * cancelled and stripe has already set */ + * cancelled and stripe has already set + */ *gen = ll_layout_version_get(lli); rc = 0; } @@ -3401,26 +3446,28 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, /* for layout lock, lmm is returned in lock's lvb. * lvb_data is immutable if the lock is held so it's safe to access it * without res lock. 
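ll_layout_fetch() above allocates and fills the replacement LVB buffer without the resource lock held, then installs it and releases the previous buffer once the lock is taken. A generic, hedged sketch of that publish-then-free shape, using a plain spinlock and kzalloc()/kfree() in place of the tree's libcfs_kvzalloc()/kvfree(); lock initialization is omitted and the names are hypothetical:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct demo_holder {
	spinlock_t	lock;
	void		*data;
	size_t		len;
};

static int demo_replace_data(struct demo_holder *h, const void *src, size_t len)
{
	void *fresh, *stale;

	/* allocate and fill the replacement without holding the lock */
	fresh = kzalloc(len, GFP_NOFS);
	if (!fresh)
		return -ENOMEM;
	memcpy(fresh, src, len);

	spin_lock(&h->lock);
	stale = h->data;	/* publish the new buffer under the lock */
	h->data = fresh;
	h->len = len;
	spin_unlock(&h->lock);

	kfree(stale);		/* kfree(NULL) is a no-op */
	return 0;
}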
See the description in ldlm_lock_decref_internal() - * for the condition to free lvb_data of layout lock */ - if (lock->l_lvb_data != NULL) { + * for the condition to free lvb_data of layout lock + */ + if (lock->l_lvb_data) { rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm, lock->l_lvb_data, lock->l_lvb_len); if (rc >= 0) { *gen = LL_LAYOUT_GEN_EMPTY; - if (md.lsm != NULL) + if (md.lsm) *gen = md.lsm->lsm_layout_gen; rc = 0; } else { - CERROR("%s: file "DFID" unpackmd error: %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&lli->lli_fid), rc); + CERROR("%s: file " DFID " unpackmd error: %d\n", + ll_get_fsname(inode->i_sb, NULL, 0), + PFID(&lli->lli_fid), rc); } } if (rc < 0) goto out; /* set layout to file. Unlikely this will fail as old layout was - * surely eliminated */ + * surely eliminated + */ memset(&conf, 0, sizeof(conf)); conf.coc_opc = OBJECT_CONF_SET; conf.coc_inode = inode; @@ -3428,7 +3475,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, conf.u.coc_md = &md; rc = ll_layout_conf(inode, &conf); - if (md.lsm != NULL) + if (md.lsm) obd_free_memmd(sbi->ll_dt_exp, &md.lsm); /* refresh layout failed, need to wait */ @@ -3440,9 +3487,9 @@ out: /* wait for IO to complete if it's still being used. */ if (wait_layout) { - CDEBUG(D_INODE, "%s: %p/"DFID" wait for layout reconf.\n", - ll_get_fsname(inode->i_sb, NULL, 0), - inode, PFID(&lli->lli_fid)); + CDEBUG(D_INODE, "%s: %p/" DFID " wait for layout reconf.\n", + ll_get_fsname(inode->i_sb, NULL, 0), + inode, PFID(&lli->lli_fid)); memset(&conf, 0, sizeof(conf)); conf.coc_opc = OBJECT_CONF_WAIT; @@ -3451,8 +3498,8 @@ out: if (rc == 0) rc = -EAGAIN; - CDEBUG(D_INODE, "file: "DFID" waiting layout return: %d.\n", - PFID(&lli->lli_fid), rc); + CDEBUG(D_INODE, "file: " DFID " waiting layout return: %d.\n", + PFID(&lli->lli_fid), rc); } return rc; } @@ -3477,7 +3524,7 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) struct md_op_data *op_data; struct lookup_intent it; struct lustre_handle lockh; - ldlm_mode_t mode; + enum ldlm_mode mode; struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS, .ei_mode = LCK_CR, @@ -3499,7 +3546,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) again: /* mostly layout lock is caching on the local side, so try to match - * it before grabbing layout lock mutex. */ + * it before grabbing layout lock mutex. 
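The CDEBUG()/CERROR() hunks in this area put explicit spaces around DFID because it is a format-string fragment: the preprocessor drops it between the neighbouring string literals and the compiler concatenates them, while PFID() expands to the matching arguments. A hedged illustration with a hypothetical macro pair rather than the real Lustre definitions:

#include <linux/printk.h>

/* hypothetical stand-ins for Lustre's DFID/PFID pair */
struct demo_fid {
	unsigned long long	seq;
	unsigned int		oid;
};

#define DEMO_DFID	"[%#llx:0x%x]"
#define DEMO_PFID(fid)	(fid)->seq, (fid)->oid

static void demo_log_fid(const struct demo_fid *fid, int rc)
{
	/* adjacent string literals are merged by the compiler, so the
	 * macro drops straight into the format string
	 */
	pr_debug("file " DEMO_DFID " unpackmd error: %d\n",
		 DEMO_PFID(fid), rc);
}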
+ */ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0, LCK_CR | LCK_CW | LCK_PR | LCK_PW); if (mode != 0) { /* hit cached lock */ @@ -3512,7 +3560,7 @@ again: } op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, - 0, 0, LUSTRE_OPC_ANY, NULL); + 0, 0, LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) { mutex_unlock(&lli->lli_layout_mutex); return PTR_ERR(op_data); @@ -3523,14 +3571,13 @@ again: it.it_op = IT_LAYOUT; lockh.cookie = 0ULL; - LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/"DFID".\n", - ll_get_fsname(inode->i_sb, NULL, 0), inode, + LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/" DFID "", + ll_get_fsname(inode->i_sb, NULL, 0), inode, PFID(&lli->lli_fid)); rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh, NULL, 0, NULL, 0); - if (it.d.lustre.it_data != NULL) - ptlrpc_req_finished(it.d.lustre.it_data); + ptlrpc_req_finished(it.d.lustre.it_data); it.d.lustre.it_data = NULL; ll_finish_md_op_data(op_data); diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c index 3f348a3aad43..a55ac4dccd90 100644 --- a/drivers/staging/lustre/lustre/llite/llite_close.c +++ b/drivers/staging/lustre/lustre/llite/llite_close.c @@ -52,9 +52,8 @@ void vvp_write_pending(struct ccc_object *club, struct ccc_page *page) spin_lock(&lli->lli_lock); lli->lli_flags |= LLIF_SOM_DIRTY; - if (page != NULL && list_empty(&page->cpg_pending_linkage)) - list_add(&page->cpg_pending_linkage, - &club->cob_pending_list); + if (page && list_empty(&page->cpg_pending_linkage)) + list_add(&page->cpg_pending_linkage, &club->cob_pending_list); spin_unlock(&lli->lli_lock); } @@ -65,7 +64,7 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page) int rc = 0; spin_lock(&lli->lli_lock); - if (page != NULL && !list_empty(&page->cpg_pending_linkage)) { + if (page && !list_empty(&page->cpg_pending_linkage)) { list_del_init(&page->cpg_pending_linkage); rc = 1; } @@ -76,7 +75,8 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page) /** Queues DONE_WRITING if * - done writing is allowed; - * - inode has no no dirty pages; */ + * - inode has no no dirty pages; + */ void ll_queue_done_writing(struct inode *inode, unsigned long flags) { struct ll_inode_info *lli = ll_i2info(inode); @@ -106,7 +106,8 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags) * close() happen, epoch is closed as the inode is marked as * LLIF_EPOCH_PENDING. When pages are written inode should not * be inserted into the queue again, clear this flag to avoid - * it. */ + * it. + */ lli->lli_flags &= ~LLIF_DONE_WRITING; wake_up(&lcq->lcq_waitq); @@ -144,10 +145,11 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, spin_lock(&lli->lli_lock); if (!(list_empty(&club->cob_pending_list))) { if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) { - LASSERT(*och != NULL); - LASSERT(lli->lli_pending_och == NULL); + LASSERT(*och); + LASSERT(!lli->lli_pending_och); /* Inode is dirty and there is no pending write done - * request yet, DONE_WRITE is to be sent later. */ + * request yet, DONE_WRITE is to be sent later. + */ lli->lli_flags |= LLIF_EPOCH_PENDING; lli->lli_pending_och = *och; spin_unlock(&lli->lli_lock); @@ -159,7 +161,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, if (flags & LLIF_DONE_WRITING) { /* Some pages are still dirty, it is early to send * DONE_WRITE. Wait until all pages will be flushed - * and try DONE_WRITE again later. */ + * and try DONE_WRITE again later. 
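The ll_layout_refresh() hunk above drops the NULL check in front of ptlrpc_req_finished(), which suggests that helper, like kfree(), tolerates a NULL argument; making teardown functions NULL-safe lets every caller skip the guard. A sketch of the convention with hypothetical types:

#include <linux/slab.h>

struct demo_request {
	void *payload;
};

/* teardown helpers that accept NULL keep call sites branch-free */
static void demo_request_finished(struct demo_request *req)
{
	if (!req)
		return;
	kfree(req->payload);	/* kfree(NULL) is itself a no-op */
	kfree(req);
}

static void demo_caller(struct demo_request *maybe_null)
{
	demo_request_finished(maybe_null);	/* no NULL check needed here */
}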
+ */ LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING)); lli->lli_flags |= LLIF_DONE_WRITING; spin_unlock(&lli->lli_lock); @@ -187,7 +190,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, } /* There is a pending DONE_WRITE -- close epoch with no - * attribute change. */ + * attribute change. + */ if (lli->lli_flags & LLIF_EPOCH_PENDING) { spin_unlock(&lli->lli_lock); goto out; @@ -215,13 +219,13 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data) struct obdo *oa; int rc; - LASSERT(op_data != NULL); + LASSERT(op_data); if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n", inode->i_ino, inode->i_generation, lli->lli_flags); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oa) { CERROR("can't allocate memory for Size-on-MDS update.\n"); return -ENOMEM; @@ -266,7 +270,7 @@ static void ll_prepare_done_writing(struct inode *inode, { ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING); /* If there is no @och, we do not do D_W yet. */ - if (*och == NULL) + if (!*och) return; ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh); @@ -289,13 +293,14 @@ static void ll_done_writing(struct inode *inode) ll_prepare_done_writing(inode, op_data, &och); /* If there is no @och, we do not do D_W yet. */ - if (och == NULL) + if (!och) goto out; rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL); if (rc == -EAGAIN) /* MDS has instructed us to obtain Size-on-MDS attribute from - * OSTs and send setattr to back to MDS. */ + * OSTs and send setattr to back to MDS. + */ rc = ll_som_update(inode, op_data); else if (rc) CERROR("inode %lu mdc done_writing failed: rc = %d\n", @@ -316,7 +321,7 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq) if (!list_empty(&lcq->lcq_head)) { lli = list_entry(lcq->lcq_head.next, struct ll_inode_info, - lli_close_list); + lli_close_list); list_del_init(&lli->lli_close_list); } else if (atomic_read(&lcq->lcq_stop)) lli = ERR_PTR(-EALREADY); diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 845e992ca5fc..973f5cdec192 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -93,9 +93,10 @@ struct ll_remote_perm { gid_t lrp_gid; uid_t lrp_fsuid; gid_t lrp_fsgid; - int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this - is access permission with - lrp_fsuid/lrp_fsgid. */ + int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this + * is access permission with + * lrp_fsuid/lrp_fsgid. + */ }; enum lli_flags { @@ -106,7 +107,8 @@ enum lli_flags { /* DONE WRITING is allowed. */ LLIF_DONE_WRITING = (1 << 2), /* Sizeon-on-MDS attributes are changed. An attribute update needs to - * be sent to MDS. */ + * be sent to MDS. + */ LLIF_SOM_DIRTY = (1 << 3), /* File data is modified. */ LLIF_DATA_MODIFIED = (1 << 4), @@ -130,22 +132,23 @@ struct ll_inode_info { /* identifying fields for both metadata and data stacks. */ struct lu_fid lli_fid; /* Parent fid for accessing default stripe data on parent directory - * for allocating OST objects after a mknod() and later open-by-FID. */ + * for allocating OST objects after a mknod() and later open-by-FID. 
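The ll_som_update() hunk above (and the earlier ll_lov_recreate() one) swaps kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO) for kmem_cache_zalloc(obdo_cachep, GFP_NOFS); the helper is just the zeroing variant of the same allocator. A minimal sketch using a hypothetical cache rather than the Lustre obdo_cachep:

#include <linux/slab.h>

struct demo_obj {
	int	id;
	char	name[32];
};

/* assumed to be created elsewhere with kmem_cache_create() */
static struct kmem_cache *demo_cachep;

static struct demo_obj *demo_obj_alloc(void)
{
	/* same as kmem_cache_alloc(demo_cachep, GFP_NOFS | __GFP_ZERO) */
	return kmem_cache_zalloc(demo_cachep, GFP_NOFS);
}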
+ */ struct lu_fid lli_pfid; - struct list_head lli_close_list; - /* open count currently used by capability only, indicate whether - * capability needs renewal */ - atomic_t lli_open_count; + struct list_head lli_close_list; + unsigned long lli_rmtperm_time; /* handle is to be sent to MDS later on done_writing and setattr. * Open handle data are needed for the recovery to reconstruct - * the inode state on the MDS. XXX: recovery is not ready yet. */ + * the inode state on the MDS. XXX: recovery is not ready yet. + */ struct obd_client_handle *lli_pending_och; /* We need all three because every inode may be opened in different - * modes */ + * modes + */ struct obd_client_handle *lli_mds_read_och; struct obd_client_handle *lli_mds_write_och; struct obd_client_handle *lli_mds_exec_och; @@ -162,7 +165,8 @@ struct ll_inode_info { spinlock_t lli_agl_lock; /* Try to make the d::member and f::member are aligned. Before using - * these members, make clear whether it is directory or not. */ + * these members, make clear whether it is directory or not. + */ union { /* for directory */ struct { @@ -173,13 +177,15 @@ struct ll_inode_info { /* since parent-child threads can share the same @file * struct, "opendir_key" is the token when dir close for * case of parent exit before child -- it is me should - * cleanup the dir readahead. */ + * cleanup the dir readahead. + */ void *d_opendir_key; struct ll_statahead_info *d_sai; /* protect statahead stuff. */ spinlock_t d_sa_lock; - /* "opendir_pid" is the token when lookup/revalid - * -- I am the owner of dir statahead. */ + /* "opendir_pid" is the token when lookup/revalidate + * -- I am the owner of dir statahead. + */ pid_t d_opendir_pid; } d; @@ -281,11 +287,8 @@ static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen) int ll_xattr_cache_destroy(struct inode *inode); -int ll_xattr_cache_get(struct inode *inode, - const char *name, - char *buffer, - size_t size, - __u64 valid); +int ll_xattr_cache_get(struct inode *inode, const char *name, + char *buffer, size_t size, __u64 valid); /* * Locking to guarantee consistency of non-atomic updates to long long i_size, @@ -305,7 +308,8 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode) } /* default to about 40meg of readahead on a given system. That much tied - * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */ + * up in 512k readahead requests serviced at 40ms each is about 1GB/s. + */ #define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT)) /* default to read-ahead full files smaller than 2MB on the second read */ @@ -344,11 +348,13 @@ struct ra_io_arg { unsigned long ria_end; /* end offset of read-ahead*/ /* If stride read pattern is detected, ria_stoff means where * stride read is started. Note: for normal read-ahead, the - * value here is meaningless, and also it will not be accessed*/ + * value here is meaningless, and also it will not be accessed + */ pgoff_t ria_stoff; /* ria_length and ria_pages are the length and pages length in the * stride I/O mode. And they will also be used to check whether - * it is stride I/O read-ahead in the read-ahead pages*/ + * it is stride I/O read-ahead in the read-ahead pages + */ unsigned long ria_length; unsigned long ria_pages; }; @@ -455,7 +461,8 @@ struct eacl_table { struct ll_sb_info { /* this protects pglist and ra_info. 
It isn't safe to - * grab from interrupt contexts */ + * grab from interrupt contexts + */ spinlock_t ll_lock; spinlock_t ll_pp_extent_lock; /* pp_extent entry*/ spinlock_t ll_process_lock; /* ll_rw_process_info */ @@ -468,10 +475,8 @@ struct ll_sb_info { int ll_flags; unsigned int ll_umounting:1, ll_xattr_cache_enabled:1; - struct list_head ll_conn_chain; /* per-conn chain of SBs */ struct lustre_client_ocd ll_lco; - struct list_head ll_orphan_dentry_list; /*please don't ask -p*/ struct ll_close_queue *ll_lcq; struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ @@ -502,13 +507,16 @@ struct ll_sb_info { /* metadata stat-ahead */ unsigned int ll_sa_max; /* max statahead RPCs */ atomic_t ll_sa_total; /* statahead thread started - * count */ + * count + */ atomic_t ll_sa_wrong; /* statahead thread stopped for - * low hit ratio */ + * low hit ratio + */ atomic_t ll_agl_total; /* AGL thread started count */ - dev_t ll_sdev_orig; /* save s_dev before assign for - * clustered nfs */ + dev_t ll_sdev_orig; /* save s_dev before assign for + * clustered nfs + */ struct rmtacl_ctl_table ll_rct; struct eacl_table ll_et; __kernel_fsid_t ll_fsid; @@ -619,13 +627,15 @@ struct ll_file_data { __u32 fd_flags; fmode_t fd_omode; /* openhandle if lease exists for this file. - * Borrow lli->lli_och_mutex to protect assignment */ + * Borrow lli->lli_och_mutex to protect assignment + */ struct obd_client_handle *fd_lease_och; struct obd_client_handle *fd_och; struct file *fd_file; /* Indicate whether need to report failure when close. * true: failure is known, not report again. - * false: unknown failure, should report. */ + * false: unknown failure, should report. + */ bool fd_write_failed; }; @@ -705,10 +715,10 @@ extern struct file_operations ll_file_operations_flock; extern struct file_operations ll_file_operations_noflock; extern const struct inode_operations ll_file_inode_operations; int ll_have_md_lock(struct inode *inode, __u64 *bits, - ldlm_mode_t l_req_mode); -ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits, - struct lustre_handle *lockh, __u64 flags, - ldlm_mode_t mode); + enum ldlm_mode l_req_mode); +enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, + struct lustre_handle *lockh, __u64 flags, + enum ldlm_mode mode); int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); int ll_glimpse_ioctl(struct ll_sb_info *sbi, @@ -782,7 +792,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry); void ll_dirty_page_discard_warn(struct page *page, int ioret); int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, struct super_block *, struct lookup_intent *); -int ll_obd_statfs(struct inode *inode, void *arg); +int ll_obd_statfs(struct inode *inode, void __user *arg); int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize); int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize); int ll_process_config(struct lustre_cfg *lcfg); @@ -796,7 +806,7 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen); void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req); /* llite/llite_nfs.c */ -extern struct export_operations lustre_export_operations; +extern const struct export_operations lustre_export_operations; __u32 get_uuid2int(const char *name, int len); void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid); struct inode *search_inode_for_lustre(struct super_block *sb, @@ -913,7 +923,7 @@ static inline struct vvp_thread_info 
*vvp_env_info(const struct lu_env *env) struct vvp_thread_info *info; info = lu_context_key_get(&env->le_ctx, &vvp_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -937,7 +947,7 @@ static inline struct vvp_session *vvp_env_session(const struct lu_env *env) struct vvp_session *ses; ses = lu_context_key_get(env->le_ses, &vvp_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -957,8 +967,8 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret); int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last); int ll_file_mmap(struct file *file, struct vm_area_struct *vma); -void policy_from_vma(ldlm_policy_data_t *policy, - struct vm_area_struct *vma, unsigned long addr, size_t count); +void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, + unsigned long addr, size_t count); struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, size_t count); @@ -968,7 +978,7 @@ static inline void ll_invalidate_page(struct page *vmpage) loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; LASSERT(PageLocked(vmpage)); - if (mapping == NULL) + if (!mapping) return; ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); @@ -993,7 +1003,7 @@ static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi) { struct obd_device *obd = sbi->ll_md_exp->exp_obd; - if (obd == NULL) + if (!obd) LBUG(); return &obd->u.cli; } @@ -1018,7 +1028,7 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode) { struct lu_fid *fid; - LASSERT(inode != NULL); + LASSERT(inode); fid = &ll_i2info(inode)->lli_fid; return fid; @@ -1107,39 +1117,44 @@ static inline u64 rce_ops2valid(int ops) struct ll_statahead_info { struct inode *sai_inode; atomic_t sai_refcount; /* when access this struct, hold - * refcount */ + * refcount + */ unsigned int sai_generation; /* generation for statahead */ unsigned int sai_max; /* max ahead of lookup */ __u64 sai_sent; /* stat requests sent count */ __u64 sai_replied; /* stat requests which received - * reply */ + * reply + */ __u64 sai_index; /* index of statahead entry */ __u64 sai_index_wait; /* index of entry which is the - * caller is waiting for */ + * caller is waiting for + */ __u64 sai_hit; /* hit count */ __u64 sai_miss; /* miss count: - * for "ls -al" case, it includes - * hidden dentry miss; - * for "ls -l" case, it does not - * include hidden dentry miss. - * "sai_miss_hidden" is used for - * the later case. - */ + * for "ls -al" case, it includes + * hidden dentry miss; + * for "ls -l" case, it does not + * include hidden dentry miss. + * "sai_miss_hidden" is used for + * the later case. 
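The conversions above from "ptr == NULL" / "ptr != NULL" to "!ptr" / "ptr" (including LASSERT(info != NULL) becoming LASSERT(info)) follow the checkpatch "comparison to NULL" warning. A short sketch of the preferred form:

#include <linux/slab.h>

static int demo_null_style(size_t len)
{
	char *buf = kzalloc(len, GFP_KERNEL);

	if (!buf)		/* rather than: if (buf == NULL) */
		return -ENOMEM;

	if (buf)		/* rather than: if (buf != NULL) */
		buf[0] = '\0';

	kfree(buf);
	return 0;
}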
+ */ unsigned int sai_consecutive_miss; /* consecutive miss */ unsigned int sai_miss_hidden;/* "ls -al", but first dentry - * is not a hidden one */ + * is not a hidden one + */ unsigned int sai_skip_hidden;/* skipped hidden dentry count */ unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for - * hidden entries */ + * hidden entries + */ sai_agl_valid:1;/* AGL is valid for the dir */ - wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ + wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ struct ptlrpc_thread sai_thread; /* stat-ahead thread */ struct ptlrpc_thread sai_agl_thread; /* AGL thread */ - struct list_head sai_entries; /* entry list */ - struct list_head sai_entries_received; /* entries returned */ - struct list_head sai_entries_stated; /* entries stated */ - struct list_head sai_entries_agl; /* AGL entries to be sent */ - struct list_head sai_cache[LL_SA_CACHE_SIZE]; + struct list_head sai_entries; /* entry list */ + struct list_head sai_entries_received; /* entries returned */ + struct list_head sai_entries_stated; /* entries stated */ + struct list_head sai_entries_agl; /* AGL entries to be sent */ + struct list_head sai_cache[LL_SA_CACHE_SIZE]; spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE]; atomic_t sai_cache_count; /* entry count in cache */ }; @@ -1171,8 +1186,8 @@ ll_statahead_mark(struct inode *dir, struct dentry *dentry) if (lli->lli_opendir_pid != current_pid()) return; - LASSERT(ldd != NULL); - if (sai != NULL) + LASSERT(ldd); + if (sai) ldd->lld_sa_generation = sai->sai_generation; } @@ -1191,7 +1206,7 @@ d_need_statahead(struct inode *dir, struct dentry *dentryp) return -EAGAIN; /* statahead has been stopped */ - if (lli->lli_opendir_key == NULL) + if (!lli->lli_opendir_key) return -EAGAIN; ldd = ll_d2d(dentryp); @@ -1313,13 +1328,15 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, /** direct write pages */ struct ll_dio_pages { /** page array to be written. we don't support - * partial pages except the last one. */ + * partial pages except the last one. + */ struct page **ldp_pages; /* offset of each page */ loff_t *ldp_offsets; /** if ldp_offsets is NULL, it means a sequential * pages to be written, then this is the file offset - * of the * first page. */ + * of the first page. + */ loff_t ldp_start_offset; /** how many bytes are to be written. */ size_t ldp_size; @@ -1345,7 +1362,6 @@ static inline int ll_file_nolock(const struct file *file) struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct inode *inode = file_inode(file); - LASSERT(fd != NULL); return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) || (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK)); } @@ -1362,7 +1378,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, * remote MDT, where the object is, will grant * UPDATE|PERM lock. 
The inode will be attached to both * LOOKUP and PERM locks, so revoking either locks will - * case the dcache being cleared */ + * case the dcache being cleared + */ if (it->d.lustre.it_remote_lock_mode) { handle.cookie = it->d.lustre.it_remote_lock_handle; CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n", @@ -1383,7 +1400,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, it->d.lustre.it_lock_set = 1; } - if (bits != NULL) + if (bits) *bits = it->d.lustre.it_lock_bits; } @@ -1401,14 +1418,14 @@ static inline int d_lustre_invalid(const struct dentry *dentry) { struct ll_dentry_data *lld = ll_d2d(dentry); - return (lld == NULL) || lld->lld_invalid; + return !lld || lld->lld_invalid; } static inline void __d_lustre_invalidate(struct dentry *dentry) { struct ll_dentry_data *lld = ll_d2d(dentry); - if (lld != NULL) + if (lld) lld->lld_invalid = 1; } @@ -1442,7 +1459,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested) static inline void d_lustre_revalidate(struct dentry *dentry) { spin_lock(&dentry->d_lock); - LASSERT(ll_d2d(dentry) != NULL); + LASSERT(ll_d2d(dentry)); ll_d2d(dentry)->lld_invalid = 0; spin_unlock(&dentry->d_lock); } diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index b2fc5b3786ee..6d6bb33e3655 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -102,8 +102,6 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb) sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file; sbi->ll_ra_info.ra_max_read_ahead_whole_pages = SBI_DEFAULT_READAHEAD_WHOLE_MAX; - INIT_LIST_HEAD(&sbi->ll_conn_chain); - INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list); ll_generate_random_uuid(uuid); class_uuid_unparse(uuid, &sbi->ll_sb_uuid); @@ -171,7 +169,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, return -ENOMEM; } - if (llite_root != NULL) { + if (llite_root) { err = ldebugfs_register_mountpoint(llite_root, sb, dt, md); if (err < 0) CERROR("could not register mount in <debugfs>/lustre/llite\n"); @@ -204,7 +202,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT)) /* flag mdc connection as lightweight, only used for test - * purpose, use with care */ + * purpose, use with care + */ data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT; data->ocd_ibits_known = MDS_INODELOCK_FULL; @@ -252,10 +251,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* For mount, we only need fs info from MDT0, and also in DNE, it * can make sure the client can be mounted as long as MDT0 is - * available */ + * available + */ err = obd_statfs(NULL, sbi->ll_md_exp, osfs, - cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), - OBD_STATFS_FOR_MDT0); + cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), + OBD_STATFS_FOR_MDT0); if (err) goto out_md_fid; @@ -265,7 +265,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, * we can access the MDC export directly and exp_connect_flags will * be non-zero, but if accessing an upgraded 2.1 server it will * have the correct flags filled in. - * XXX: fill in the LMV exp_connect_flags from MDC(s). */ + * XXX: fill in the LMV exp_connect_flags from MDC(s). 
+ */ valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD; if (exp_connect_flags(sbi->ll_md_exp) != 0 && valid != CLIENT_CONNECT_MDT_REQD) { @@ -308,15 +309,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, } if (data->ocd_connect_flags & OBD_CONNECT_ACL) { -#ifdef MS_POSIXACL sb->s_flags |= MS_POSIXACL; -#endif sbi->ll_flags |= LL_SBI_ACL; } else { LCONSOLE_INFO("client wants to enable acl, but mdt not!\n"); -#ifdef MS_POSIXACL sb->s_flags &= ~MS_POSIXACL; -#endif sbi->ll_flags &= ~LL_SBI_ACL; } @@ -382,7 +379,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* OBD_CONNECT_CKSUM should always be set, even if checksums are * disabled by default, because it can still be enabled on the * fly via /sys. As a consequence, we still need to come to an - * agreement on the supported algorithms at connect time */ + * agreement on the supported algorithms at connect time + */ data->ocd_connect_flags |= OBD_CONNECT_CKSUM; if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY)) @@ -453,7 +451,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, #endif /* make root inode - * XXX: move this to after cbd setup? */ + * XXX: move this to after cbd setup? + */ valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS; if (sbi->ll_flags & LL_SBI_RMT_CLIENT) valid |= OBD_MD_FLRMTPERM; @@ -493,7 +492,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, md_free_lustre_md(sbi->ll_md_exp, &lmd); ptlrpc_req_finished(request); - if (root == NULL || IS_ERR(root)) { + if (!(root)) { if (lmd.lsm) obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm); #ifdef CONFIG_FS_POSIX_ACL @@ -502,8 +501,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, lmd.posix_acl = NULL; } #endif - err = IS_ERR(root) ? PTR_ERR(root) : -EBADF; - root = NULL; + err = -EBADF; CERROR("lustre_lite: bad iget4 for root\n"); goto out_root; } @@ -532,9 +530,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, &sbi->ll_cache, NULL); sb->s_root = d_make_root(root); - if (sb->s_root == NULL) { + if (!sb->s_root) { CERROR("%s: can't make root dentry\n", - ll_get_fsname(sb, NULL, 0)); + ll_get_fsname(sb, NULL, 0)); err = -ENOMEM; goto out_lock_cn_cb; } @@ -543,11 +541,13 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* We set sb->s_dev equal on all lustre clients in order to support * NFS export clustering. NFSD requires that the FSID be the same - * on all clients. */ + * on all clients. + */ /* s_dev is also used in lt_compare() to compare two fs, but that is - * only a node-local comparison. */ + * only a node-local comparison. + */ uuid = obd_get_uuid(sbi->ll_md_exp); - if (uuid != NULL) { + if (uuid) { sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid)); get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid); } @@ -597,7 +597,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize) size = sizeof(int); rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE), - KEY_DEFAULT_EASIZE, &size, lmmsize, NULL); + KEY_DEFAULT_EASIZE, &size, lmmsize, NULL); if (rc) CERROR("Get default mdsize error rc %d\n", rc); @@ -619,13 +619,12 @@ static void client_common_put_super(struct super_block *sb) cl_sb_fini(sb); - list_del(&sbi->ll_conn_chain); - obd_fid_fini(sbi->ll_dt_exp->exp_obd); obd_disconnect(sbi->ll_dt_exp); sbi->ll_dt_exp = NULL; /* wait till all OSCs are gone, since cl_cache is accessing sbi. 
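The client_common_fill_super() hunks above keep the d_make_root() check in its simplified form: d_make_root() consumes the inode reference (it puts the inode on failure) and returns NULL when the dentry cannot be allocated, so only the returned dentry needs testing. A generic sketch of a fill_super tail under hypothetical names, not the Lustre implementation:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>

/* hypothetical helper standing in for the filesystem's root lookup */
static struct inode *demo_get_root_inode(struct super_block *sb);

static int demo_fill_super_tail(struct super_block *sb)
{
	struct inode *root;

	root = demo_get_root_inode(sb);
	if (IS_ERR(root))
		return PTR_ERR(root);

	/* d_make_root() puts the inode itself if dentry allocation fails */
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;

	return 0;
}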
- * see LU-2543. */ + * see LU-2543. + */ obd_zombie_barrier(); ldebugfs_unregister_mountpoint(sbi); @@ -646,7 +645,8 @@ void ll_kill_super(struct super_block *sb) sbi = ll_s2sbi(sb); /* we need to restore s_dev from changed for clustered NFS before * put_super because new kernels have cached s_dev and change sb->s_dev - * in put_super not affected real removing devices */ + * in put_super not affected real removing devices + */ if (sbi) { sb->s_dev = sbi->ll_sdev_orig; sbi->ll_umounting = 1; @@ -777,7 +777,7 @@ static int ll_options(char *options, int *flags) next: /* Find next opt */ s2 = strchr(s1, ','); - if (s2 == NULL) + if (!s2) break; s1 = s2 + 1; } @@ -797,7 +797,6 @@ void ll_lli_init(struct ll_inode_info *lli) /* Do not set lli_fid, it has been initialized already. */ fid_zero(&lli->lli_pfid); INIT_LIST_HEAD(&lli->lli_close_list); - atomic_set(&lli->lli_open_count, 0); lli->lli_rmtperm_time = 0; lli->lli_pending_och = NULL; lli->lli_mds_read_och = NULL; @@ -890,8 +889,9 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) sb->s_d_op = &ll_d_ops; /* Generate a string unique to this super, in case some joker tries - to mount the same fs at two mount points. - Use the address of the super itself.*/ + * to mount the same fs at two mount points. + * Use the address of the super itself. + */ cfg->cfg_instance = sb; cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid; cfg->cfg_callback = class_config_llog_handler; @@ -904,7 +904,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */ lprof = class_get_profile(profilenm); - if (lprof == NULL) { + if (!lprof) { LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n", profilenm); err = -EINVAL; @@ -964,7 +964,8 @@ void ll_put_super(struct super_block *sb) } /* We need to set force before the lov_disconnect in - lustre_common_put_super, since l_d cleans up osc's as well. */ + * lustre_common_put_super, since l_d cleans up osc's as well. 
+ */ if (force) { next = 0; while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, @@ -1036,8 +1037,8 @@ void ll_clear_inode(struct inode *inode) if (S_ISDIR(inode->i_mode)) { /* these should have been cleared in ll_file_release */ - LASSERT(lli->lli_opendir_key == NULL); - LASSERT(lli->lli_sai == NULL); + LASSERT(!lli->lli_opendir_key); + LASSERT(!lli->lli_sai); LASSERT(lli->lli_opendir_pid == 0); } @@ -1065,7 +1066,7 @@ void ll_clear_inode(struct inode *inode) ll_xattr_cache_destroy(inode); if (sbi->ll_flags & LL_SBI_RMT_CLIENT) { - LASSERT(lli->lli_posix_acl == NULL); + LASSERT(!lli->lli_posix_acl); if (lli->lli_remote_perms) { free_rmtperm_hash(lli->lli_remote_perms); lli->lli_remote_perms = NULL; @@ -1074,7 +1075,7 @@ void ll_clear_inode(struct inode *inode) #ifdef CONFIG_FS_POSIX_ACL else if (lli->lli_posix_acl) { LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1); - LASSERT(lli->lli_remote_perms == NULL); + LASSERT(!lli->lli_remote_perms); posix_acl_release(lli->lli_posix_acl); lli->lli_posix_acl = NULL; } @@ -1095,7 +1096,7 @@ void ll_clear_inode(struct inode *inode) #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, - struct md_open_data **mod) + struct md_open_data **mod) { struct lustre_md md; struct inode *inode = d_inode(dentry); @@ -1115,7 +1116,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, if (rc == -ENOENT) { clear_nlink(inode); /* Unlinked special device node? Or just a race? - * Pretend we done everything. */ + * Pretend we did everything. + */ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) { ia_valid = op_data->op_attr.ia_valid; @@ -1138,7 +1140,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, ia_valid = op_data->op_attr.ia_valid; /* inode size will be in cl_setattr_ost, can't do it now since dirty - * cache is not cleared yet. */ + * cache is not cleared yet. + */ op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE); rc = simple_setattr(dentry, &op_data->op_attr); op_data->op_attr.ia_valid = ia_valid; @@ -1161,7 +1164,6 @@ static int ll_setattr_done_writing(struct inode *inode, struct ll_inode_info *lli = ll_i2info(inode); int rc = 0; - LASSERT(op_data != NULL); if (!S_ISREG(inode->i_mode)) return 0; @@ -1175,7 +1177,8 @@ static int ll_setattr_done_writing(struct inode *inode, rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod); if (rc == -EAGAIN) /* MDS has instructed us to obtain Size-on-MDS attribute - * from OSTs and send setattr to back to MDS. */ + * from OSTs and send setattr to back to MDS. 
+ */ rc = ll_som_update(inode, op_data); else if (rc) CERROR("inode %lu mdc truncate failed: rc = %d\n", @@ -1208,11 +1211,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) int rc = 0, rc1 = 0; CDEBUG(D_VFSTRACE, - "%s: setattr inode %p/fid:"DFID - " from %llu to %llu, valid %x, hsm_import %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), inode, - PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size, - attr->ia_valid, hsm_import); + "%s: setattr inode %p/fid:" DFID + " from %llu to %llu, valid %x, hsm_import %d\n", + ll_get_fsname(inode->i_sb, NULL, 0), inode, + PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size, + attr->ia_valid, hsm_import); if (attr->ia_valid & ATTR_SIZE) { /* Check new size against VFS/VM file size limit and rlimit */ @@ -1222,7 +1225,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) /* The maximum Lustre file size is variable, based on the * OST maximum object size and number of stripes. This - * needs another check in addition to the VFS check above. */ + * needs another check in addition to the VFS check above. + */ if (attr->ia_size > ll_file_maxbytes(inode)) { CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n", PFID(&lli->lli_fid), attr->ia_size, @@ -1270,7 +1274,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) } /* We always do an MDS RPC, even if we're only changing the size; - * only the MDS knows whether truncate() should fail with -ETXTBUSY */ + * only the MDS knows whether truncate() should fail with -ETXTBUSY + */ op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) @@ -1304,7 +1309,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) /* if not in HSM import mode, clear size attr for released file * we clear the attribute send to MDT in op_data, not the original * received from caller in attr which is used later to - * decide return code */ + * decide return code + */ if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import) op_data->op_attr.ia_valid &= ~ATTR_SIZE; @@ -1322,7 +1328,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) } /* RPC to MDT is sent, cancel data modification flag */ - if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) { + if (op_data->op_bias & MDS_DATA_MODIFIED) { spin_lock(&lli->lli_lock); lli->lli_flags &= ~LLIF_DATA_MODIFIED; spin_unlock(&lli->lli_lock); @@ -1342,7 +1348,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) * extent lock (new_size:EOF for truncate). It may seem * excessive to send mtime/atime updates to OSTs when not * setting times to past, but it is necessary due to possible - * time de-synchronization between MDT inode and OST objects */ + * time de-synchronization between MDT inode and OST objects + */ if (attr->ia_valid & ATTR_SIZE) down_write(&lli->lli_trunc_sem); rc = cl_setattr_ost(inode, attr); @@ -1470,7 +1477,8 @@ int ll_statfs(struct dentry *de, struct kstatfs *sfs) /* We need to downshift for all 32-bit kernels, because we can't * tell if the kernel is being called via sys_statfs64() or not. * Stop before overflowing f_bsize - in which case it is better - * to just risk EOVERFLOW if caller is using old sys_statfs(). */ + * to just risk EOVERFLOW if caller is using old sys_statfs(). 
+ */ if (sizeof(long) < 8) { while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) { sfs->f_bsize <<= 1; @@ -1514,7 +1522,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) struct ll_sb_info *sbi = ll_i2sbi(inode); LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0)); - if (lsm != NULL) { + if (lsm) { if (!lli->lli_has_smd && !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK)) cl_file_inode_init(inode, md); @@ -1599,12 +1607,13 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) if (exp_connect_som(ll_i2mdexp(inode)) && S_ISREG(inode->i_mode)) { struct lustre_handle lockh; - ldlm_mode_t mode; + enum ldlm_mode mode; /* As it is possible a blocking ast has been processed * by this time, we need to check there is an UPDATE * lock on the client and set LLIF_MDS_SIZE_LOCK holding - * it. */ + * it. + */ mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE, &lockh, LDLM_FL_CBPENDING, LCK_CR | LCK_CW | @@ -1617,7 +1626,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) inode->i_ino, lli->lli_flags); } else { /* Use old size assignment to avoid - * deadlock bz14138 & bz14326 */ + * deadlock bz14138 & bz14326 + */ i_size_write(inode, body->size); spin_lock(&lli->lli_lock); lli->lli_flags |= LLIF_MDS_SIZE_LOCK; @@ -1627,7 +1637,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) } } else { /* Use old size assignment to avoid - * deadlock bz14138 & bz14326 */ + * deadlock bz14138 & bz14326 + */ i_size_write(inode, body->size); CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n", @@ -1657,7 +1668,8 @@ void ll_read_inode2(struct inode *inode, void *opaque) /* Core attributes from the MDS first. This is a new inode, and * the VFS doesn't zero times in the core inode so we have to do * it ourselves. They will be overwritten by either MDS or OST - * attributes - we just need to make sure they aren't newer. */ + * attributes - we just need to make sure they aren't newer. + */ LTIME_S(inode->i_mtime) = 0; LTIME_S(inode->i_atime) = 0; LTIME_S(inode->i_ctime) = 0; @@ -1689,9 +1701,10 @@ void ll_delete_inode(struct inode *inode) { struct cl_inode_info *lli = cl_i2info(inode); - if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) + if (S_ISREG(inode->i_mode) && lli->lli_clob) /* discard all dirty pages before truncating them, required by - * osc_extent implementation at LU-1030. */ + * osc_extent implementation at LU-1030. 
+ */ cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_DISCARD, 1); @@ -1744,14 +1757,14 @@ int ll_iocontrol(struct inode *inode, struct file *file, ptlrpc_req_finished(req); - return put_user(flags, (int *)arg); + return put_user(flags, (int __user *)arg); } case FSFILT_IOC_SETFLAGS: { struct lov_stripe_md *lsm; struct obd_info oinfo = { }; struct md_op_data *op_data; - if (get_user(flags, (int *)arg)) + if (get_user(flags, (int __user *)arg)) return -EFAULT; op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, @@ -1776,8 +1789,7 @@ int ll_iocontrol(struct inode *inode, struct file *file, return 0; } - oinfo.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); + oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oinfo.oi_oa) { ccc_inode_lsm_put(inode, lsm); return -ENOMEM; @@ -1809,7 +1821,7 @@ int ll_flush_ctx(struct inode *inode) struct ll_sb_info *sbi = ll_i2sbi(inode); CDEBUG(D_SEC, "flush context for user %d\n", - from_kuid(&init_user_ns, current_uid())); + from_kuid(&init_user_ns, current_uid())); obd_set_info_async(NULL, sbi->ll_md_exp, sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX, @@ -1831,7 +1843,7 @@ void ll_umount_begin(struct super_block *sb) sb->s_count, atomic_read(&sb->s_active)); obd = class_exp2obd(sbi->ll_md_exp); - if (obd == NULL) { + if (!obd) { CERROR("Invalid MDC connection handle %#llx\n", sbi->ll_md_exp->exp_handle.h_cookie); return; @@ -1839,7 +1851,7 @@ void ll_umount_begin(struct super_block *sb) obd->obd_force = 1; obd = class_exp2obd(sbi->ll_dt_exp); - if (obd == NULL) { + if (!obd) { CERROR("Invalid LOV connection handle %#llx\n", sbi->ll_dt_exp->exp_handle.h_cookie); return; @@ -1920,13 +1932,8 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req) body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) { - CWARN("%s: cannot allocate op_data to release open handle for " - DFID "\n", - ll_get_fsname(sb, NULL, 0), PFID(&body->fid1)); - + if (!op_data) return; - } op_data->op_fid1 = body->fid1; op_data->op_ioepoch = body->ioepoch; @@ -1941,7 +1948,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, struct super_block *sb, struct lookup_intent *it) { struct ll_sb_info *sbi = NULL; - struct lustre_md md; + struct lustre_md md = { NULL }; int rc; LASSERT(*inode || sb); @@ -1954,7 +1961,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, if (*inode) { ll_update_inode(*inode, &md); } else { - LASSERT(sb != NULL); + LASSERT(sb); /* * At this point server returns to client's same fid as client @@ -1965,15 +1972,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1, sbi->ll_flags & LL_SBI_32BIT_API), &md); - if (*inode == NULL || IS_ERR(*inode)) { + if (!*inode) { #ifdef CONFIG_FS_POSIX_ACL if (md.posix_acl) { posix_acl_release(md.posix_acl); md.posix_acl = NULL; } #endif - rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM; - *inode = NULL; + rc = -ENOMEM; CERROR("new_inode -fatal: rc %d\n", rc); goto out; } @@ -1986,14 +1992,15 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, * 1. proc1: mdt returns a lsm but not granting layout * 2. layout was changed by another client * 3. proc2: refresh layout and layout lock granted - * 4. proc1: to apply a stale layout */ - if (it != NULL && it->d.lustre.it_lock_mode != 0) { + * 4. 
proc1: to apply a stale layout + */ + if (it && it->d.lustre.it_lock_mode != 0) { struct lustre_handle lockh; struct ldlm_lock *lock; lockh.cookie = it->d.lustre.it_lock_handle; lock = ldlm_handle2lock(&lockh); - LASSERT(lock != NULL); + LASSERT(lock); if (ldlm_has_layout(lock)) { struct cl_object_conf conf; @@ -2008,7 +2015,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, } out: - if (md.lsm != NULL) + if (md.lsm) obd_free_memmd(sbi->ll_dt_exp, &md.lsm); md_free_lustre_md(sbi->ll_md_exp, &md); @@ -2019,14 +2026,13 @@ cleanup: return rc; } -int ll_obd_statfs(struct inode *inode, void *arg) +int ll_obd_statfs(struct inode *inode, void __user *arg) { struct ll_sb_info *sbi = NULL; struct obd_export *exp; char *buf = NULL; struct obd_ioctl_data *data = NULL; __u32 type; - __u32 flags; int len = 0, rc; if (!inode) { @@ -2069,8 +2075,7 @@ int ll_obd_statfs(struct inode *inode, void *arg) goto out_statfs; } - flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0; - rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags); + rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL); if (rc) goto out_statfs; out_statfs: @@ -2101,7 +2106,8 @@ int ll_process_config(struct lustre_cfg *lcfg) LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC); /* Note we have not called client_common_fill_super yet, so - proc fns must be able to handle that! */ + * proc fns must be able to handle that! + */ rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars, lcfg, sb); if (rc > 0) @@ -2111,19 +2117,17 @@ int ll_process_config(struct lustre_cfg *lcfg) /* this function prepares md_op_data hint for passing ot down to MD stack. */ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, - struct inode *i1, struct inode *i2, - const char *name, int namelen, - int mode, __u32 opc, void *data) + struct inode *i1, struct inode *i2, + const char *name, int namelen, + int mode, __u32 opc, void *data) { - LASSERT(i1 != NULL); - if (namelen > ll_i2sbi(i1)->ll_namelen) return ERR_PTR(-ENAMETOOLONG); - if (op_data == NULL) + if (!op_data) op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (op_data == NULL) + if (!op_data) return ERR_PTR(-ENOMEM); ll_i2gids(op_data->op_suppgids, i1, i2); @@ -2143,8 +2147,8 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, op_data->op_cap = cfs_curproc_cap_pack(); op_data->op_bias = 0; op_data->op_cli_flags = 0; - if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) && - filename_is_volatile(name, namelen, NULL)) + if ((opc == LUSTRE_OPC_CREATE) && name && + filename_is_volatile(name, namelen, NULL)) op_data->op_bias |= MDS_CREATE_VOLATILE; op_data->op_opc = opc; op_data->op_mds = 0; @@ -2152,7 +2156,8 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, /* If the file is being opened after mknod() (normally due to NFS) * try to use the default stripe data from parent directory for - * allocating OST objects. Try to pass the parent FID to MDS. */ + * allocating OST objects. Try to pass the parent FID to MDS. 
+ */ if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) && !ll_i2info(i2)->lli_has_smd) { struct ll_inode_info *lli = ll_i2info(i2); @@ -2179,7 +2184,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry) { struct ll_sb_info *sbi; - LASSERT((seq != NULL) && (dentry != NULL)); + LASSERT(seq && dentry); sbi = ll_s2sbi(dentry->d_sb); if (sbi->ll_flags & LL_SBI_NOLCK) @@ -2221,8 +2226,8 @@ int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg) if (!obd) return -ENOENT; - if (copy_to_user((void *)arg, obd->obd_name, - strlen(obd->obd_name) + 1)) + if (copy_to_user((void __user *)arg, obd->obd_name, + strlen(obd->obd_name) + 1)) return -EFAULT; return 0; @@ -2240,10 +2245,11 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen) char *ptr; int len; - if (buf == NULL) { + if (!buf) { /* this means the caller wants to use static buffer * and it doesn't care about race. Usually this is - * in error reporting path */ + * in error reporting path + */ buf = fsname_static; buflen = sizeof(fsname_static); } @@ -2269,9 +2275,9 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret) /* this can be called inside spin lock so use GFP_ATOMIC. */ buf = (char *)__get_free_page(GFP_ATOMIC); - if (buf != NULL) { + if (buf) { dentry = d_find_alias(page->mapping->host); - if (dentry != NULL) + if (dentry) path = dentry_path_raw(dentry, buf, PAGE_SIZE); } @@ -2282,9 +2288,9 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret) PFID(&obj->cob_header.coh_lu.loh_fid), (path && !IS_ERR(path)) ? path : "", ioret); - if (dentry != NULL) + if (dentry) dput(dentry); - if (buf != NULL) + if (buf) free_page((unsigned long)buf); } diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index bbae95c9feed..69445a9f2011 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c @@ -54,8 +54,8 @@ static const struct vm_operations_struct ll_file_vm_ops; void policy_from_vma(ldlm_policy_data_t *policy, - struct vm_area_struct *vma, unsigned long addr, - size_t count) + struct vm_area_struct *vma, unsigned long addr, + size_t count) { policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + (vma->vm_pgoff << PAGE_CACHE_SHIFT); @@ -72,7 +72,7 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, LASSERT(!down_write_trylock(&mm->mmap_sem)); for (vma = find_vma(mm, addr); - vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { + vma && vma->vm_start < (addr + count); vma = vma->vm_next) { if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && vma->vm_flags & VM_SHARED) { ret = vma; @@ -119,13 +119,13 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, */ env = cl_env_nested_get(nest); if (IS_ERR(env)) - return ERR_PTR(-EINVAL); + return ERR_PTR(-EINVAL); *env_ret = env; io = ccc_env_thread_io(env); io->ci_obj = ll_i2info(inode)->lli_clob; - LASSERT(io->ci_obj != NULL); + LASSERT(io->ci_obj); fio = &io->u.ci_fault; fio->ft_index = index; @@ -136,7 +136,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, * the kernel will not read other pages not covered by ldlm in * filemap_nopage. we do our readahead in ll_readpage. 
*/ - if (ra_flags != NULL) + if (ra_flags) *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ); vma->vm_flags &= ~VM_SEQ_READ; vma->vm_flags |= VM_RAND_READ; @@ -151,8 +151,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, LASSERT(cio->cui_cl.cis_io == io); - /* mmap lock must be MANDATORY it has to cache - * pages. */ + /* mmap lock must be MANDATORY it has to cache pages. */ io->ci_lockreq = CILR_MANDATORY; cio->cui_fd = fd; } else { @@ -178,8 +177,6 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, struct inode *inode; struct ll_inode_info *lli; - LASSERT(vmpage != NULL); - io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); if (IS_ERR(io)) { result = PTR_ERR(io); @@ -201,7 +198,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, /* we grab lli_trunc_sem to exclude truncate case. * Otherwise, we could add dirty pages into osc cache - * while truncate is on-going. */ + * while truncate is on-going. + */ inode = ccc_object_inode(io->ci_obj); lli = ll_i2info(inode); down_read(&lli->lli_trunc_sem); @@ -217,12 +215,13 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, struct ll_inode_info *lli = ll_i2info(inode); lock_page(vmpage); - if (vmpage->mapping == NULL) { + if (!vmpage->mapping) { unlock_page(vmpage); /* page was truncated and lock was cancelled, return * ENODATA so that VM_FAULT_NOPAGE will be returned - * to handle_mm_fault(). */ + * to handle_mm_fault(). + */ if (result == 0) result = -ENODATA; } else if (!PageDirty(vmpage)) { @@ -315,12 +314,13 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) result = cl_io_loop(env, io); /* ft_flags are only valid if we reached - * the call to filemap_fault */ + * the call to filemap_fault + */ if (vio->u.fault.fault.ft_flags_valid) fault_ret = vio->u.fault.fault.ft_flags; vmpage = vio->u.fault.ft_vmpage; - if (result != 0 && vmpage != NULL) { + if (result != 0 && vmpage) { page_cache_release(vmpage); vmf->page = NULL; } @@ -344,9 +344,10 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) int result; sigset_t set; - /* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite + /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite * so that it can be killed by admin but not cause segfault by - * other signals. */ + * other signals. + */ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); restart: @@ -357,7 +358,7 @@ restart: /* check if this page has been truncated */ lock_page(vmpage); - if (unlikely(vmpage->mapping == NULL)) { /* unlucky */ + if (unlikely(!vmpage->mapping)) { /* unlucky */ unlock_page(vmpage); page_cache_release(vmpage); vmf->page = NULL; @@ -447,7 +448,8 @@ static void ll_vm_close(struct vm_area_struct *vma) } /* XXX put nice comment here. talk about __free_pte -> dirty pages and - * nopage's reference passing to the pte */ + * nopage's reference passing to the pte + */ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) { int rc = -ENOENT; diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c index 18aab25f9cd9..193aab879709 100644 --- a/drivers/staging/lustre/lustre/llite/llite_nfs.c +++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c @@ -105,7 +105,8 @@ struct inode *search_inode_for_lustre(struct super_block *sb, return ERR_PTR(rc); /* Because inode is NULL, ll_prep_md_op_data can not - * be used here. 
So we allocate op_data ourselves */ + * be used here. So we allocate op_data ourselves + */ op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) return ERR_PTR(-ENOMEM); @@ -141,10 +142,11 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren struct inode *inode; struct dentry *result; - CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid)); if (!fid_is_sane(fid)) return ERR_PTR(-ESTALE); + CDEBUG(D_INFO, "Get dentry for fid: " DFID "\n", PFID(fid)); + inode = search_inode_for_lustre(sb, fid); if (IS_ERR(inode)) return ERR_CAST(inode); @@ -160,7 +162,7 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren * We have to find the parent to tell MDS how to init lov objects. */ if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd && - parent != NULL) { + parent && !fid_is_zero(parent)) { struct ll_inode_info *lli = ll_i2info(inode); spin_lock(&lli->lli_lock); @@ -174,8 +176,6 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren return result; } -#define LUSTRE_NFS_FID 0x97 - /** * \a connectable - is nfsd will connect himself or this should be done * at lustre @@ -188,20 +188,25 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen, struct inode *parent) { + int fileid_len = sizeof(struct lustre_nfs_fid) / 4; struct lustre_nfs_fid *nfs_fid = (void *)fh; - CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n", - inode->i_ino, PFID(ll_inode2fid(inode)), *plen, - (int)sizeof(struct lustre_nfs_fid)); + CDEBUG(D_INFO, "encoding for (%lu," DFID ") maxlen=%d minlen=%d\n", + inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len); - if (*plen < sizeof(struct lustre_nfs_fid) / 4) - return 255; + if (*plen < fileid_len) { + *plen = fileid_len; + return FILEID_INVALID; + } nfs_fid->lnf_child = *ll_inode2fid(inode); - nfs_fid->lnf_parent = *ll_inode2fid(parent); - *plen = sizeof(struct lustre_nfs_fid) / 4; + if (parent) + nfs_fid->lnf_parent = *ll_inode2fid(parent); + else + fid_zero(&nfs_fid->lnf_parent); + *plen = fileid_len; - return LUSTRE_NFS_FID; + return FILEID_LUSTRE; } static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, @@ -209,7 +214,8 @@ static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, unsigned type) { /* It is hack to access lde_fid for comparison with lgd_fid. - * So the input 'name' must be part of the 'lu_dirent'. */ + * So the input 'name' must be part of the 'lu_dirent'. 
+ */ struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name); struct ll_getname_data *lgd = container_of(ctx, struct ll_getname_data, ctx); @@ -259,7 +265,7 @@ static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid, { struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid; - if (fh_type != LUSTRE_NFS_FID) + if (fh_type != FILEID_LUSTRE) return ERR_PTR(-EPROTO); return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent); @@ -270,7 +276,7 @@ static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid, { struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid; - if (fh_type != LUSTRE_NFS_FID) + if (fh_type != FILEID_LUSTRE) return ERR_PTR(-EPROTO); return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL); @@ -292,8 +298,8 @@ static struct dentry *ll_get_parent(struct dentry *dchild) sbi = ll_s2sbi(dir->i_sb); - CDEBUG(D_INFO, "getting parent for (%lu,"DFID")\n", - dir->i_ino, PFID(ll_inode2fid(dir))); + CDEBUG(D_INFO, "getting parent for (%lu," DFID ")\n", + dir->i_ino, PFID(ll_inode2fid(dir))); rc = ll_get_default_mdsize(sbi, &lmmsize); if (rc != 0) @@ -314,8 +320,8 @@ static struct dentry *ll_get_parent(struct dentry *dchild) body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); LASSERT(body->valid & OBD_MD_FLID); - CDEBUG(D_INFO, "parent for "DFID" is "DFID"\n", - PFID(ll_inode2fid(dir)), PFID(&body->fid1)); + CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n", + PFID(ll_inode2fid(dir)), PFID(&body->fid1)); result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL); @@ -323,10 +329,10 @@ static struct dentry *ll_get_parent(struct dentry *dchild) return result; } -struct export_operations lustre_export_operations = { - .get_parent = ll_get_parent, - .encode_fh = ll_encode_fh, - .get_name = ll_get_name, +const struct export_operations lustre_export_operations = { + .get_parent = ll_get_parent, + .encode_fh = ll_encode_fh, + .get_name = ll_get_name, .fh_to_dentry = ll_fh_to_dentry, .fh_to_parent = ll_fh_to_parent, }; diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c index b27c3f2fcd02..8509b07cb5c7 100644 --- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c +++ b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c @@ -98,7 +98,7 @@ static void rce_free(struct rmtacl_ctl_entry *rce) } static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct, - pid_t key) + pid_t key) { struct rmtacl_ctl_entry *rce; struct list_head *head = &rct->rct_entries[rce_hashfunc(key)]; @@ -125,12 +125,12 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops) struct rmtacl_ctl_entry *rce, *e; rce = rce_alloc(key, ops); - if (rce == NULL) + if (!rce) return -ENOMEM; spin_lock(&rct->rct_lock); e = __rct_search(rct, key); - if (unlikely(e != NULL)) { + if (unlikely(e)) { CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n", (int)key, ops); rce_free(e); @@ -172,7 +172,7 @@ void rct_fini(struct rmtacl_ctl_table *rct) for (i = 0; i < RCE_HASHES; i++) while (!list_empty(&rct->rct_entries[i])) { rce = list_entry(rct->rct_entries[i].next, - struct rmtacl_ctl_entry, rce_list); + struct rmtacl_ctl_entry, rce_list); rce_free(rce); } spin_unlock(&rct->rct_lock); @@ -208,12 +208,12 @@ void ee_free(struct eacl_entry *ee) } static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key, - struct lu_fid *fid, int type) + struct lu_fid *fid, int type) { struct eacl_entry *ee; struct list_head *head = &et->et_entries[ee_hashfunc(key)]; - 
LASSERT(fid != NULL); + LASSERT(fid); list_for_each_entry(ee, head, ee_list) if (ee->ee_key == key) { if (lu_fid_eq(&ee->ee_fid, fid) && @@ -256,12 +256,12 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type, struct eacl_entry *ee, *e; ee = ee_alloc(key, fid, type, header); - if (ee == NULL) + if (!ee) return -ENOMEM; spin_lock(&et->et_lock); e = __et_search_del(et, key, fid, type); - if (unlikely(e != NULL)) { + if (unlikely(e)) { CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n", (int)key, PFID(fid), type); ee_free(e); @@ -290,7 +290,7 @@ void et_fini(struct eacl_table *et) for (i = 0; i < EE_HASHES; i++) while (!list_empty(&et->et_entries[i])) { ee = list_entry(et->et_entries[i].next, - struct eacl_entry, ee_list); + struct eacl_entry, ee_list); ee_free(ee); } spin_unlock(&et->et_lock); diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c index 871924b3f2e7..b725fc16cf49 100644 --- a/drivers/staging/lustre/lustre/llite/lloop.c +++ b/drivers/staging/lustre/lustre/llite/lloop.c @@ -211,9 +211,8 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) return io->ci_result; io->ci_lockreq = CILR_NEVER; - LASSERT(head != NULL); rw = head->bi_rw; - for (bio = head; bio != NULL; bio = bio->bi_next) { + for (bio = head; bio ; bio = bio->bi_next) { LASSERT(rw == bio->bi_rw); offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; @@ -297,7 +296,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) spin_lock_irq(&lo->lo_lock); first = lo->lo_bio; - if (unlikely(first == NULL)) { + if (unlikely(!first)) { spin_unlock_irq(&lo->lo_lock); return 0; } @@ -308,7 +307,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) rw = first->bi_rw; bio = &lo->lo_bio; while (*bio && (*bio)->bi_rw == rw) { - CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n", + CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u\n", (unsigned long long)(*bio)->bi_iter.bi_sector, (*bio)->bi_iter.bi_size, page_count, (*bio)->bi_vcnt); @@ -458,7 +457,7 @@ static int loop_thread(void *data) total_count, times, total_count / times); } - LASSERT(bio != NULL); + LASSERT(bio); LASSERT(count <= atomic_read(&lo->lo_pending)); loop_handle_bio(lo, bio); atomic_sub(count, &lo->lo_pending); @@ -560,7 +559,7 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev, if (lo->lo_refcnt > count) /* we needed one fd for the ioctl */ return -EBUSY; - if (filp == NULL) + if (!filp) return -EINVAL; spin_lock_irq(&lo->lo_lock); @@ -625,18 +624,18 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, case LL_IOC_LLOOP_INFO: { struct lu_fid fid; - if (lo->lo_backing_file == NULL) { + if (!lo->lo_backing_file) { err = -ENOENT; break; } - if (inode == NULL) + if (!inode) inode = file_inode(lo->lo_backing_file); if (lo->lo_state == LLOOP_BOUND) fid = ll_i2info(inode)->lli_fid; else fid_zero(&fid); - if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid))) + if (copy_to_user((void __user *)arg, &fid, sizeof(fid))) err = -EFAULT; break; } @@ -676,7 +675,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file, if (magic != ll_iocontrol_magic) return LLIOC_CONT; - if (disks == NULL) { + if (!disks) { err = -ENODEV; goto out1; } @@ -708,7 +707,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file, dev = MKDEV(lloop_major, lo->lo_number); /* quit if the used pointer is writable */ - if 
(put_user((long)old_encode_dev(dev), (long *)arg)) { + if (put_user((long)old_encode_dev(dev), (long __user *)arg)) { err = -EFAULT; goto out; } @@ -793,7 +792,7 @@ static int __init lloop_init(void) lloop_major, max_loop); ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist); - if (ll_iocontrol_magic == NULL) + if (!ll_iocontrol_magic) goto out_mem1; loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL); @@ -872,11 +871,12 @@ static void lloop_exit(void) kfree(loop_dev); } -module_init(lloop_init); -module_exit(lloop_exit); - module_param(max_loop, int, 0444); MODULE_PARM_DESC(max_loop, "maximum of lloop_device"); MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); MODULE_DESCRIPTION("Lustre virtual block device"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); + +module_init(lloop_init); +module_exit(lloop_exit); diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c index f134ad9d23f0..45941a6600fe 100644 --- a/drivers/staging/lustre/lustre/llite/lproc_llite.c +++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c @@ -43,7 +43,7 @@ #include "llite_internal.h" #include "vvp_internal.h" -/* /proc/lustre/llite mount point registration */ +/* debugfs llite mount point registration */ static struct file_operations ll_rw_extents_stats_fops; static struct file_operations ll_rw_extents_stats_pp_fops; static struct file_operations ll_rw_offset_stats_fops; @@ -345,7 +345,8 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj, return rc; /* Cap this at the current max readahead window size, the readahead - * algorithm does this anyway so it's pointless to set it larger. */ + * algorithm does this anyway so it's pointless to set it larger. + */ if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n", sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT)); @@ -453,7 +454,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, if (diff <= 0) break; - if (sbi->ll_dt_exp == NULL) { /* being initialized */ + if (!sbi->ll_dt_exp) { /* being initialized */ rc = -ENODEV; break; } @@ -461,9 +462,9 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, /* difficult - have to ask OSCs to drop LRU slots. 
*/ tmp = diff << 1; rc = obd_set_info_async(NULL, sbi->ll_dt_exp, - sizeof(KEY_CACHE_LRU_SHRINK), - KEY_CACHE_LRU_SHRINK, - sizeof(tmp), &tmp, NULL); + sizeof(KEY_CACHE_LRU_SHRINK), + KEY_CACHE_LRU_SHRINK, + sizeof(tmp), &tmp, NULL); if (rc < 0) break; } @@ -966,9 +967,9 @@ int ldebugfs_register_mountpoint(struct dentry *parent, name[MAX_STRING_SIZE] = '\0'; - LASSERT(sbi != NULL); - LASSERT(mdc != NULL); - LASSERT(osc != NULL); + LASSERT(sbi); + LASSERT(mdc); + LASSERT(osc); /* Get fsname */ len = strlen(lsi->lsi_lmd->lmd_profile); @@ -999,7 +1000,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, CWARN("Error adding the extent_stats file\n"); rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, - "extents_stats_per_process", + "extents_stats_per_process", 0644, &ll_rw_extents_stats_pp_fops, sbi); if (rc) CWARN("Error adding the extents_stats_per_process file\n"); @@ -1012,7 +1013,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, /* File operations stats */ sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES, LPROCFS_STATS_FLAG_NONE); - if (sbi->ll_stats == NULL) { + if (!sbi->ll_stats) { err = -ENOMEM; goto out; } @@ -1033,13 +1034,13 @@ int ldebugfs_register_mountpoint(struct dentry *parent, llite_opcode_table[id].opname, ptr); } err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats", - sbi->ll_stats); + sbi->ll_stats); if (err) goto out; sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string), LPROCFS_STATS_FLAG_NONE); - if (sbi->ll_ra_stats == NULL) { + if (!sbi->ll_ra_stats) { err = -ENOMEM; goto out; } @@ -1049,7 +1050,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, ra_stat_string[id], "pages"); err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats", - sbi->ll_ra_stats); + sbi->ll_ra_stats); if (err) goto out; @@ -1103,7 +1104,7 @@ void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi) #define pct(a, b) (b ? 
a * 100 / b : 0) static void ll_display_extents_info(struct ll_rw_extents_info *io_extents, - struct seq_file *seq, int which) + struct seq_file *seq, int which) { unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum; unsigned long start, end, r, w; @@ -1503,5 +1504,5 @@ LPROC_SEQ_FOPS(ll_rw_offset_stats); void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars) { - lvars->obd_vars = lprocfs_llite_obd_vars; + lvars->obd_vars = lprocfs_llite_obd_vars; } diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index da5f443a0768..f8f98e4e8258 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -118,16 +118,16 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash, ll_read_inode2(inode, md); if (S_ISREG(inode->i_mode) && - ll_i2info(inode)->lli_clob == NULL) { + !ll_i2info(inode)->lli_clob) { CDEBUG(D_INODE, - "%s: apply lsm %p to inode "DFID".\n", - ll_get_fsname(sb, NULL, 0), md->lsm, - PFID(ll_inode2fid(inode))); + "%s: apply lsm %p to inode " DFID ".\n", + ll_get_fsname(sb, NULL, 0), md->lsm, + PFID(ll_inode2fid(inode))); rc = cl_file_inode_init(inode, md); } if (rc != 0) { iget_failed(inode); - inode = ERR_PTR(rc); + inode = NULL; } else unlock_new_inode(inode); } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) @@ -180,10 +180,11 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, __u64 bits = lock->l_policy_data.l_inodebits.bits; /* Inode is set to lock->l_resource->lr_lvb_inode - * for mdc - bug 24555 */ - LASSERT(lock->l_ast_data == NULL); + * for mdc - bug 24555 + */ + LASSERT(!lock->l_ast_data); - if (inode == NULL) + if (!inode) break; /* Invalidate all dentries associated with this inode */ @@ -202,7 +203,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, } /* For OPEN locks we differentiate between lock modes - * LCK_CR, LCK_CW, LCK_PR - bug 22891 */ + * LCK_CR, LCK_CW, LCK_PR - bug 22891 + */ if (bits & MDS_INODELOCK_OPEN) ll_have_md_lock(inode, &bits, lock->l_req_mode); @@ -260,7 +262,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, } if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) && - inode->i_sb->s_root != NULL && + inode->i_sb->s_root && !is_root_inode(inode)) ll_invalidate_aliases(inode); @@ -285,15 +287,11 @@ __u32 ll_i2suppgid(struct inode *i) /* Pack the required supplementary groups into the supplied groups array. * If we don't need to use the groups from the target inode(s) then we * instead pack one or more groups from the user's supplementary group - * array in case it might be useful. Not needed if doing an MDS-side upcall. */ + * array in case it might be useful. Not needed if doing an MDS-side upcall. 
+ */ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2) { -#if 0 - int i; -#endif - - LASSERT(i1 != NULL); - LASSERT(suppgids != NULL); + LASSERT(i1); suppgids[0] = ll_i2suppgid(i1); @@ -301,22 +299,6 @@ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2) suppgids[1] = ll_i2suppgid(i2); else suppgids[1] = -1; - -#if 0 - for (i = 0; i < current_ngroups; i++) { - if (suppgids[0] == -1) { - if (current_groups[i] != suppgids[1]) - suppgids[0] = current_groups[i]; - continue; - } - if (suppgids[1] == -1) { - if (current_groups[i] != suppgids[0]) - suppgids[1] = current_groups[i]; - continue; - } - break; - } -#endif } /* @@ -409,7 +391,8 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, int rc = 0; /* NB 1 request reference will be taken away by ll_intent_lock() - * when I return */ + * when I return + */ CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it, it->d.lustre.it_disposition); if (!it_disposition(it, DISP_LOOKUP_NEG)) { @@ -420,13 +403,14 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits); /* We used to query real size from OSTs here, but actually - this is not needed. For stat() calls size would be updated - from subsequent do_revalidate()->ll_inode_revalidate_it() in - 2.4 and - vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6 - Everybody else who needs correct file size would call - ll_glimpse_size or some equivalent themselves anyway. - Also see bug 7198. */ + * this is not needed. For stat() calls size would be updated + * from subsequent do_revalidate()->ll_inode_revalidate_it() in + * 2.4 and + * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6 + * Everybody else who needs correct file size would call + * ll_glimpse_size or some equivalent themselves anyway. + * Also see bug 7198. + */ } /* Only hash *de if it is unhashed (new dentry). @@ -443,9 +427,10 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, *de = alias; } else if (!it_disposition(it, DISP_LOOKUP_NEG) && !it_disposition(it, DISP_OPEN_CREATE)) { - /* With DISP_OPEN_CREATE dentry will - instantiated in ll_create_it. */ - LASSERT(d_inode(*de) == NULL); + /* With DISP_OPEN_CREATE dentry will be + * instantiated in ll_create_it. 
+ */ + LASSERT(!d_inode(*de)); d_instantiate(*de, inode); } @@ -498,7 +483,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, if (d_mountpoint(dentry)) CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it)); - if (it == NULL || it->it_op == IT_GETXATTR) + if (!it || it->it_op == IT_GETXATTR) it = &lookup_it; if (it->it_op == IT_GETATTR) { @@ -557,7 +542,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, out: if (req) ptlrpc_req_finished(req); - if (it->it_op == IT_GETATTR && (retval == NULL || retval == dentry)) + if (it->it_op == IT_GETATTR && (!retval || retval == dentry)) ll_statahead_mark(parent, dentry); return retval; } @@ -582,7 +567,7 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry, itp = ⁢ de = ll_lookup_it(parent, dentry, itp, 0); - if (itp != NULL) + if (itp) ll_intent_release(itp); return de; @@ -622,7 +607,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, de = ll_lookup_it(dir, dentry, it, lookup_flags); if (IS_ERR(de)) rc = PTR_ERR(de); - else if (de != NULL) + else if (de) dentry = de; if (!rc) { @@ -631,7 +616,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, rc = ll_create_it(dir, dentry, mode, it); if (rc) { /* We dget in ll_splice_alias. */ - if (de != NULL) + if (de) dput(de); goto out_release; } @@ -655,7 +640,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, /* We dget in ll_splice_alias. finish_open takes * care of dget for fd open. */ - if (de != NULL) + if (de) dput(de); } } else { @@ -693,7 +678,8 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it) /* We asked for a lock on the directory, but were granted a * lock on the inode. Since we finally have an inode pointer, - * stuff it in the lock. */ + * stuff it in the lock. + */ CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n", inode, inode->i_ino, inode->i_generation); ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); @@ -767,7 +753,7 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry, int tgt_len = 0; int err; - if (unlikely(tgt != NULL)) + if (unlikely(tgt)) tgt_len = strlen(tgt) + 1; op_data = ll_prep_md_op_data(NULL, dir, NULL, @@ -888,10 +874,11 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) /* The MDS sent back the EA because we unlinked the last reference * to this file. Use this EA to unlink the objects on the OST. * It's opaque so we don't swab here; we leave it to obd_unpackmd() to - * check it is complete and sensible. */ + * check it is complete and sensible. 
+ */ eadata = req_capsule_server_sized_get(&request->rq_pill, &RMF_MDT_MD, body->eadatasize); - LASSERT(eadata != NULL); + LASSERT(eadata); rc = obd_unpackmd(ll_i2dtexp(dir), &lsm, eadata, body->eadatasize); if (rc < 0) { @@ -900,8 +887,8 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) } LASSERT(rc >= sizeof(*lsm)); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) { + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) { rc = -ENOMEM; goto out_free_memmd; } @@ -917,7 +904,7 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) &RMF_LOGCOOKIES, sizeof(struct llog_cookie) * lsm->lsm_stripe_count); - if (oti.oti_logcookies == NULL) { + if (!oti.oti_logcookies) { oa->o_valid &= ~OBD_MD_FLCOOKIE; body->valid &= ~OBD_MD_FLCOOKIE; } @@ -938,7 +925,8 @@ out: /* ll_unlink() doesn't update the inode with the new link count. * Instead, ll_ddelete() and ll_d_iput() will update it based upon if there * is any lock existing. They will recycle dentries and inodes based upon locks - * too. b=20433 */ + * too. b=20433 + */ static int ll_unlink(struct inode *dir, struct dentry *dentry) { struct ptlrpc_request *request = NULL; @@ -1028,7 +1016,7 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry, dir, 3000, oldname); err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO, - 0, LUSTRE_OPC_SYMLINK); + 0, LUSTRE_OPC_SYMLINK); if (!err) ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1); diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c index fe4a72268e3a..e9d25317cd28 100644 --- a/drivers/staging/lustre/lustre/llite/remote_perm.c +++ b/drivers/staging/lustre/lustre/llite/remote_perm.c @@ -61,7 +61,7 @@ static inline struct ll_remote_perm *alloc_ll_remote_perm(void) { struct ll_remote_perm *lrp; - lrp = kmem_cache_alloc(ll_remote_perm_cachep, GFP_KERNEL | __GFP_ZERO); + lrp = kmem_cache_zalloc(ll_remote_perm_cachep, GFP_KERNEL); if (lrp) INIT_HLIST_NODE(&lrp->lrp_list); return lrp; @@ -82,7 +82,7 @@ static struct hlist_head *alloc_rmtperm_hash(void) struct hlist_head *hash; int i; - hash = kmem_cache_alloc(ll_rmtperm_hash_cachep, GFP_NOFS | __GFP_ZERO); + hash = kmem_cache_zalloc(ll_rmtperm_hash_cachep, GFP_NOFS); if (!hash) return NULL; diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c index f355474967d6..34614acf3f8e 100644 --- a/drivers/staging/lustre/lustre/llite/rw.c +++ b/drivers/staging/lustre/lustre/llite/rw.c @@ -70,9 +70,9 @@ static void ll_cl_fini(struct ll_cl_context *lcc) struct cl_page *page = lcc->lcc_page; LASSERT(lcc->lcc_cookie == current); - LASSERT(env != NULL); + LASSERT(env); - if (page != NULL) { + if (page) { lu_ref_del(&page->cp_reference, "cl_io", io); cl_page_put(env, page); } @@ -97,7 +97,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file, int result = 0; clob = ll_i2info(vmpage->mapping->host)->lli_clob; - LASSERT(clob != NULL); + LASSERT(clob); env = cl_env_get(&refcheck); if (IS_ERR(env)) @@ -111,7 +111,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file, cio = ccc_env_io(env); io = cio->cui_cl.cis_io; - if (io == NULL && create) { + if (!io && create) { struct inode *inode = vmpage->mapping->host; loff_t pos; @@ -120,7 +120,8 @@ static struct ll_cl_context *ll_cl_init(struct file *file, /* this is too bad. Someone is trying to write the * page w/o holding inode mutex. 
This means we can - * add dirty pages into cache during truncate */ + * add dirty pages into cache during truncate + */ CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n", current->comm); dump_stack(); @@ -163,12 +164,11 @@ static struct ll_cl_context *ll_cl_init(struct file *file, } lcc->lcc_io = io; - if (io == NULL) + if (!io) result = -EIO; if (result == 0) { struct cl_page *page; - LASSERT(io != NULL); LASSERT(io->ci_state == CIS_IO_GOING); LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file)); page = cl_page_find(env, clob, vmpage->index, vmpage, @@ -240,7 +240,8 @@ int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from, ll_cl_fini(lcc); } /* returning 0 in prepare assumes commit must be called - * afterwards */ + * afterwards + */ } else { result = PTR_ERR(lcc); } @@ -296,8 +297,8 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which); * to get an ra budget that is larger than the remaining readahead pages * and reach here at exactly the same time. They will compute /a ret to * consume the remaining pages, but will fail at atomic_add_return() and - * get a zero ra window, although there is still ra space remaining. - Jay */ - + * get a zero ra window, although there is still ra space remaining. - Jay + */ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria, unsigned long pages) @@ -307,7 +308,8 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, /* If read-ahead pages left are less than 1M, do not do read-ahead, * otherwise it will form small read RPC(< 1M), which hurt server - * performance a lot. */ + * performance a lot. + */ ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages); if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) { ret = 0; @@ -324,7 +326,8 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, * branch is more expensive than subtracting zero from the result. * * Strided read is left unaligned to avoid small fragments beyond - * the RPC boundary from needing an extra read RPC. */ + * the RPC boundary from needing an extra read RPC. 
+ */ if (ria->ria_pages == 0) { long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES; @@ -364,7 +367,7 @@ void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which) #define RAS_CDEBUG(ras) \ CDEBUG(D_READA, \ "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \ - "csr %lu sf %lu sp %lu sl %lu \n", \ + "csr %lu sf %lu sp %lu sl %lu\n", \ ras->ras_last_readpage, ras->ras_consecutive_requests, \ ras->ras_consecutive_pages, ras->ras_window_start, \ ras->ras_window_len, ras->ras_next_readahead, \ @@ -378,9 +381,9 @@ static int index_in_window(unsigned long index, unsigned long point, unsigned long start = point - before, end = point + after; if (start > point) - start = 0; + start = 0; if (end < point) - end = ~0; + end = ~0; return start <= index && index <= end; } @@ -473,7 +476,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, const char *msg = NULL; vmpage = grab_cache_page_nowait(mapping, index); - if (vmpage != NULL) { + if (vmpage) { /* Check if vmpage was truncated or reclaimed */ if (vmpage->mapping == mapping) { page = cl_page_find(env, clob, vmpage->index, @@ -500,7 +503,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, which = RA_STAT_FAILED_GRAB_PAGE; msg = "g_c_p_n failed"; } - if (msg != NULL) { + if (msg) { ll_ra_stats_inc(mapping, which); CDEBUG(D_READA, "%s\n", msg); } @@ -515,13 +518,15 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, /* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't * know what the actual RPC size is. If this needs to change, it makes more * sense to tune the i_blkbits value for the file based on the OSTs it is - * striped over, rather than having a constant value for all files here. */ + * striped over, rather than having a constant value for all files here. + */ /* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled * by default, this should be adjusted corresponding with max_read_ahead_mb * and max_read_ahead_per_file_mb otherwise the readahead budget can be used - * up quickly which will affect read performance significantly. See LU-2816 */ + * up quickly which will affect read performance significantly. See LU-2816 + */ #define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT) static inline int stride_io_mode(struct ll_readahead_state *ras) @@ -570,7 +575,7 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs, if (end_left > st_pgs) end_left = st_pgs; - CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu \n", + CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n", start, end, start_left, end_left); if (start == end) @@ -600,7 +605,8 @@ static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria) /* If ria_length == ria_pages, it means non-stride I/O mode, * idx should always inside read-ahead window in this case * For stride I/O mode, just check whether the idx is inside - * the ria_pages. */ + * the ria_pages. 
+ */ return ria->ria_length == 0 || ria->ria_length == ria->ria_pages || (idx >= ria->ria_stoff && (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages); @@ -616,12 +622,12 @@ static int ll_read_ahead_pages(const struct lu_env *env, int rc, count = 0, stride_ria; unsigned long page_idx; - LASSERT(ria != NULL); + LASSERT(ria); RIA_DEBUG(ria); stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0; - for (page_idx = ria->ria_start; page_idx <= ria->ria_end && - *reserved_pages > 0; page_idx++) { + for (page_idx = ria->ria_start; + page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) { if (ras_inside_ra_window(page_idx, ria)) { /* If the page is inside the read-ahead window*/ rc = ll_read_ahead_page(env, io, queue, @@ -634,11 +640,13 @@ static int ll_read_ahead_pages(const struct lu_env *env, } else if (stride_ria) { /* If it is not in the read-ahead window, and it is * read-ahead mode, then check whether it should skip - * the stride gap */ + * the stride gap + */ pgoff_t offset; /* FIXME: This assertion only is valid when it is for * forward read-ahead, it will be fixed when backward - * read-ahead is implemented */ + * read-ahead is implemented + */ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx, ria->ria_start, ria->ria_end, ria->ria_stoff, @@ -647,7 +655,7 @@ static int ll_read_ahead_pages(const struct lu_env *env, offset = offset % (ria->ria_length); if (offset > ria->ria_pages) { page_idx += ria->ria_length - offset; - CDEBUG(D_READA, "i %lu skip %lu \n", page_idx, + CDEBUG(D_READA, "i %lu skip %lu\n", page_idx, ria->ria_length - offset); continue; } @@ -699,7 +707,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, bead = NULL; /* Enlarge the RA window to encompass the full read */ - if (bead != NULL && ras->ras_window_start + ras->ras_window_len < + if (bead && ras->ras_window_start + ras->ras_window_len < bead->lrr_start + bead->lrr_count) { ras->ras_window_len = bead->lrr_start + bead->lrr_count - ras->ras_window_start; @@ -721,7 +729,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, */ /* Note: we only trim the RPC, instead of extending the RPC * to the boundary, so to avoid reading too much pages during - * random reading. */ + * random reading. + */ rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1)); if (rpc_boundary > 0) rpc_boundary--; @@ -764,7 +773,6 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, mapping, &ra_end); - LASSERTF(reserved >= 0, "reserved %lu\n", reserved); if (reserved != 0) ll_ra_count_put(ll_i2sbi(inode), reserved); @@ -775,8 +783,9 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, * the ras we need to go back and update the ras so that the * next read-ahead tries from where we left off. 
we only do so * if the region we failed to issue read-ahead on is still ahead - * of the app and behind the next index to start read-ahead from */ - CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n", + * of the app and behind the next index to start read-ahead from + */ + CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n", ra_end, end, ria->ria_end); if (ra_end != end + 1) { @@ -860,7 +869,7 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras, unsigned long stride_gap = index - ras->ras_last_readpage - 1; if (!stride_io_mode(ras) && (stride_gap != 0 || - ras->ras_consecutive_stride_requests == 0)) { + ras->ras_consecutive_stride_requests == 0)) { ras->ras_stride_pages = ras->ras_consecutive_pages; ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages; } @@ -881,7 +890,8 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras, } /* Stride Read-ahead window will be increased inc_len according to - * stride I/O pattern */ + * stride I/O pattern + */ static void ras_stride_increase_window(struct ll_readahead_state *ras, struct ll_ra_info *ra, unsigned long inc_len) @@ -952,7 +962,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, * or reads to some other part of the file. Secondly if we get a * read-ahead miss that we think we've previously issued. This can * be a symptom of there being so many read-ahead pages that the VM is - * reclaiming it before we get to it. */ + * reclaiming it before we get to it. + */ if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) { zero = 1; ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE); @@ -969,7 +980,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, * file up to ra_max_pages_per_file. This is simply a best effort * and only occurs once per open file. Normal RA behavior is reverted * to for subsequent IO. The mmap case does not increment - * ras_requests and thus can never trigger this behavior. */ + * ras_requests and thus can never trigger this behavior. 
+ */ if (ras->ras_requests == 2 && !ras->ras_request_index) { __u64 kms_pages; @@ -1015,14 +1027,16 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, stride_io_mode(ras)) { /*If stride-RA hit cache miss, the stride dector *will not be reset to avoid the overhead of - *redetecting read-ahead mode */ + *redetecting read-ahead mode + */ if (index != ras->ras_last_readpage + 1) ras->ras_consecutive_pages = 0; ras_reset(inode, ras, index); RAS_CDEBUG(ras); } else { /* Reset both stride window and normal RA - * window */ + * window + */ ras_reset(inode, ras, index); ras->ras_consecutive_pages++; ras_stride_reset(ras); @@ -1031,7 +1045,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, } else if (stride_io_mode(ras)) { /* If this is contiguous read but in stride I/O mode * currently, check whether stride step still is valid, - * if invalid, it will reset the stride ra window*/ + * if invalid, it will reset the stride ra window + */ if (!index_in_stride_window(ras, index)) { /* Shrink stride read-ahead window to be zero */ ras_stride_reset(ras); @@ -1047,7 +1062,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, if (stride_io_mode(ras)) /* Since stride readahead is sensitive to the offset * of read-ahead, so we use original offset here, - * instead of ras_window_start, which is RPC aligned */ + * instead of ras_window_start, which is RPC aligned + */ ras->ras_next_readahead = max(index, ras->ras_next_readahead); else ras->ras_next_readahead = max(ras->ras_window_start, @@ -1055,7 +1071,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, RAS_CDEBUG(ras); /* Trigger RA in the mmap case where ras_consecutive_requests - * is not incremented and thus can't be used to trigger RA */ + * is not incremented and thus can't be used to trigger RA + */ if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) { ras->ras_window_len = RAS_INCREASE_STEP(inode); goto out_unlock; @@ -1101,7 +1118,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); - LASSERT(ll_i2dtexp(inode) != NULL); + LASSERT(ll_i2dtexp(inode)); env = cl_env_nested_get(&nest); if (IS_ERR(env)) { @@ -1110,7 +1127,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) } clob = ll_i2info(inode)->lli_clob; - LASSERT(clob != NULL); + LASSERT(clob); io = ccc_env_thread_io(env); io->ci_obj = clob; @@ -1153,14 +1170,16 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) /* Flush page failed because the extent is being written out. * Wait for the write of extent to be finished to avoid * breaking kernel which assumes ->writepage should mark - * PageWriteback or clean the page. */ + * PageWriteback or clean the page. + */ result = cl_sync_file_range(inode, offset, offset + PAGE_CACHE_SIZE - 1, CL_FSYNC_LOCAL, 1); if (result > 0) { /* actually we may have written more than one page. * decreasing this page because the caller will count - * it. */ + * it. + */ wbc->nr_to_write -= result - 1; result = 0; } @@ -1210,7 +1229,8 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (sbi->ll_umounting) /* if the mountpoint is being umounted, all pages have to be * evicted to avoid hitting LBUG when truncate_inode_pages() - * is called later on. */ + * is called later on. 
+ */ ignore_layout = 1; result = cl_sync_file_range(inode, start, end, mode, ignore_layout); if (result > 0) { diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c index 711fda93a58d..7a5db67bc680 100644 --- a/drivers/staging/lustre/lustre/llite/rw26.c +++ b/drivers/staging/lustre/lustre/llite/rw26.c @@ -92,9 +92,9 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset, if (!IS_ERR(env)) { inode = vmpage->mapping->host; obj = ll_i2info(inode)->lli_clob; - if (obj != NULL) { + if (obj) { page = cl_vmpage_page(vmpage, obj); - if (page != NULL) { + if (page) { lu_ref_add(&page->cp_reference, "delete", vmpage); cl_page_delete(env, page); @@ -109,12 +109,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset, } } -#ifdef HAVE_RELEASEPAGE_WITH_INT -#define RELEASEPAGE_ARG_TYPE int -#else -#define RELEASEPAGE_ARG_TYPE gfp_t -#endif -static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) +static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) { struct cl_env_nest nest; struct lu_env *env; @@ -128,11 +123,11 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) return 0; mapping = vmpage->mapping; - if (mapping == NULL) + if (!mapping) return 1; obj = ll_i2info(mapping->host)->lli_clob; - if (obj == NULL) + if (!obj) return 1; /* 1 for page allocator, 1 for cl_page and 1 for page cache */ @@ -145,12 +140,13 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) /* If we can't allocate an env we won't call cl_page_put() * later on which further means it's impossible to drop * page refcount by cl_page, so ask kernel to not free - * this page. */ + * this page. + */ return 0; page = cl_vmpage_page(vmpage, obj); - result = page == NULL; - if (page != NULL) { + result = !page; + if (page) { if (!cl_page_in_use(page)) { result = 1; cl_page_delete(env, page); @@ -212,7 +208,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, } /* ll_free_user_pages - tear down page struct array - * @pages: array of page struct pointers underlying target buffer */ + * @pages: array of page struct pointers underlying target buffer + */ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) { int i; @@ -246,7 +243,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, cl_2queue_init(queue); for (i = 0; i < page_count; i++) { if (pv->ldp_offsets) - file_offset = pv->ldp_offsets[i]; + file_offset = pv->ldp_offsets[i]; LASSERT(!(file_offset & (page_size - 1))); clp = cl_page_find(env, obj, cl_index(obj, file_offset), @@ -266,7 +263,8 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, do_io = true; /* check the page type: if the page is a host page, then do - * write directly */ + * write directly + */ if (clp->cp_type == CPT_CACHEABLE) { struct page *vmpage = cl_page_vmpage(env, clp); struct page *src_page; @@ -284,14 +282,16 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, kunmap_atomic(src); /* make sure page will be added to the transfer by - * cl_io_submit()->...->vvp_page_prep_write(). */ + * cl_io_submit()->...->vvp_page_prep_write(). + */ if (rw == WRITE) set_page_dirty(vmpage); if (rw == READ) { /* do not issue the page for read, since it * may reread a ra page which has NOT uptodate - * bit set. */ + * bit set. 
+ */ cl_page_disown(env, io, clp); do_io = false; } @@ -339,29 +339,25 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, size_t size, loff_t file_offset, struct page **pages, int page_count) { - struct ll_dio_pages pvec = { .ldp_pages = pages, - .ldp_nr = page_count, - .ldp_size = size, - .ldp_offsets = NULL, - .ldp_start_offset = file_offset - }; - - return ll_direct_rw_pages(env, io, rw, inode, &pvec); + struct ll_dio_pages pvec = { + .ldp_pages = pages, + .ldp_nr = page_count, + .ldp_size = size, + .ldp_offsets = NULL, + .ldp_start_offset = file_offset + }; + + return ll_direct_rw_pages(env, io, rw, inode, &pvec); } -#ifdef KMALLOC_MAX_SIZE -#define MAX_MALLOC KMALLOC_MAX_SIZE -#else -#define MAX_MALLOC (128 * 1024) -#endif - /* This is the maximum size of a single O_DIRECT request, based on the * kmalloc limit. We need to fit all of the brw_page structs, each one * representing PAGE_SIZE worth of user data, into a single buffer, and * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is - * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ -#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ - ~(DT_MAX_BRW_SIZE - 1)) + * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. + */ +#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \ + PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, loff_t file_offset) { @@ -396,7 +392,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, env = cl_env_get(&refcheck); LASSERT(!IS_ERR(env)); io = ccc_env_io(env)->cui_cl.cis_io; - LASSERT(io != NULL); + LASSERT(io); /* 0. Need locking between buffered and direct access. and race with * size changing by concurrent truncates and writes. @@ -433,7 +429,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, * for the request, shrink it to a smaller * PAGE_SIZE multiple and try again. * We should always be able to kmalloc for a - * page worth of page pointers = 4MB on i386. */ + * page worth of page pointers = 4MB on i386. 
+ */ if (result == -ENOMEM && size > (PAGE_CACHE_SIZE / sizeof(*pages)) * PAGE_CACHE_SIZE) { @@ -461,7 +458,7 @@ out: struct lov_stripe_md *lsm; lsm = ccc_inode_lsm_get(inode); - LASSERT(lsm != NULL); + LASSERT(lsm); lov_stripe_lock(lsm); obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0); lov_stripe_unlock(lsm); @@ -474,8 +471,8 @@ out: } static int ll_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_CACHE_SHIFT; struct page *page; @@ -512,8 +509,8 @@ static int ll_write_end(struct file *file, struct address_space *mapping, #ifdef CONFIG_MIGRATION static int ll_migratepage(struct address_space *mapping, - struct page *newpage, struct page *page, - enum migrate_mode mode + struct page *newpage, struct page *page, + enum migrate_mode mode ) { /* Always fail page migration until we have a proper implementation */ diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c index 88ffd8e3abdb..99ffd1589df8 100644 --- a/drivers/staging/lustre/lustre/llite/statahead.c +++ b/drivers/staging/lustre/lustre/llite/statahead.c @@ -49,13 +49,13 @@ #define SA_OMITTED_ENTRY_MAX 8ULL -typedef enum { +enum se_stat { /** negative values are for error cases */ SA_ENTRY_INIT = 0, /** init entry */ SA_ENTRY_SUCC = 1, /** stat succeed */ SA_ENTRY_INVA = 2, /** invalid entry */ SA_ENTRY_DEST = 3, /** entry to be destroyed */ -} se_stat_t; +}; struct ll_sa_entry { /* link into sai->sai_entries */ @@ -71,7 +71,7 @@ struct ll_sa_entry { /* low layer ldlm lock handle */ __u64 se_handle; /* entry status */ - se_stat_t se_stat; + enum se_stat se_stat; /* entry size, contains name */ int se_size; /* pointer to async getattr enqueue info */ @@ -130,7 +130,7 @@ ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) static inline int agl_should_run(struct ll_statahead_info *sai, struct inode *inode) { - return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid); + return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid); } static inline int sa_sent_full(struct ll_statahead_info *sai) @@ -284,7 +284,7 @@ ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index) } static void ll_sa_entry_cleanup(struct ll_statahead_info *sai, - struct ll_sa_entry *entry) + struct ll_sa_entry *entry) { struct md_enqueue_info *minfo = entry->se_minfo; struct ptlrpc_request *req = entry->se_req; @@ -303,7 +303,7 @@ static void ll_sa_entry_cleanup(struct ll_statahead_info *sai, } static void ll_sa_entry_put(struct ll_statahead_info *sai, - struct ll_sa_entry *entry) + struct ll_sa_entry *entry) { if (atomic_dec_and_test(&entry->se_refcount)) { CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n", @@ -366,7 +366,7 @@ ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) */ static void do_sa_entry_to_stated(struct ll_statahead_info *sai, - struct ll_sa_entry *entry, se_stat_t stat) + struct ll_sa_entry *entry, enum se_stat stat) { struct ll_sa_entry *se; struct list_head *pos = &sai->sai_entries_stated; @@ -392,7 +392,7 @@ do_sa_entry_to_stated(struct ll_statahead_info *sai, */ static int ll_sa_entry_to_stated(struct ll_statahead_info *sai, - struct ll_sa_entry *entry, se_stat_t stat) + struct ll_sa_entry *entry, enum se_stat stat) { struct ll_inode_info *lli = ll_i2info(sai->sai_inode); int ret = 1; @@ -494,12 +494,13 
@@ static void ll_sai_put(struct ll_statahead_info *sai) if (unlikely(atomic_read(&sai->sai_refcount) > 0)) { /* It is race case, the interpret callback just hold - * a reference count */ + * a reference count + */ spin_unlock(&lli->lli_sa_lock); return; } - LASSERT(lli->lli_opendir_key == NULL); + LASSERT(!lli->lli_opendir_key); LASSERT(thread_is_stopped(&sai->sai_thread)); LASSERT(thread_is_stopped(&sai->sai_agl_thread)); @@ -513,8 +514,8 @@ static void ll_sai_put(struct ll_statahead_info *sai) PFID(&lli->lli_fid), sai->sai_sent, sai->sai_replied); - list_for_each_entry_safe(entry, next, - &sai->sai_entries, se_link) + list_for_each_entry_safe(entry, next, &sai->sai_entries, + se_link) do_sa_entry_fini(sai, entry); LASSERT(list_empty(&sai->sai_entries)); @@ -618,20 +619,21 @@ static void ll_post_statahead(struct ll_statahead_info *sai) it = &minfo->mi_it; req = entry->se_req; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } child = entry->se_inode; - if (child == NULL) { + if (!child) { /* * lookup. */ LASSERT(fid_is_zero(&minfo->mi_data.op_fid2)); /* XXX: No fid in reply, this is probably cross-ref case. - * SA can't handle it yet. */ + * SA can't handle it yet. + */ if (body->valid & OBD_MD_MDS) { rc = -EAGAIN; goto out; @@ -672,7 +674,8 @@ out: /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock * reference count by calling "ll_intent_drop_lock()" in spite of the * above operations failed or not. Do not worry about calling - * "ll_intent_drop_lock()" more than once. */ + * "ll_intent_drop_lock()" more than once. + */ rc = ll_sa_entry_to_stated(sai, entry, rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC); if (rc == 0 && entry->se_index == sai->sai_index_wait) @@ -698,14 +701,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, /* release ibits lock ASAP to avoid deadlock when statahead * thread enqueues lock on parent in readdir and another * process enqueues lock on child with parent lock held, eg. - * unlink. */ + * unlink. + */ handle = it->d.lustre.it_lock_handle; ll_intent_drop_lock(it); } spin_lock(&lli->lli_sa_lock); /* stale entry */ - if (unlikely(lli->lli_sai == NULL || + if (unlikely(!lli->lli_sai || lli->lli_sai->sai_generation != minfo->mi_generation)) { spin_unlock(&lli->lli_sa_lock); rc = -ESTALE; @@ -720,7 +724,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, } entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata); - if (entry == NULL) { + if (!entry) { sai->sai_replied++; spin_unlock(&lli->lli_sa_lock); rc = -EIDRM; @@ -736,11 +740,12 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, /* Release the async ibits lock ASAP to avoid deadlock * when statahead thread tries to enqueue lock on parent * for readpage and other tries to enqueue lock on child - * with parent's lock held, for example: unlink. */ + * with parent's lock held, for example: unlink. 
+ */ entry->se_handle = handle; wakeup = list_empty(&sai->sai_entries_received); list_add_tail(&entry->se_list, - &sai->sai_entries_received); + &sai->sai_entries_received); } sai->sai_replied++; spin_unlock(&lli->lli_sa_lock); @@ -756,7 +761,7 @@ out: iput(dir); kfree(minfo); } - if (sai != NULL) + if (sai) ll_sai_put(sai); return rc; } @@ -853,7 +858,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry, struct ldlm_enqueue_info *einfo; int rc; - if (unlikely(inode == NULL)) + if (unlikely(!inode)) return 1; if (d_mountpoint(dentry)) @@ -908,10 +913,9 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name, rc = do_sa_revalidate(dir, entry, dentry); if (rc == 1 && agl_should_run(sai, d_inode(dentry))) ll_agl_add(sai, d_inode(dentry), entry->se_index); - } - if (dentry != NULL) dput(dentry); + } if (rc) { rc1 = ll_sa_entry_to_stated(sai, entry, @@ -948,7 +952,8 @@ static int ll_agl_thread(void *arg) if (thread_is_init(thread)) /* If someone else has changed the thread state * (e.g. already changed to SVC_STOPPING), we can't just - * blindly overwrite that setting. */ + * blindly overwrite that setting. + */ thread_set_flags(thread, SVC_RUNNING); spin_unlock(&plli->lli_agl_lock); wake_up(&thread->t_ctl_waitq); @@ -964,7 +969,8 @@ static int ll_agl_thread(void *arg) spin_lock(&plli->lli_agl_lock); /* The statahead thread maybe help to process AGL entries, - * so check whether list empty again. */ + * so check whether list empty again. + */ if (!list_empty(&sai->sai_entries_agl)) { clli = list_entry(sai->sai_entries_agl.next, struct ll_inode_info, lli_agl_list); @@ -1007,8 +1013,8 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai) sai, parent); plli = ll_i2info(d_inode(parent)); - task = kthread_run(ll_agl_thread, parent, - "ll_agl_%u", plli->lli_opendir_pid); + task = kthread_run(ll_agl_thread, parent, "ll_agl_%u", + plli->lli_opendir_pid); if (IS_ERR(task)) { CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task)); thread_set_flags(thread, SVC_STOPPED); @@ -1049,7 +1055,8 @@ static int ll_statahead_thread(void *arg) if (thread_is_init(thread)) /* If someone else has changed the thread state * (e.g. already changed to SVC_STOPPING), we can't just - * blindly overwrite that setting. */ + * blindly overwrite that setting. + */ thread_set_flags(thread, SVC_RUNNING); spin_unlock(&plli->lli_sa_lock); wake_up(&thread->t_ctl_waitq); @@ -1070,7 +1077,7 @@ static int ll_statahead_thread(void *arg) } dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL; + for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent)) { __u64 hash; int namelen; @@ -1137,7 +1144,8 @@ interpret_it: /* If no window for metadata statahead, but there are * some AGL entries to be triggered, then try to help - * to process the AGL entries. */ + * to process the AGL entries. 
+ */ if (sa_sent_full(sai)) { spin_lock(&plli->lli_agl_lock); while (!list_empty(&sai->sai_entries_agl)) { @@ -1274,7 +1282,7 @@ void ll_stop_statahead(struct inode *dir, void *key) { struct ll_inode_info *lli = ll_i2info(dir); - if (unlikely(key == NULL)) + if (unlikely(!key)) return; spin_lock(&lli->lli_sa_lock); @@ -1357,7 +1365,7 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry) } dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL; + for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent)) { __u64 hash; int namelen; @@ -1365,7 +1373,8 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry) hash = le64_to_cpu(ent->lde_hash); /* The ll_get_dir_page() can return any page containing - * the given hash which may be not the start hash. */ + * the given hash which may be not the start hash. + */ if (unlikely(hash < pos)) continue; @@ -1448,7 +1457,7 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry) struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); int hit; - if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC) + if (entry && entry->se_stat == SA_ENTRY_SUCC) hit = 1; else hit = 0; @@ -1498,6 +1507,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, struct ll_sa_entry *entry; struct ptlrpc_thread *thread; struct l_wait_info lwi = { 0 }; + struct task_struct *task; int rc = 0; struct ll_inode_info *plli; @@ -1540,7 +1550,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name); - if (entry == NULL || only_unplug) { + if (!entry || only_unplug) { ll_sai_unplug(sai, entry); return entry ? 1 : -EAGAIN; } @@ -1559,8 +1569,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } } - if (entry->se_stat == SA_ENTRY_SUCC && - entry->se_inode != NULL) { + if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) { struct inode *inode = entry->se_inode; struct lookup_intent it = { .it_op = IT_GETATTR, .d.lustre.it_lock_handle = @@ -1570,11 +1579,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode), &bits); if (rc == 1) { - if (d_inode(*dentryp) == NULL) { + if (!d_inode(*dentryp)) { struct dentry *alias; alias = ll_splice_alias(inode, - *dentryp); + *dentryp); if (IS_ERR(alias)) { ll_sai_unplug(sai, entry); return PTR_ERR(alias); @@ -1583,7 +1592,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } else if (d_inode(*dentryp) != inode) { /* revalidate, but inode is recreated */ CDEBUG(D_READA, - "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n", + "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n", *dentryp, d_inode(*dentryp)->i_ino, d_inode(*dentryp)->i_generation, @@ -1616,14 +1625,14 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } sai = ll_sai_alloc(); - if (sai == NULL) { + if (!sai) { rc = -ENOMEM; goto out; } sai->sai_ls_all = (rc == LS_FIRST_DOT_DE); sai->sai_inode = igrab(dir); - if (unlikely(sai->sai_inode == NULL)) { + if (unlikely(!sai->sai_inode)) { CWARN("Do not start stat ahead on dying inode "DFID"\n", PFID(&lli->lli_fid)); rc = -ESTALE; @@ -1651,25 +1660,28 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, * but as soon as we expose the sai by attaching it to the lli that * default reference can be dropped by another thread calling * ll_stop_statahead. 
We need to take a local reference to protect - * the sai buffer while we intend to access it. */ + * the sai buffer while we intend to access it. + */ ll_sai_get(sai); lli->lli_sai = sai; plli = ll_i2info(d_inode(parent)); - rc = PTR_ERR(kthread_run(ll_statahead_thread, parent, - "ll_sa_%u", plli->lli_opendir_pid)); + task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u", + plli->lli_opendir_pid); thread = &sai->sai_thread; - if (IS_ERR_VALUE(rc)) { + if (IS_ERR(task)) { + rc = PTR_ERR(task); CERROR("can't start ll_sa thread, rc: %d\n", rc); dput(parent); lli->lli_opendir_key = NULL; thread_set_flags(thread, SVC_STOPPED); thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED); /* Drop both our own local reference and the default - * reference from allocation time. */ + * reference from allocation time. + */ ll_sai_put(sai); ll_sai_put(sai); - LASSERT(lli->lli_sai == NULL); + LASSERT(!lli->lli_sai); return -EAGAIN; } diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c index 86c371ef71ea..61856d37afc5 100644 --- a/drivers/staging/lustre/lustre/llite/super25.c +++ b/drivers/staging/lustre/lustre/llite/super25.c @@ -53,8 +53,8 @@ static struct inode *ll_alloc_inode(struct super_block *sb) struct ll_inode_info *lli; ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1); - lli = kmem_cache_alloc(ll_inode_cachep, GFP_NOFS | __GFP_ZERO); - if (lli == NULL) + lli = kmem_cache_zalloc(ll_inode_cachep, GFP_NOFS); + if (!lli) return NULL; inode_init_once(&lli->lli_vfs_inode); @@ -89,7 +89,7 @@ MODULE_ALIAS_FS("lustre"); void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg)); -static int __init init_lustre_lite(void) +static int __init lustre_init(void) { lnet_process_id_t lnet_id; struct timespec64 ts; @@ -99,7 +99,8 @@ static int __init init_lustre_lite(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. + */ CDEBUG(D_INFO, "Lustre client module (%p).\n", &lustre_super_operations); @@ -108,26 +109,26 @@ static int __init init_lustre_lite(void) sizeof(struct ll_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, NULL); - if (ll_inode_cachep == NULL) + if (!ll_inode_cachep) goto out_cache; ll_file_data_slab = kmem_cache_create("ll_file_data", - sizeof(struct ll_file_data), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (ll_file_data_slab == NULL) + sizeof(struct ll_file_data), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!ll_file_data_slab) goto out_cache; ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache", sizeof(struct ll_remote_perm), 0, 0, NULL); - if (ll_remote_perm_cachep == NULL) + if (!ll_remote_perm_cachep) goto out_cache; ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache", REMOTE_PERM_HASHSIZE * sizeof(struct list_head), 0, 0, NULL); - if (ll_rmtperm_hash_cachep == NULL) + if (!ll_rmtperm_hash_cachep) goto out_cache; llite_root = debugfs_create_dir("llite", debugfs_lustre_root); @@ -146,7 +147,8 @@ static int __init init_lustre_lite(void) cfs_get_random_bytes(seed, sizeof(seed)); /* Nodes with small feet have little entropy. 
The NID for this - * node gives the most entropy in the low bits */ + * node gives the most entropy in the low bits + */ for (i = 0;; i++) { if (LNetGetId(i, &lnet_id) == -ENOENT) break; @@ -186,7 +188,7 @@ out_cache: return rc; } -static void __exit exit_lustre_lite(void) +static void __exit lustre_exit(void) { lustre_register_client_fill_super(NULL); lustre_register_kill_super_cb(NULL); @@ -207,8 +209,9 @@ static void __exit exit_lustre_lite(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Lite Client File System"); +MODULE_DESCRIPTION("Lustre Client File System"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -module_init(init_lustre_lite); -module_exit(exit_lustre_lite); +module_init(lustre_init); +module_exit(lustre_exit); diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c index 2610348f6c72..46d03ea48352 100644 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ b/drivers/staging/lustre/lustre/llite/symlink.c @@ -59,7 +59,8 @@ static int ll_readlink_internal(struct inode *inode, *symname = lli->lli_symlink_name; /* If the total CDEBUG() size is larger than a page, it * will print a warning to the console, avoid this by - * printing just the last part of the symlink. */ + * printing just the last part of the symlink. + */ CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n", print_limit < symlen ? "..." : "", print_limit, (*symname) + symlen - print_limit, symlen); @@ -81,7 +82,6 @@ static int ll_readlink_internal(struct inode *inode, } body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if ((body->valid & OBD_MD_LINKNAME) == 0) { CERROR("OBD_MD_LINKNAME not set on reply\n"); rc = -EPROTO; @@ -91,13 +91,13 @@ static int ll_readlink_internal(struct inode *inode, LASSERT(symlen != 0); if (body->eadatasize != symlen) { CERROR("inode %lu: symlink length %d not expected %d\n", - inode->i_ino, body->eadatasize - 1, symlen - 1); + inode->i_ino, body->eadatasize - 1, symlen - 1); rc = -EPROTO; goto failed; } *symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD); - if (*symname == NULL || + if (!*symname || strnlen(*symname, symlen) != symlen - 1) { /* not full/NULL terminated */ CERROR("inode %lu: symlink not NULL terminated string of length %d\n", diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c index fdca4ec0555d..282b70b776da 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_dev.c +++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c @@ -79,8 +79,8 @@ static void *vvp_key_init(const struct lu_context *ctx, { struct vvp_thread_info *info; - info = kmem_cache_alloc(vvp_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -98,8 +98,8 @@ static void *vvp_session_key_init(const struct lu_context *ctx, { struct vvp_session *session; - session = kmem_cache_alloc(vvp_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = kmem_cache_zalloc(vvp_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } @@ -228,7 +228,7 @@ int cl_sb_fini(struct super_block *sb) if (!IS_ERR(env)) { cld = sbi->ll_cl; - if (cld != NULL) { + if (cld) { cl_stack_fini(env, cld); sbi->ll_cl = NULL; sbi->ll_site = NULL; @@ -325,11 +325,11 @@ static struct cl_object *vvp_pgcache_obj(const struct lu_env *env, 
cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket, vvp_pgcache_obj_get, id); - if (id->vpi_obj != NULL) { + if (id->vpi_obj) { struct lu_object *lu_obj; lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type); - if (lu_obj != NULL) { + if (lu_obj) { lu_object_ref_add(lu_obj, "dump", current); return lu2cl(lu_obj); } @@ -355,7 +355,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env, if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash)) return ~0ULL; clob = vvp_pgcache_obj(env, dev, &id); - if (clob != NULL) { + if (clob) { struct cl_object_header *hdr; int nr; struct cl_page *pg; @@ -443,7 +443,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v) vvp_pgcache_id_unpack(pos, &id); sbi = f->private; clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id); - if (clob != NULL) { + if (clob) { hdr = cl_object_header(clob); spin_lock(&hdr->coh_page_guard); @@ -452,7 +452,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v) seq_printf(f, "%8x@"DFID": ", id.vpi_index, PFID(&hdr->coh_lu.loh_fid)); - if (page != NULL) { + if (page) { vvp_pgcache_page_show(env, f, page); cl_page_put(env, page); } else diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h index 2e39533a45f8..bb393378c9bb 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_internal.h +++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h @@ -44,14 +44,13 @@ #include "../include/cl_object.h" #include "llite_internal.h" -int vvp_io_init (const struct lu_env *env, - struct cl_object *obj, struct cl_io *io); -int vvp_lock_init (const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, +int vvp_io_init(const struct lu_env *env, + struct cl_object *obj, struct cl_io *io); +int vvp_lock_init(const struct lu_env *env, + struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); -int vvp_page_init (const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); +int vvp_page_init(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); struct lu_object *vvp_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 0920ac6b3003..fb0c26ee7ff3 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -68,7 +68,7 @@ int cl_is_normalio(const struct lu_env *env, const struct cl_io *io) * have to acquire group lock. 
*/ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, - struct inode *inode) + struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); struct ccc_io *cio = ccc_env_io(env); @@ -78,7 +78,8 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, case CIT_READ: case CIT_WRITE: /* don't need lock here to check lli_layout_gen as we have held - * extent lock and GROUP lock has to hold to swap layout */ + * extent lock and GROUP lock has to hold to swap layout + */ if (ll_layout_version_get(lli) != cio->cui_layout_gen) { io->ci_need_restart = 1; /* this will return application a short read/write */ @@ -134,7 +135,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) */ rc = ll_layout_restore(ccc_object_inode(obj)); /* if restore registration failed, no restart, - * we will return -ENODATA */ + * we will return -ENODATA + */ /* The layout will change after restore, so we need to * block on layout lock hold by the MDT * as MDT will not send new layout in lvb (see LU-3124) @@ -164,8 +166,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) DFID" layout changed from %d to %d.\n", PFID(lu_object_fid(&obj->co_lu)), cio->cui_layout_gen, gen); - /* today successful restore is the only possible - * case */ + /* today successful restore is the only possible case */ /* restore was done, clear restoring state */ ll_i2info(ccc_object_inode(obj))->lli_flags &= ~LLIF_FILE_RESTORING; @@ -181,7 +182,7 @@ static void vvp_io_fault_fini(const struct lu_env *env, CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj)); - if (page != NULL) { + if (page) { lu_ref_del(&page->cp_reference, "fault", io); cl_page_put(env, page); io->u.ci_fault.ft_page = NULL; @@ -220,11 +221,11 @@ static int vvp_mmap_locks(const struct lu_env *env, if (!cl_is_normalio(env, io)) return 0; - if (vio->cui_iter == NULL) /* nfs or loop back device write */ + if (!vio->cui_iter) /* nfs or loop back device write */ return 0; /* No MM (e.g. NFS)? No vmas too. */ - if (mm == NULL) + if (!mm) return 0; iov_for_each(iov, i, *(vio->cui_iter)) { @@ -456,7 +457,8 @@ static void vvp_io_setattr_end(const struct lu_env *env, if (cl_io_is_trunc(io)) /* Truncate in memory pages - they must be clean pages - * because osc has already notified to destroy osc_extents. */ + * because osc has already notified to destroy osc_extents. + */ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); inode_unlock(inode); @@ -499,8 +501,8 @@ static int vvp_io_read_start(const struct lu_env *env, goto out; LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, - "Read ino %lu, %lu bytes, offset %lld, size %llu\n", - inode->i_ino, cnt, pos, i_size_read(inode)); + "Read ino %lu, %lu bytes, offset %lld, size %llu\n", + inode->i_ino, cnt, pos, i_size_read(inode)); /* turn off the kernel's read-ahead */ cio->cui_fd->fd_file->f_ra.ra_pages = 0; @@ -525,11 +527,12 @@ static int vvp_io_read_start(const struct lu_env *env, break; case IO_SPLICE: result = generic_file_splice_read(file, &pos, - vio->u.splice.cui_pipe, cnt, - vio->u.splice.cui_flags); + vio->u.splice.cui_pipe, cnt, + vio->u.splice.cui_flags); /* LU-1109: do splice read stripe by stripe otherwise if it * may make nfsd stuck if this read occupied all internal pipe - * buffers. */ + * buffers. 
+ */ io->ci_continue = 0; break; default: @@ -587,7 +590,7 @@ static int vvp_io_write_start(const struct lu_env *env, CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); - if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */ + if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */ result = 0; else result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter); @@ -673,7 +676,7 @@ static int vvp_io_fault_start(const struct lu_env *env, /* must return locked page */ if (fio->ft_mkwrite) { - LASSERT(cfio->ft_vmpage != NULL); + LASSERT(cfio->ft_vmpage); lock_page(cfio->ft_vmpage); } else { result = vvp_io_kernel_fault(cfio); @@ -689,13 +692,15 @@ static int vvp_io_fault_start(const struct lu_env *env, size = i_size_read(inode); /* Though we have already held a cl_lock upon this page, but - * it still can be truncated locally. */ + * it still can be truncated locally. + */ if (unlikely((vmpage->mapping != inode->i_mapping) || (page_offset(vmpage) > size))) { CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n"); /* return +1 to stop cl_io_loop() and ll_fault() will catch - * and retry. */ + * and retry. + */ result = 1; goto out; } @@ -736,7 +741,8 @@ static int vvp_io_fault_start(const struct lu_env *env, } /* if page is going to be written, we should add this page into cache - * earlier. */ + * earlier. + */ if (fio->ft_mkwrite) { wait_on_page_writeback(vmpage); if (set_page_dirty(vmpage)) { @@ -750,7 +756,8 @@ static int vvp_io_fault_start(const struct lu_env *env, /* Do not set Dirty bit here so that in case IO is * started before the page is really made dirty, we - * still have chance to detect it. */ + * still have chance to detect it. + */ result = cl_page_cache_add(env, io, page, CRT_WRITE); LASSERT(cl_page_is_owned(page, io)); @@ -792,7 +799,7 @@ static int vvp_io_fault_start(const struct lu_env *env, out: /* return unlocked vmpage to avoid deadlocking */ - if (vmpage != NULL) + if (vmpage) unlock_page(vmpage); cfio->fault.ft_flags &= ~VM_FAULT_LOCKED; return result; @@ -803,7 +810,8 @@ static int vvp_io_fsync_start(const struct lu_env *env, { /* we should mark TOWRITE bit to each dirty page in radix tree to * verify pages have been written, but this is difficult because of - * race. */ + * race. + */ return 0; } @@ -1003,7 +1011,7 @@ static int vvp_io_commit_write(const struct lu_env *env, * * (3) IO is batched up to the RPC size and is async until the * client max cache is hit - * (/proc/fs/lustre/osc/OSC.../max_dirty_mb) + * (/sys/fs/lustre/osc/OSC.../max_dirty_mb) * */ if (!PageDirty(vmpage)) { @@ -1153,7 +1161,8 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, count = io->u.ci_rw.crw_count; /* "If nbyte is 0, read() will return 0 and have no other - * results." -- Single Unix Spec */ + * results." -- Single Unix Spec + */ if (count == 0) result = 1; else @@ -1173,25 +1182,28 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, /* ignore layout change for generic CIT_MISC but not for glimpse. * io context for glimpse must set ci_verify_layout to true, - * see cl_glimpse_size0() for details. */ + * see cl_glimpse_size0() for details. + */ if (io->ci_type == CIT_MISC && !io->ci_verify_layout) io->ci_ignore_layout = 1; /* Enqueue layout lock and get layout version. We need to do this * even for operations requiring to open file, such as read and write, - * because it might not grant layout lock in IT_OPEN. */ + * because it might not grant layout lock in IT_OPEN. 
+ */ if (result == 0 && !io->ci_ignore_layout) { result = ll_layout_refresh(inode, &cio->cui_layout_gen); if (result == -ENOENT) /* If the inode on MDS has been removed, but the objects * on OSTs haven't been destroyed (async unlink), layout * fetch will return -ENOENT, we'd ignore this error - * and continue with dirty flush. LU-3230. */ + * and continue with dirty flush. LU-3230. + */ result = 0; if (result < 0) CERROR("%s: refresh file layout " DFID " error %d.\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(lu_object_fid(&obj->co_lu)), result); + ll_get_fsname(inode->i_sb, NULL, 0), + PFID(lu_object_fid(&obj->co_lu)), result); } return result; diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c index c82714ea898e..03c887d8ed83 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_object.c +++ b/drivers/staging/lustre/lustre/llite/vvp_object.c @@ -137,7 +137,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, * page may be stale due to layout change, and the process * will never be notified. * This operation is expensive but mmap processes have to pay - * a price themselves. */ + * a price themselves. + */ unmap_mapping_range(conf->coc_inode->i_mapping, 0, OBD_OBJECT_EOF, 0); @@ -147,7 +148,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, if (conf->coc_opc != OBJECT_CONF_SET) return 0; - if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) { + if (conf->u.coc_md && conf->u.coc_md->lsm) { CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n", PFID(&lli->lli_fid), lli->lli_layout_gen, conf->u.coc_md->lsm->lsm_layout_gen); @@ -186,9 +187,8 @@ struct ccc_object *cl_inode2ccc(struct inode *inode) struct cl_object *obj = lli->lli_clob; struct lu_object *lu; - LASSERT(obj != NULL); lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type); - LASSERT(lu != NULL); + LASSERT(lu); return lu2ccc(lu); } diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c index a133475a7c74..850bae734075 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_page.c +++ b/drivers/staging/lustre/lustre/llite/vvp_page.c @@ -56,7 +56,7 @@ static void vvp_page_fini_common(struct ccc_page *cp) { struct page *vmpage = cp->cpg_page; - LASSERT(vmpage != NULL); + LASSERT(vmpage); page_cache_release(vmpage); } @@ -81,7 +81,7 @@ static int vvp_page_own(const struct lu_env *env, struct ccc_page *vpg = cl2ccc_page(slice); struct page *vmpage = vpg->cpg_page; - LASSERT(vmpage != NULL); + LASSERT(vmpage); if (nonblock) { if (!trylock_page(vmpage)) return -EAGAIN; @@ -105,7 +105,7 @@ static void vvp_page_assume(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); wait_on_page_writeback(vmpage); } @@ -116,7 +116,7 @@ static void vvp_page_unassume(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); } @@ -125,7 +125,7 @@ static void vvp_page_disown(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); unlock_page(cl2vm_page(slice)); @@ -139,7 +139,7 @@ static void vvp_page_discard(const struct lu_env *env, struct address_space *mapping; struct ccc_page *cpg = cl2ccc_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); mapping = vmpage->mapping; @@ 
-161,7 +161,7 @@ static int vvp_page_unmap(const struct lu_env *env, struct page *vmpage = cl2vm_page(slice); __u64 offset; - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); offset = vmpage->index << PAGE_CACHE_SHIFT; @@ -199,7 +199,7 @@ static void vvp_page_export(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); if (uptodate) SetPageUptodate(vmpage); @@ -232,7 +232,8 @@ static int vvp_page_prep_write(const struct lu_env *env, LASSERT(!PageDirty(vmpage)); /* ll_writepage path is not a sync write, so need to set page writeback - * flag */ + * flag + */ if (!pg->cp_sync_io) set_page_writeback(vmpage); @@ -262,7 +263,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret set_bit(AS_EIO, &inode->i_mapping->flags); if ((ioret == -ESHUTDOWN || ioret == -EINTR) && - obj->cob_discard_page_warned == 0) { + obj->cob_discard_page_warned == 0) { obj->cob_discard_page_warned = 1; ll_dirty_page_discard_warn(vmpage, ioret); } @@ -290,7 +291,7 @@ static void vvp_page_completion_read(const struct lu_env *env, } else cp->cpg_defer_uptodate = 0; - if (page->cp_sync_io == NULL) + if (!page->cp_sync_io) unlock_page(vmpage); } @@ -317,7 +318,7 @@ static void vvp_page_completion_write(const struct lu_env *env, cp->cpg_write_queued = 0; vvp_write_complete(cl2ccc(slice->cpl_obj), cp); - if (pg->cp_sync_io != NULL) { + if (pg->cp_sync_io) { LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); } else { @@ -356,15 +357,14 @@ static int vvp_page_make_ready(const struct lu_env *env, lock_page(vmpage); if (clear_page_dirty_for_io(vmpage)) { LASSERT(pg->cp_state == CPS_CACHED); - /* This actually clears the dirty bit in the radix - * tree. */ + /* This actually clears the dirty bit in the radix tree. */ set_page_writeback(vmpage); - vvp_write_pending(cl2ccc(slice->cpl_obj), - cl2ccc_page(slice)); + vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice)); CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); } else if (pg->cp_state == CPS_PAGEOUT) { /* is it possible for osc_flush_async_page() to already - * make it ready? */ + * make it ready? 
+ */ result = -EALREADY; } else { CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n", @@ -385,7 +385,7 @@ static int vvp_page_print(const struct lu_env *env, (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ", vp, vp->cpg_defer_uptodate, vp->cpg_ra_used, vp->cpg_write_queued, vmpage); - if (vmpage != NULL) { + if (vmpage) { (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru", (long)vmpage->flags, page_count(vmpage), page_mapcount(vmpage), vmpage->private, @@ -530,7 +530,7 @@ static const struct cl_page_operations vvp_transient_page_ops = { }; int vvp_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct ccc_page *cpg = cl_object_page_slice(obj, page); @@ -543,14 +543,13 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj, if (page->cp_type == CPT_CACHEABLE) { SetPagePrivate(vmpage); vmpage->private = (unsigned long)page; - cl_page_slice_add(page, &cpg->cpg_cl, obj, - &vvp_page_ops); + cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops); } else { struct ccc_object *clobj = cl2ccc(obj); LASSERT(!inode_trylock(clobj->cob_inode)); cl_page_slice_add(page, &cpg->cpg_cl, obj, - &vvp_transient_page_ops); + &vvp_transient_page_ops); clobj->cob_transient_pages++; } return 0; diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index 8eb43f192d1f..b68dcc921ca2 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c @@ -135,7 +135,7 @@ int ll_setxattr_common(struct inode *inode, const char *name, /* b15587: ignore security.capability xattr for now */ if ((xattr_type == XATTR_SECURITY_T && - strcmp(name, "security.capability") == 0)) + strcmp(name, "security.capability") == 0)) return 0; /* LU-549: Disable security.selinux when selinux is disabled */ @@ -148,7 +148,7 @@ int ll_setxattr_common(struct inode *inode, const char *name, (xattr_type == XATTR_ACL_ACCESS_T || xattr_type == XATTR_ACL_DEFAULT_T)) { rce = rct_search(&sbi->ll_rct, current_pid()); - if (rce == NULL || + if (!rce || (rce->rce_ops != RMT_LSETFACL && rce->rce_ops != RMT_RSETFACL)) return -EOPNOTSUPP; @@ -158,7 +158,6 @@ int ll_setxattr_common(struct inode *inode, const char *name, ee = et_search_del(&sbi->ll_et, current_pid(), ll_inode2fid(inode), xattr_type); - LASSERT(ee != NULL); if (valid & OBD_MD_FLXATTR) { acl = lustre_acl_xattr_merge2ext( (posix_acl_xattr_header *)value, @@ -192,12 +191,11 @@ int ll_setxattr_common(struct inode *inode, const char *name, valid, name, pv, size, 0, flags, ll_i2suppgid(inode), &req); #ifdef CONFIG_FS_POSIX_ACL - if (new_value != NULL) - /* - * Release the posix ACL space. - */ - kfree(new_value); - if (acl != NULL) + /* + * Release the posix ACL space. + */ + kfree(new_value); + if (acl) lustre_ext_acl_xattr_free(acl); #endif if (rc) { @@ -239,11 +237,12 @@ int ll_setxattr(struct dentry *dentry, const char *name, /* Attributes that are saved via getxattr will always have * the stripe_offset as 0. Instead, the MDS should be - * allowed to pick the starting OST index. b=17846 */ - if (lump != NULL && lump->lmm_stripe_offset == 0) + * allowed to pick the starting OST index. b=17846 + */ + if (lump && lump->lmm_stripe_offset == 0) lump->lmm_stripe_offset = -1; - if (lump != NULL && S_ISREG(inode->i_mode)) { + if (lump && S_ISREG(inode->i_mode)) { int flags = FMODE_WRITE; int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ? 
sizeof(*lump) : sizeof(struct lov_user_md_v3); @@ -312,7 +311,7 @@ int ll_getxattr_common(struct inode *inode, const char *name, /* b15587: ignore security.capability xattr for now */ if ((xattr_type == XATTR_SECURITY_T && - strcmp(name, "security.capability") == 0)) + strcmp(name, "security.capability") == 0)) return -ENODATA; /* LU-549: Disable security.selinux when selinux is disabled */ @@ -325,7 +324,7 @@ int ll_getxattr_common(struct inode *inode, const char *name, (xattr_type == XATTR_ACL_ACCESS_T || xattr_type == XATTR_ACL_DEFAULT_T)) { rce = rct_search(&sbi->ll_rct, current_pid()); - if (rce == NULL || + if (!rce || (rce->rce_ops != RMT_LSETFACL && rce->rce_ops != RMT_LGETFACL && rce->rce_ops != RMT_RSETFACL && @@ -366,7 +365,7 @@ do_getxattr: goto out_xattr; /* Add "system.posix_acl_access" to the list */ - if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) { + if (lli->lli_posix_acl && valid & OBD_MD_FLXATTRLS) { if (size == 0) { rc += sizeof(XATTR_NAME_ACL_ACCESS); } else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) { @@ -398,7 +397,7 @@ getxattr_nocache: if (size < body->eadatasize) { CERROR("server bug: replied size %u > %u\n", - body->eadatasize, (int)size); + body->eadatasize, (int)size); rc = -ERANGE; goto out; } @@ -410,7 +409,7 @@ getxattr_nocache: /* do not need swab xattr data */ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, - body->eadatasize); + body->eadatasize); if (!xdata) { rc = -EFAULT; goto out; @@ -482,13 +481,14 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, if (size == 0 && S_ISDIR(inode->i_mode)) { /* XXX directory EA is fix for now, optimize to save - * RPC transfer */ + * RPC transfer + */ rc = sizeof(struct lov_user_md); goto out; } lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) { + if (!lsm) { if (S_ISDIR(inode->i_mode)) { rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request); @@ -497,7 +497,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, } } else { /* LSM is present already after lookup/getattr call. - * we need to grab layout lock once it is implemented */ + * we need to grab layout lock once it is implemented + */ rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm); lmmsize = rc; } @@ -510,7 +511,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, /* used to call ll_get_max_mdsize() forward to get * the maximum buffer size, while some apps (such as * rsync 3.0.x) care much about the exact xattr value - * size */ + * size + */ rc = lmmsize; goto out; } @@ -526,7 +528,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, memcpy(lump, lmm, lmmsize); /* do not return layout gen for getxattr otherwise it would * confuse tar --xattr by recognizing layout gen as stripe - * offset when the file is restored. See LU-2809. */ + * offset when the file is restored. See LU-2809. 
+ */ lump->lmm_layout_gen = 0; rc = lmmsize; @@ -560,7 +563,7 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) if (rc < 0) goto out; - if (buffer != NULL) { + if (buffer) { struct ll_sb_info *sbi = ll_i2sbi(inode); char *xattr_name = buffer; int xlen, rem = rc; @@ -598,12 +601,12 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) const size_t name_len = sizeof("lov") - 1; const size_t total_len = prefix_len + name_len + 1; - if (((rc + total_len) > size) && (buffer != NULL)) { + if (((rc + total_len) > size) && buffer) { ptlrpc_req_finished(request); return -ERANGE; } - if (buffer != NULL) { + if (buffer) { buffer += rc; memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len); memcpy(buffer + prefix_len, "lov", name_len); diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c index d1402762a0b2..3480ce2bb3cc 100644 --- a/drivers/staging/lustre/lustre/llite/xattr_cache.c +++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c @@ -23,7 +23,8 @@ */ struct ll_xattr_entry { struct list_head xe_list; /* protected with - * lli_xattrs_list_rwsem */ + * lli_xattrs_list_rwsem + */ char *xe_name; /* xattr name, \0-terminated */ char *xe_value; /* xattr value */ unsigned xe_namelen; /* strlen(xe_name) + 1 */ @@ -59,9 +60,6 @@ void ll_xattr_fini(void) */ static void ll_xattr_cache_init(struct ll_inode_info *lli) { - - LASSERT(lli != NULL); - INIT_LIST_HEAD(&lli->lli_xattrs); lli->lli_flags |= LLIF_XATTR_CACHE; } @@ -83,8 +81,7 @@ static int ll_xattr_cache_find(struct list_head *cache, list_for_each_entry(entry, cache, xe_list) { /* xattr_name == NULL means look for any entry */ - if (xattr_name == NULL || - strcmp(xattr_name, entry->xe_name) == 0) { + if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) { *xattr = entry; CDEBUG(D_CACHE, "find: [%s]=%.*s\n", entry->xe_name, entry->xe_vallen, @@ -117,8 +114,8 @@ static int ll_xattr_cache_add(struct list_head *cache, return -EPROTO; } - xattr = kmem_cache_alloc(xattr_kmem, GFP_NOFS | __GFP_ZERO); - if (xattr == NULL) { + xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS); + if (!xattr) { CDEBUG(D_CACHE, "failed to allocate xattr\n"); return -ENOMEM; } @@ -136,8 +133,8 @@ static int ll_xattr_cache_add(struct list_head *cache, xattr->xe_vallen = xattr_val_len; list_add(&xattr->xe_list, cache); - CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, - xattr_val_len, xattr_val); + CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len, + xattr_val); return 0; err_value: @@ -194,7 +191,7 @@ static int ll_xattr_cache_list(struct list_head *cache, list_for_each_entry_safe(xattr, tmp, cache, xe_list) { CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n", - xld_buffer, xld_tail, xattr->xe_name); + xld_buffer, xld_tail, xattr->xe_name); if (xld_buffer) { xld_size -= xattr->xe_namelen; @@ -270,7 +267,7 @@ static int ll_xattr_find_get_lock(struct inode *inode, struct lookup_intent *oit, struct ptlrpc_request **req) { - ldlm_mode_t mode; + enum ldlm_mode mode; struct lustre_handle lockh = { 0 }; struct md_op_data *op_data; struct ll_inode_info *lli = ll_i2info(inode); @@ -284,7 +281,8 @@ static int ll_xattr_find_get_lock(struct inode *inode, mutex_lock(&lli->lli_xattrs_enq_lock); /* inode may have been shrunk and recreated, so data is gone, match lock - * only when data exists. */ + * only when data exists. + */ if (ll_xattr_cache_valid(lli)) { /* Try matching first. 
*/ mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0, @@ -359,7 +357,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) } /* Matched but no cache? Cancelled on error by a parallel refill. */ - if (unlikely(req == NULL)) { + if (unlikely(!req)) { CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n"); rc = -EIO; goto out_maybe_drop; @@ -376,19 +374,19 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { CERROR("no MDT BODY in the refill xattr reply\n"); rc = -EPROTO; goto out_destroy; } /* do not need swab xattr data */ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, - body->eadatasize); + body->eadatasize); xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS, - body->aclsize); + body->aclsize); xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS, body->max_mdsize * sizeof(__u32)); - if (xdata == NULL || xval == NULL || xsizes == NULL) { + if (!xdata || !xval || !xsizes) { CERROR("wrong setxattr reply\n"); rc = -EPROTO; goto out_destroy; @@ -404,7 +402,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) for (i = 0; i < body->max_mdsize; i++) { CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval); /* Perform consistency checks: attr names and vals in pill */ - if (memchr(xdata, 0, xtail - xdata) == NULL) { + if (!memchr(xdata, 0, xtail - xdata)) { CERROR("xattr protocol violation (names are broken)\n"); rc = -EPROTO; } else if (xval + *xsizes > xvtail) { @@ -471,11 +469,8 @@ out_destroy: * \retval -ERANGE the buffer is not large enough * \retval -ENODATA no such attr or the list is empty */ -int ll_xattr_cache_get(struct inode *inode, - const char *name, - char *buffer, - size_t size, - __u64 valid) +int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer, + size_t size, __u64 valid) { struct lookup_intent oit = { .it_op = IT_GETXATTR }; struct ll_inode_info *lli = ll_i2info(inode); @@ -504,7 +499,7 @@ int ll_xattr_cache_get(struct inode *inode, if (size != 0) { if (size >= xattr->xe_vallen) memcpy(buffer, xattr->xe_value, - xattr->xe_vallen); + xattr->xe_vallen); else rc = -ERANGE; } diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c index ee235926f52b..378691b2a062 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c @@ -58,7 +58,8 @@ int lmv_fld_lookup(struct lmv_obd *lmv, int rc; /* FIXME: Currently ZFS still use local seq for ROOT unfortunately, and - * this fid_is_local check should be removed once LU-2240 is fixed */ + * this fid_is_local check should be removed once LU-2240 is fixed + */ LASSERTF((fid_seq_in_fldb(fid_seq(fid)) || fid_seq_is_local_file(fid_seq(fid))) && fid_is_sane(fid), DFID" is insane!\n", PFID(fid)); diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c index 66de27f1d289..e0958eaed054 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c @@ -69,7 +69,7 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm, int rc = 0; body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; LASSERT((body->valid & OBD_MD_MDS)); @@ -107,14 +107,16 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm, 
op_data->op_fid1 = body->fid1; /* Sent the parent FID to the remote MDT */ - if (parent_fid != NULL) { + if (parent_fid) { /* The parent fid is only for remote open to * check whether the open is from OBF, - * see mdt_cross_open */ + * see mdt_cross_open + */ LASSERT(it->it_op & IT_OPEN); op_data->op_fid2 = *parent_fid; /* Add object FID to op_fid3, in case it needs to check stale - * (M_CHECK_STALE), see mdc_finish_intent_lock */ + * (M_CHECK_STALE), see mdc_finish_intent_lock + */ op_data->op_fid3 = body->fid1; } @@ -173,7 +175,8 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data, return PTR_ERR(tgt); /* If it is ready to open the file by FID, do not need - * allocate FID at all, otherwise it will confuse MDT */ + * allocate FID at all, otherwise it will confuse MDT + */ if ((it->it_op & IT_CREAT) && !(it->it_flags & MDS_OPEN_BY_FID)) { /* @@ -204,7 +207,7 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data, return rc; body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* * Not cross-ref case, just get out of here. @@ -268,9 +271,9 @@ static int lmv_intent_lookup(struct obd_export *exp, op_data->op_bias &= ~MDS_CROSS_REF; rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, - flags, reqp, cb_blocking, extra_lock_flags); + flags, reqp, cb_blocking, extra_lock_flags); - if (rc < 0 || *reqp == NULL) + if (rc < 0 || !*reqp) return rc; /* @@ -278,7 +281,7 @@ static int lmv_intent_lookup(struct obd_export *exp, * remote inode. Let's check this. */ body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* Not cross-ref case, just get out of here. */ if (likely(!(body->valid & OBD_MD_MDS))) @@ -299,7 +302,6 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data, struct obd_device *obd = exp->exp_obd; int rc; - LASSERT(it != NULL); LASSERT(fid_is_sane(&op_data->op_fid1)); CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n", diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h index eb8e673cbc3f..8a0087190e23 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h +++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h @@ -66,7 +66,7 @@ static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req) struct mdt_body *body; struct lmv_stripe_md *mea; - LASSERT(req != NULL); + LASSERT(req); body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); @@ -75,13 +75,11 @@ static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req) mea = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, body->eadatasize); - LASSERT(mea != NULL); - if (mea->mea_count == 0) return NULL; if (mea->mea_magic != MEA_MAGIC_LAST_CHAR && - mea->mea_magic != MEA_MAGIC_ALL_CHARS && - mea->mea_magic != MEA_MAGIC_HASH_SEGMENT) + mea->mea_magic != MEA_MAGIC_ALL_CHARS && + mea->mea_magic != MEA_MAGIC_HASH_SEGMENT) return NULL; return mea; @@ -101,7 +99,7 @@ lmv_get_target(struct lmv_obd *lmv, u32 mds) int i; for (i = 0; i < count; i++) { - if (lmv->tgts[i] == NULL) + if (!lmv->tgts[i]) continue; if (lmv->tgts[i]->ltd_idx == mds) diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c index bbafe0a710d8..0f776cf8a5aa 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c @@ -53,6 +53,7 @@ #include "../include/lprocfs_status.h" 
#include "../include/lustre_lite.h" #include "../include/lustre_fid.h" +#include "../include/lustre_kernelcomm.h" #include "lmv_internal.h" static void lmv_activate_target(struct lmv_obd *lmv, @@ -87,7 +88,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, spin_lock(&lmv->lmv_lock); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i, @@ -103,7 +104,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, } obd = class_exp2obd(tgt->ltd_exp); - if (obd == NULL) { + if (!obd) { rc = -ENOTCONN; goto out_lmv_lock; } @@ -237,7 +238,7 @@ static int lmv_connect(const struct lu_env *env, * and MDC stuff will be called directly, for instance while reading * ../mdc/../kbytesfree procfs file, etc. */ - if (data->ocd_connect_flags & OBD_CONNECT_REAL) + if (data && data->ocd_connect_flags & OBD_CONNECT_REAL) rc = lmv_check_connect(obd); if (rc && lmv->lmv_tgts_kobj) @@ -261,7 +262,7 @@ static void lmv_set_timeouts(struct obd_device *obd) for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0) + if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0) continue; obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS), @@ -301,8 +302,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize, return 0; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) { CWARN("%s: NULL export for %d\n", obd->obd_name, i); continue; @@ -311,7 +311,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize, rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize, cookiesize, def_cookiesize); if (rc) { - CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d.\n", + CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n", obd->obd_name, i, rc); break; } @@ -339,9 +339,8 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) } CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, - cluuid->uuid); + mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, + tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, cluuid->uuid); if (!mdc_obd->obd_set_up) { CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid); @@ -397,8 +396,8 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) lmv->max_cookiesize, lmv->max_def_cookiesize); CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - atomic_read(&obd->obd_refcount)); + mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, + atomic_read(&obd->obd_refcount)); if (lmv->lmv_tgts_kobj) /* Even if we failed to create the link, that's fine */ @@ -409,7 +408,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) static void lmv_del_target(struct lmv_obd *lmv, int index) { - if (lmv->tgts[index] == NULL) + if (!lmv->tgts[index]) return; kfree(lmv->tgts[index]); @@ -418,7 +417,7 @@ static void lmv_del_target(struct lmv_obd *lmv, int index) } static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, - __u32 index, int gen) + __u32 index, int gen) { struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; @@ -441,7 +440,7 @@ static int 
lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, } } - if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) { + if ((index < lmv->tgts_size) && lmv->tgts[index]) { tgt = lmv->tgts[index]; CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n", obd->obd_name, @@ -459,7 +458,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, while (newsize < index + 1) newsize <<= 1; newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); - if (newtgts == NULL) { + if (!newtgts) { lmv_init_unlock(lmv); return -ENOMEM; } @@ -538,11 +537,9 @@ int lmv_check_connect(struct obd_device *obd) CDEBUG(D_CONFIG, "Time to connect %s to %s\n", lmv->cluuid.uuid, obd->obd_name); - LASSERT(lmv->tgts != NULL); - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; rc = lmv_connect_mdc(obd, tgt); if (rc) @@ -562,7 +559,7 @@ int lmv_check_connect(struct obd_device *obd) int rc2; tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; tgt->ltd_active = 0; if (tgt->ltd_exp) { @@ -585,9 +582,6 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) struct obd_device *mdc_obd; int rc; - LASSERT(tgt != NULL); - LASSERT(obd != NULL); - mdc_obd = class_exp2obd(tgt->ltd_exp); if (mdc_obd) { @@ -640,7 +634,7 @@ static int lmv_disconnect(struct obd_export *exp) goto out_local; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; lmv_disconnect_mdc(obd, lmv->tgts[i]); @@ -662,7 +656,8 @@ out_local: return rc; } -static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg) +static int lmv_fid2path(struct obd_export *exp, int len, void *karg, + void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lmv_obd *lmv = &obddev->u.lmv; @@ -683,8 +678,9 @@ repeat_fid2path: goto out_fid2path; /* If remote_gf != NULL, it means just building the - * path on the remote MDT, copy this path segment to gf */ - if (remote_gf != NULL) { + * path on the remote MDT, copy this path segment to gf + */ + if (remote_gf) { struct getinfo_fid2path *ori_gf; char *ptr; @@ -714,7 +710,7 @@ repeat_fid2path: goto out_fid2path; /* sigh, has to go to another MDT to do path building further */ - if (remote_gf == NULL) { + if (!remote_gf) { remote_gf_size = sizeof(*remote_gf) + PATH_MAX; remote_gf = kzalloc(remote_gf_size, GFP_NOFS); if (!remote_gf) { @@ -779,7 +775,7 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv, nr_out = 0; for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) { curr_tgt = lmv_find_target(lmv, - &hur_in->hur_user_item[i].hui_fid); + &hur_in->hur_user_item[i].hui_fid); if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) { hur_out->hur_user_item[nr_out] = hur_in->hur_user_item[i]; @@ -792,14 +788,17 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv, } static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len, - struct lustre_kernelcomm *lk, void *uarg) + struct lustre_kernelcomm *lk, + void __user *uarg) { - int i, rc = 0; + int rc = 0; + __u32 i; /* unregister request (call from llapi_hsm_copytool_fini) */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { /* best effort: try to clean as much as possible - * (continue on error) */ + * (continue on error) + */ obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg); } @@ -808,23 +807,25 @@ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int 
len, * and will unregister automatically. */ rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group); + return rc; } static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, - struct lustre_kernelcomm *lk, void *uarg) + struct lustre_kernelcomm *lk, void __user *uarg) { - struct file *filp; - int i, j, err; - int rc = 0; - bool any_set = false; + struct file *filp; + __u32 i, j; + int err, rc = 0; + bool any_set = false; + struct kkuc_ct_data kcd = { 0 }; /* All or nothing: try to register to all MDS. * In case of failure, unregister from previous MDS, - * except if it because of inactive target. */ + * except if it because of inactive target. + */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, - len, lk, uarg); + err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg); if (err) { if (lmv->tgts[i]->ltd_active) { /* permanent error */ @@ -836,13 +837,13 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, /* unregister from previous MDS */ for (j = 0; j < i; j++) obd_iocontrol(cmd, - lmv->tgts[j]->ltd_exp, - len, lk, uarg); + lmv->tgts[j]->ltd_exp, + len, lk, uarg); return rc; } /* else: transient error. - * kuc will register to the missing MDT - * when it is back */ + * kuc will register to the missing MDT when it is back + */ } else { any_set = true; } @@ -854,17 +855,25 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, /* at least one registration done, with no failure */ filp = fget(lk->lk_wfd); - if (filp == NULL) { + if (!filp) return -EBADF; + + kcd.kcd_magic = KKUC_CT_DATA_MAGIC; + kcd.kcd_uuid = lmv->cluuid; + kcd.kcd_archive = lk->lk_data; + + rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, + &kcd, sizeof(kcd)); + if (rc) { + if (filp) + fput(filp); } - rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data); - if (rc != 0 && filp != NULL) - fput(filp); + return rc; } static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void *uarg) + int len, void *karg, void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lmv_obd *lmv = &obddev->u.lmv; @@ -887,8 +896,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (index >= count) return -ENODEV; - if (lmv->tgts[index] == NULL || - lmv->tgts[index]->ltd_active == 0) + if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0) return -ENODATA; mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp); @@ -897,8 +905,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) + min((int)data->ioc_plen2, + (int)sizeof(struct obd_uuid)))) return -EFAULT; rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf, @@ -907,8 +915,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (rc) return rc; if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int) data->ioc_plen1, - (int) sizeof(stat_buf)))) + min((int)data->ioc_plen1, + (int)sizeof(stat_buf)))) return -EFAULT; break; } @@ -922,18 +930,18 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, return -EINVAL; tgt = lmv->tgts[qctl->qc_idx]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) return -EINVAL; } else if (qctl->qc_valid == QC_UUID) { for (i = 0; i < count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; if 
(!obd_uuid_equals(&tgt->ltd_uuid, &qctl->obd_uuid)) continue; - if (tgt->ltd_exp == NULL) + if (!tgt->ltd_exp) return -EINVAL; break; @@ -967,8 +975,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (icc->icc_mdtindex >= count) return -ENODEV; - if (lmv->tgts[icc->icc_mdtindex] == NULL || - lmv->tgts[icc->icc_mdtindex]->ltd_exp == NULL || + if (!lmv->tgts[icc->icc_mdtindex] || + !lmv->tgts[icc->icc_mdtindex]->ltd_exp || lmv->tgts[icc->icc_mdtindex]->ltd_active == 0) return -ENODEV; rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp, @@ -976,7 +984,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, break; } case LL_IOC_GET_CONNECT_FLAGS: { - if (lmv->tgts[0] == NULL) + if (!lmv->tgts[0]) return -ENODATA; rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg); break; @@ -993,10 +1001,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, tgt = lmv_find_target(lmv, &op_data->op_fid1); if (IS_ERR(tgt)) - return PTR_ERR(tgt); + return PTR_ERR(tgt); - if (tgt->ltd_exp == NULL) - return -EINVAL; + if (!tgt->ltd_exp) + return -EINVAL; rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); break; @@ -1021,7 +1029,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, /* if the request is about a single fid * or if there is a single MDS, no need to split - * the request. */ + * the request. + */ if (reqcount == 1 || count == 1) { tgt = lmv_find_target(lmv, &hur->hur_user_item[0].hui_fid); @@ -1044,7 +1053,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, hur_user_item[nr]) + hur->hur_request.hr_data_len; req = libcfs_kvzalloc(reqlen, GFP_NOFS); - if (req == NULL) + if (!req) return -ENOMEM; lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req); @@ -1070,7 +1079,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (IS_ERR(tgt2)) return PTR_ERR(tgt2); - if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL)) + if (!tgt1->ltd_exp || !tgt2->ltd_exp) return -EINVAL; /* only files on same MDT can have their layouts swapped */ @@ -1094,11 +1103,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, struct obd_device *mdc_obd; int err; - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; /* ll_umount_begin() sets force flag but for lmv, not - * mdc. Let's pass it through */ + * mdc. Let's pass it through + */ mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp); mdc_obd->obd_force = obddev->obd_force; err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, @@ -1122,51 +1131,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, return rc; } -#if 0 -static int lmv_all_chars_policy(int count, const char *name, - int len) -{ - unsigned int c = 0; - - while (len > 0) - c += name[--len]; - c = c % count; - return c; -} - -static int lmv_nid_policy(struct lmv_obd *lmv) -{ - struct obd_import *imp; - __u32 id; - - /* - * XXX: To get nid we assume that underlying obd device is mdc. 
- */ - imp = class_exp2cliimp(lmv->tgts[0].ltd_exp); - id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32); - return id % lmv->desc.ld_tgt_count; -} - -static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data, - enum placement_policy placement) -{ - switch (placement) { - case PLACEMENT_CHAR_POLICY: - return lmv_all_chars_policy(lmv->desc.ld_tgt_count, - op_data->op_name, - op_data->op_namelen); - case PLACEMENT_NID_POLICY: - return lmv_nid_policy(lmv); - - default: - break; - } - - CERROR("Unsupported placement policy %x\n", placement); - return -EINVAL; -} -#endif - /** * This is _inode_ placement policy function (not name). */ @@ -1175,7 +1139,7 @@ static int lmv_placement_policy(struct obd_device *obd, { struct lmv_obd *lmv = &obd->u.lmv; - LASSERT(mds != NULL); + LASSERT(mds); if (lmv->desc.ld_tgt_count == 1) { *mds = 0; @@ -1205,7 +1169,8 @@ static int lmv_placement_policy(struct obd_device *obd, } /* Allocate new fid on target according to operation type and parent - * home mds. */ + * home mds. + */ *mds = op_data->op_mds; return 0; } @@ -1225,7 +1190,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds) */ mutex_lock(&tgt->ltd_fid_mutex); - if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL) { + if (tgt->ltd_active == 0 || !tgt->ltd_exp) { rc = -ENODEV; goto out; } @@ -1252,8 +1217,8 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid, u32 mds = 0; int rc; - LASSERT(op_data != NULL); - LASSERT(fid != NULL); + LASSERT(op_data); + LASSERT(fid); rc = lmv_placement_policy(obd, op_data, &mds); if (rc) { @@ -1291,7 +1256,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg) } lmv->tgts = kcalloc(32, sizeof(*lmv->tgts), GFP_NOFS); - if (lmv->tgts == NULL) + if (!lmv->tgts) return -ENOMEM; lmv->tgts_size = 32; @@ -1332,11 +1297,11 @@ static int lmv_cleanup(struct obd_device *obd) struct lmv_obd *lmv = &obd->u.lmv; fld_client_fini(&lmv->lmv_fld); - if (lmv->tgts != NULL) { + if (lmv->tgts) { int i; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL) + if (!lmv->tgts[i]) continue; lmv_del_target(lmv, i); } @@ -1357,7 +1322,8 @@ static int lmv_process_config(struct obd_device *obd, u32 len, void *buf) switch (lcfg->lcfg_command) { case LCFG_ADD_MDC: /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID - * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */ + * 2:0 3:1 4:lustre-MDT0000-mdc_UUID + */ if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) { rc = -EINVAL; goto out; @@ -1402,7 +1368,7 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, return -ENOMEM; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp, @@ -1421,7 +1387,8 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, * i.e. mount does not need the merged osfs * from all of MDT. * And also clients can be mounted as long as - * MDT0 is in service*/ + * MDT0 is in service + */ if (flags & OBD_STATFS_FOR_MDT0) goto out_free_temp; } else { @@ -1547,7 +1514,7 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid) * space of MDT storing inode. 
*/ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; md_null_inode(lmv->tgts[i]->ltd_exp, fid); } @@ -1575,7 +1542,7 @@ static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid, * space of MDT storing inode. */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data); if (rc) @@ -1655,7 +1622,7 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data, cap_effective, rdev, request); if (rc == 0) { - if (*request == NULL) + if (!*request) return rc; CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2)); } @@ -1701,7 +1668,6 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo, int pmode; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if (!(body->valid & OBD_MD_MDS)) return 0; @@ -1808,7 +1774,6 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data, body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if (body->valid & OBD_MD_MDS) { struct lu_fid rid = body->fid1; @@ -1842,7 +1807,8 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data, NULL) static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data, - int op_tgt, ldlm_mode_t mode, int bits, int flag) + int op_tgt, enum ldlm_mode mode, int bits, + int flag) { struct lu_fid *fid = md_op_data_fid(op_data, flag); struct obd_device *obd = exp->exp_obd; @@ -2097,7 +2063,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) while (--nlupgs > 0) { ent = lu_dirent_start(dp); - for (end_dirent = ent; ent != NULL; + for (end_dirent = ent; ent; end_dirent = ent, ent = lu_dirent_next(ent)) ; @@ -2117,7 +2083,8 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) break; /* Enlarge the end entry lde_reclen from 0 to - * first entry of next lu_dirpage. */ + * first entry of next lu_dirpage. + */ LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0); end_dirent->lde_reclen = cpu_to_le16((char *)(dp->ldp_entries) - @@ -2227,7 +2194,7 @@ retry: return rc; body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* Not cross-ref case, just get out of here. */ @@ -2255,7 +2222,8 @@ retry: * 4. Then A will resend unlink RPC to MDT0. (retry 2nd times). * * In theory, it might try unlimited time here, but it should - * be very rare case. */ + * be very rare case. + */ op_data->op_fid2 = body->fid1; ptlrpc_req_finished(*request); *request = NULL; @@ -2270,7 +2238,8 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) switch (stage) { case OBD_CLEANUP_EARLY: /* XXX: here should be calling obd_precleanup() down to - * stack. */ + * stack. + */ break; case OBD_CLEANUP_EXPORTS: fld_client_debugfs_fini(&lmv->lmv_fld); @@ -2291,7 +2260,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, int rc = 0; obd = class_exp2obd(exp); - if (obd == NULL) { + if (!obd) { CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n", exp->exp_handle.h_cookie); return -EINVAL; @@ -2312,7 +2281,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, /* * All tgts should be connected when this gets called. 
*/ - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; if (!obd_get_info(env, tgt->ltd_exp, keylen, key, @@ -2355,7 +2324,7 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, int rc = 0; obd = class_exp2obd(exp); - if (obd == NULL) { + if (!obd) { CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n", exp->exp_handle.h_cookie); return -EINVAL; @@ -2368,7 +2337,7 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; err = obd_set_info_async(env, tgt->ltd_exp, @@ -2403,9 +2372,9 @@ static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, return 0; } - if (*lmmp == NULL) { + if (!*lmmp) { *lmmp = libcfs_kvzalloc(mea_size, GFP_NOFS); - if (*lmmp == NULL) + if (!*lmmp) return -ENOMEM; } @@ -2443,10 +2412,10 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, __u32 magic; mea_size = lmv_get_easize(lmv); - if (lsmp == NULL) + if (!lsmp) return mea_size; - if (*lsmp != NULL && lmm == NULL) { + if (*lsmp && !lmm) { kvfree(*tmea); *lsmp = NULL; return 0; @@ -2455,7 +2424,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, LASSERT(mea_size == lmm_size); *tmea = libcfs_kvzalloc(mea_size, GFP_NOFS); - if (*tmea == NULL) + if (!*tmea) return -ENOMEM; if (!lmm) @@ -2485,8 +2454,8 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, } static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - ldlm_cancel_flags_t flags, void *opaque) + ldlm_policy_data_t *policy, enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; @@ -2494,10 +2463,10 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, int err; int i; - LASSERT(fid != NULL); + LASSERT(fid); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) continue; @@ -2519,14 +2488,16 @@ static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, return rc; } -static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh) +static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; - ldlm_mode_t rc; + enum ldlm_mode rc; int i; CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid)); @@ -2538,8 +2509,7 @@ static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags, * one fid was created in. 
*/ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) continue; @@ -2695,7 +2665,7 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp, tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0) + if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0) continue; if (!tgt->ltd_active) { CDEBUG(D_HA, "mdt %d is inactive.\n", i); @@ -2730,7 +2700,7 @@ static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp, int err; tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) { + if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) { CERROR("lmv idx %d inactive\n", i); return -EIO; } @@ -2813,7 +2783,8 @@ static void lmv_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver"); +MODULE_DESCRIPTION("Lustre Logical Metadata Volume"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(lmv_init); diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c index 40cf4d9f0486..b39e364a29ab 100644 --- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c +++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c @@ -138,7 +138,7 @@ static int lmv_desc_uuid_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lmv_obd *lmv; - LASSERT(dev != NULL); + LASSERT(dev); lmv = &dev->u.lmv; seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid); return 0; @@ -171,7 +171,7 @@ static int lmv_tgt_seq_show(struct seq_file *p, void *v) { struct lmv_tgt_desc *tgt = v; - if (tgt == NULL) + if (!tgt) return 0; seq_printf(p, "%d: %s %sACTIVE\n", tgt->ltd_idx, tgt->ltd_uuid.uuid, diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h index 66a2492c1cc3..7dd3162b51e9 100644 --- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h @@ -579,51 +579,49 @@ extern struct kmem_cache *lovsub_req_kmem; extern struct kmem_cache *lov_lock_link_kmem; -int lov_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf); -int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf); -int lov_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_io_init(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); - -int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link, - struct lovsub_lock *sub); +int lov_object_init(const struct lu_env *env, struct lu_object *obj, + const struct lu_object_conf *conf); +int 
lovsub_object_init(const struct lu_env *env, struct lu_object *obj, + const struct lu_object_conf *conf); +int lov_lock_init(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_io_init(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); + +int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link, + struct lovsub_lock *sub); struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio, int stripe); -void lov_sub_put(struct lov_io_sub *sub); -int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov, - struct lovsub_lock *sublock, - const struct cl_lock_descr *d, int idx); - -int lov_page_init(const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, struct page *vmpage); -int lovsub_page_init(const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, struct page *vmpage); - -int lov_page_init_empty(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); -int lov_page_init_raid0(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); +void lov_sub_put(struct lov_io_sub *sub); +int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov, + struct lovsub_lock *sublock, + const struct cl_lock_descr *d, int idx); + +int lov_page_init(const struct lu_env *env, struct cl_object *ob, + struct cl_page *page, struct page *vmpage); +int lovsub_page_init(const struct lu_env *env, struct cl_object *ob, + struct cl_page *page, struct page *vmpage); + +int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); +int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); struct lu_object *lov_object_alloc(const struct lu_env *env, - const struct lu_object_header *hdr, - struct lu_device *dev); + const struct lu_object_header *hdr, + struct lu_device *dev); struct lu_object *lovsub_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); @@ -631,9 +629,8 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env, struct lov_lock_link *lov_lock_link_find(const struct lu_env *env, struct lov_lock *lck, struct lovsub_lock *sub); -struct lov_io_sub *lov_page_subio(const struct lu_env *env, - struct lov_io *lio, - const struct cl_page_slice *slice); +struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, + const struct cl_page_slice *slice); #define lov_foreach_target(lov, var) \ for (var = 0; var < lov_targets_nr(lov); ++var) @@ -651,7 +648,7 @@ static inline struct lov_session *lov_env_session(const struct lu_env *env) struct lov_session *ses; ses = lu_context_key_get(env->le_ses, &lov_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } 
@@ -759,7 +756,7 @@ static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock) const struct cl_lock_slice *slice; slice = cl_lock_at(lock, &lovsub_device_type); - LASSERT(slice != NULL); + LASSERT(slice); return cl2lovsub_lock(slice); } @@ -798,7 +795,7 @@ static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice) } static inline struct lov_io *cl2lov_io(const struct lu_env *env, - const struct cl_io_slice *ios) + const struct cl_io_slice *ios) { struct lov_io *lio; @@ -817,7 +814,7 @@ static inline struct lov_thread_info *lov_env_info(const struct lu_env *env) struct lov_thread_info *info; info = lu_context_key_get(&env->le_ctx, &lov_key); - LASSERT(info != NULL); + LASSERT(info); return info; } diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c index 3733fdc88c8c..532ef87dfb44 100644 --- a/drivers/staging/lustre/lustre/lov/lov_dev.c +++ b/drivers/staging/lustre/lustre/lov/lov_dev.c @@ -142,8 +142,8 @@ static void *lov_key_init(const struct lu_context *ctx, { struct lov_thread_info *info; - info = kmem_cache_alloc(lov_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info != NULL) + info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS); + if (info) INIT_LIST_HEAD(&info->lti_closure.clc_list); else info = ERR_PTR(-ENOMEM); @@ -170,8 +170,8 @@ static void *lov_session_key_init(const struct lu_context *ctx, { struct lov_session *info; - info = kmem_cache_alloc(lov_session_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(lov_session_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -199,15 +199,15 @@ static struct lu_device *lov_device_fini(const struct lu_env *env, int i; struct lov_device *ld = lu2lov_dev(d); - LASSERT(ld->ld_lov != NULL); - if (ld->ld_target == NULL) + LASSERT(ld->ld_lov); + if (!ld->ld_target) return NULL; lov_foreach_target(ld, i) { struct lovsub_device *lsd; lsd = ld->ld_target[i]; - if (lsd != NULL) { + if (lsd) { cl_stack_fini(env, lovsub2cl_dev(lsd)); ld->ld_target[i] = NULL; } @@ -222,8 +222,8 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, int i; int rc = 0; - LASSERT(d->ld_site != NULL); - if (ld->ld_target == NULL) + LASSERT(d->ld_site); + if (!ld->ld_target) return rc; lov_foreach_target(ld, i) { @@ -232,7 +232,7 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, struct lov_tgt_desc *desc; desc = ld->ld_lov->lov_tgts[i]; - if (desc == NULL) + if (!desc) continue; cl = cl_type_setup(env, d->ld_site, &lovsub_device_type, @@ -261,8 +261,8 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev, struct lov_req *lr; int result; - lr = kmem_cache_alloc(lov_req_kmem, GFP_NOFS | __GFP_ZERO); - if (lr != NULL) { + lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS); + if (lr) { cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops); result = 0; } else @@ -282,9 +282,9 @@ static void lov_emerg_free(struct lov_device_emerg **emrg, int nr) struct lov_device_emerg *em; em = emrg[i]; - if (em != NULL) { + if (em) { LASSERT(em->emrg_page_list.pl_nr == 0); - if (em->emrg_env != NULL) + if (em->emrg_env) cl_env_put(em->emrg_env, &em->emrg_refcheck); kfree(em); } @@ -300,7 +300,7 @@ static struct lu_device *lov_device_free(const struct lu_env *env, cl_device_fini(lu2cl_dev(d)); kfree(ld->ld_target); - if (ld->ld_emrg != NULL) + if (ld->ld_emrg) lov_emerg_free(ld->ld_emrg, nr); kfree(ld); return NULL; @@ -311,7 +311,7 @@ static void lov_cl_del_target(const struct lu_env *env, struct 
lu_device *dev, { struct lov_device *ld = lu2lov_dev(dev); - if (ld->ld_target[index] != NULL) { + if (ld->ld_target[index]) { cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index])); ld->ld_target[index] = NULL; } @@ -324,17 +324,17 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr) int result; emerg = kcalloc(nr, sizeof(emerg[0]), GFP_NOFS); - if (emerg == NULL) + if (!emerg) return ERR_PTR(-ENOMEM); for (result = i = 0; i < nr && result == 0; i++) { struct lov_device_emerg *em; em = kzalloc(sizeof(*em), GFP_NOFS); - if (em != NULL) { + if (em) { emerg[i] = em; cl_page_list_init(&em->emrg_page_list); em->emrg_env = cl_env_alloc(&em->emrg_refcheck, - LCT_REMEMBER|LCT_NOREF); + LCT_REMEMBER | LCT_NOREF); if (!IS_ERR(em->emrg_env)) em->emrg_env->le_ctx.lc_cookie = 0x2; else { @@ -370,7 +370,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) return PTR_ERR(emerg); newd = kcalloc(tgt_size, sz, GFP_NOFS); - if (newd != NULL) { + if (newd) { mutex_lock(&dev->ld_mutex); if (sub_size > 0) { memcpy(newd, dev->ld_target, sub_size * sz); @@ -379,7 +379,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) dev->ld_target = newd; dev->ld_target_nr = tgt_size; - if (dev->ld_emrg != NULL) + if (dev->ld_emrg) lov_emerg_free(dev->ld_emrg, sub_size); dev->ld_emrg = emerg; mutex_unlock(&dev->ld_mutex); @@ -404,8 +404,6 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev, obd_getref(obd); tgt = obd->u.lov.lov_tgts[index]; - LASSERT(tgt != NULL); - LASSERT(tgt->ltd_obd != NULL); if (!tgt->ltd_obd->obd_set_up) { CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid)); @@ -414,7 +412,7 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev, rc = lov_expand_targets(env, ld); if (rc == 0 && ld->ld_flags & LOV_DEV_INITIALIZED) { - LASSERT(dev->ld_site != NULL); + LASSERT(dev->ld_site); cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type, tgt->ltd_obd->obd_lu_dev); @@ -492,7 +490,7 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env, /* setup the LOV OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); + LASSERT(obd); rc = lov_setup(obd, cfg); if (rc) { lov_device_free(env, d); diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c index b3c9c85aab9d..b6529401c713 100644 --- a/drivers/staging/lustre/lustre/lov/lov_ea.c +++ b/drivers/staging/lustre/lustre/lov/lov_ea.c @@ -100,8 +100,8 @@ struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size) return NULL; for (i = 0; i < stripe_count; i++) { - loi = kmem_cache_alloc(lov_oinfo_slab, GFP_NOFS | __GFP_ZERO); - if (loi == NULL) + loi = kmem_cache_zalloc(lov_oinfo_slab, GFP_NOFS); + if (!loi) goto err; lsm->lsm_oinfo[i] = loi; } @@ -141,7 +141,7 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm, static void lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno, - u64 *lov_off, u64 *swidth) + u64 *lov_off, u64 *swidth) { if (swidth) *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; @@ -162,12 +162,13 @@ static int lsm_destroy_plain(struct lov_stripe_md *lsm, struct obdo *oa, } /* Find minimum stripe maxbytes value. For inactive or - * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */ + * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. 
+ */ static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes) { struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import; - if (imp == NULL || !tgt->ltd_active) { + if (!imp || !tgt->ltd_active) { *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES; return; } diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h index 2d00bad58e35..590f9326af37 100644 --- a/drivers/staging/lustre/lustre/lov/lov_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_internal.h @@ -43,7 +43,8 @@ /* lov_do_div64(a, b) returns a % b, and a = a / b. * The 32-bit code is LOV-specific due to knowing about stripe limits in * order to reduce the divisor to a 32-bit number. If the divisor is - * already a 32-bit value the compiler handles this directly. */ + * already a 32-bit value the compiler handles this directly. + */ #if BITS_PER_LONG == 64 # define lov_do_div64(n, base) ({ \ uint64_t __base = (base); \ @@ -92,7 +93,8 @@ struct lov_request_set { atomic_t set_refcount; struct obd_export *set_exp; /* XXX: There is @set_exp already, however obd_statfs gets obd_device - only. */ + * only. + */ struct obd_device *set_obd; int set_count; atomic_t set_completes; @@ -114,7 +116,6 @@ void lov_finish_set(struct lov_request_set *set); static inline void lov_get_reqset(struct lov_request_set *set) { - LASSERT(set != NULL); LASSERT(atomic_read(&set->set_refcount) > 0); atomic_inc(&set->set_refcount); } @@ -137,12 +138,10 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm, struct ost_lvb *lvb, __u64 *kms_place); /* lov_offset.c */ -u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, - int stripeno); +u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno); int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, int stripeno, u64 *u64); -u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, - int stripeno); +u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, int stripeno); int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, u64 start, u64 end, u64 *obd_start, u64 *obd_end); @@ -197,7 +196,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm, int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, struct lov_mds_md *lmm, int lmm_bytes); int lov_getstripe(struct obd_export *exp, - struct lov_stripe_md *lsm, struct lov_user_md *lump); + struct lov_stripe_md *lsm, struct lov_user_md __user *lump); int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count, int pattern, int magic); int lov_free_memmd(struct lov_stripe_md **lsmp); diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c index 93fe69eb2560..4296aacd84fc 100644 --- a/drivers/staging/lustre/lustre/lov/lov_io.c +++ b/drivers/staging/lustre/lustre/lov/lov_io.c @@ -60,7 +60,7 @@ static inline void lov_sub_exit(struct lov_io_sub *sub) static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio, struct lov_io_sub *sub) { - if (sub->sub_io != NULL) { + if (sub->sub_io) { if (sub->sub_io_initialized) { lov_sub_enter(sub); cl_io_fini(sub->sub_env, sub->sub_io); @@ -74,7 +74,7 @@ static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio, kfree(sub->sub_io); sub->sub_io = NULL; } - if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) { + if (!IS_ERR_OR_NULL(sub->sub_env)) { if (!sub->sub_borrowed) cl_env_put(sub->sub_env, &sub->sub_refcheck); sub->sub_env = NULL; @@ -143,11 +143,11 @@ static int lov_io_sub_init(const struct lu_env 
*env, struct lov_io *lio, int stripe = sub->sub_stripe; int result; - LASSERT(sub->sub_io == NULL); - LASSERT(sub->sub_env == NULL); + LASSERT(!sub->sub_io); + LASSERT(!sub->sub_env); LASSERT(sub->sub_stripe < lio->lis_stripe_count); - if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL)) + if (unlikely(!lov_r0(lov)->lo_sub[stripe])) return -EIO; result = 0; @@ -252,7 +252,6 @@ static int lov_page_stripe(const struct cl_page *page) subobj = lu2lovsub( lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header, &lovsub_device_type)); - LASSERT(subobj != NULL); return subobj->lso_index; } @@ -263,9 +262,9 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, struct cl_page *page = slice->cpl_page; int stripe; - LASSERT(lio->lis_cl.cis_io != NULL); + LASSERT(lio->lis_cl.cis_io); LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object); - LASSERT(lsm != NULL); + LASSERT(lsm); LASSERT(lio->lis_nr_subios > 0); stripe = lov_page_stripe(page); @@ -278,7 +277,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; int result; - LASSERT(lio->lis_object != NULL); + LASSERT(lio->lis_object); /* * Need to be optimized, we can't afford to allocate a piece of memory @@ -288,7 +287,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, libcfs_kvzalloc(lsm->lsm_stripe_count * sizeof(lio->lis_subs[0]), GFP_NOFS); - if (lio->lis_subs != NULL) { + if (lio->lis_subs) { lio->lis_nr_subios = lio->lis_stripe_count; lio->lis_single_subio_index = -1; lio->lis_active_subios = 0; @@ -304,7 +303,6 @@ static void lov_io_slice_init(struct lov_io *lio, io->ci_result = 0; lio->lis_object = obj; - LASSERT(obj->lo_lsm != NULL); lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count; switch (io->ci_type) { @@ -358,7 +356,7 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) struct lov_object *lov = cl2lov(ios->cis_obj); int i; - if (lio->lis_subs != NULL) { + if (lio->lis_subs) { for (i = 0; i < lio->lis_nr_subios; i++) lov_io_sub_fini(env, lio, &lio->lis_subs[i]); kvfree(lio->lis_subs); @@ -395,7 +393,7 @@ static int lov_io_iter_init(const struct lu_env *env, endpos, &start, &end)) continue; - if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) { + if (unlikely(!lov_r0(lio->lis_object)->lo_sub[stripe])) { if (ios->cis_io->ci_type == CIT_READ || ios->cis_io->ci_type == CIT_WRITE || ios->cis_io->ci_type == CIT_FAULT) @@ -601,13 +599,13 @@ static int lov_io_submit(const struct lu_env *env, return rc; } - LASSERT(lio->lis_subs != NULL); + LASSERT(lio->lis_subs); if (alloc) { stripes_qin = libcfs_kvzalloc(sizeof(*stripes_qin) * lio->lis_nr_subios, GFP_NOFS); - if (stripes_qin == NULL) + if (!stripes_qin) return -ENOMEM; for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) @@ -949,13 +947,13 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, } int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) + struct cl_io *io) { struct lov_object *lov = cl2lov(obj); struct lov_io *lio = lov_env_io(env); int result; - LASSERT(lov->lo_lsm != NULL); + LASSERT(lov->lo_lsm); lio->lis_object = lov; switch (io->ci_type) { diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c index d866791d7b22..ae854bc25dbe 100644 --- a/drivers/staging/lustre/lustre/lov/lov_lock.c +++ b/drivers/staging/lustre/lustre/lov/lov_lock.c @@ -115,7 +115,7 @@ static void lov_sublock_adopt(const 
struct lu_env *env, struct lov_lock *lck, /* * check that sub-lock doesn't have lock link to this top-lock. */ - LASSERT(lov_lock_link_find(env, lck, lsl) == NULL); + LASSERT(!lov_lock_link_find(env, lck, lsl)); LASSERT(idx < lck->lls_nr); lck->lls_sub[idx].sub_lock = lsl; @@ -144,8 +144,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env, LASSERT(idx < lck->lls_nr); - link = kmem_cache_alloc(lov_lock_link_kmem, GFP_NOFS | __GFP_ZERO); - if (link != NULL) { + link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS); + if (link) { struct lov_sublock_env *subenv; struct lov_lock_sub *lls; struct cl_lock_descr *descr; @@ -160,7 +160,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env, * to remember the subio. This is because lock is able * to be cached, but this is not true for IO. This * further means a sublock might be referenced in - * different io context. -jay */ + * different io context. -jay + */ sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io, descr, "lov-parent", parent); @@ -220,7 +221,7 @@ static int lov_sublock_lock(const struct lu_env *env, LASSERT(!(lls->sub_flags & LSF_HELD)); link = lov_lock_link_find(env, lck, sublock); - LASSERT(link != NULL); + LASSERT(link); lov_lock_unlink(env, link, sublock); lov_sublock_unlock(env, sublock, closure, NULL); lck->lls_cancel_race = 1; @@ -263,7 +264,7 @@ static int lov_subresult(int result, int rc) int rc_rank; LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT, - "result = %d", result); + "result = %d\n", result); LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT, "rc = %d\n", rc); CLASSERT(CLO_WAIT < CLO_REPEAT); @@ -309,14 +310,14 @@ static int lov_lock_sub_init(const struct lu_env *env, * XXX for wide striping smarter algorithm is desirable, * breaking out of the loop, early. */ - if (likely(r0->lo_sub[i] != NULL) && + if (likely(r0->lo_sub[i]) && lov_stripe_intersects(loo->lo_lsm, i, file_start, file_end, &start, &end)) nr++; } LASSERT(nr > 0); lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS); - if (lck->lls_sub == NULL) + if (!lck->lls_sub) return -ENOMEM; lck->lls_nr = nr; @@ -328,14 +329,14 @@ static int lov_lock_sub_init(const struct lu_env *env, * top-lock. 
*/ for (i = 0, nr = 0; i < r0->lo_nr; ++i) { - if (likely(r0->lo_sub[i] != NULL) && + if (likely(r0->lo_sub[i]) && lov_stripe_intersects(loo->lo_lsm, i, file_start, file_end, &start, &end)) { struct cl_lock_descr *descr; descr = &lck->lls_sub[nr].sub_descr; - LASSERT(descr->cld_obj == NULL); + LASSERT(!descr->cld_obj); descr->cld_obj = lovsub2cl(r0->lo_sub[i]); descr->cld_start = cl_index(descr->cld_obj, start); descr->cld_end = cl_index(descr->cld_obj, end); @@ -369,7 +370,6 @@ static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck, struct cl_lock *sublock; int dying; - LASSERT(lck->lls_sub[i].sub_lock != NULL); sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock; LASSERT(cl_lock_is_mutexed(sublock)); @@ -413,7 +413,6 @@ static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck, if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) { struct cl_lock *sublock; - LASSERT(lck->lls_sub[i].sub_lock != NULL); sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock; LASSERT(cl_lock_is_mutexed(sublock)); LASSERT(sublock->cll_state != CLS_FREEING); @@ -435,13 +434,13 @@ static void lov_lock_fini(const struct lu_env *env, lck = cl2lov_lock(slice); LASSERT(lck->lls_nr_filled == 0); - if (lck->lls_sub != NULL) { + if (lck->lls_sub) { for (i = 0; i < lck->lls_nr; ++i) /* * No sub-locks exists at this point, as sub-lock has * a reference on its parent. */ - LASSERT(lck->lls_sub[i].sub_lock == NULL); + LASSERT(!lck->lls_sub[i].sub_lock); kvfree(lck->lls_sub); } kmem_cache_free(lov_lock_kmem, lck); @@ -479,7 +478,8 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck, result = cl_enqueue_try(env, sublock, io, enqflags); if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) { /* if it is enqueued, try to `wait' on it---maybe it's already - * granted */ + * granted + */ result = cl_wait_try(env, sublock); if (result == CLO_REENQUEUED) result = CLO_WAIT; @@ -515,12 +515,13 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent, if (!IS_ERR(sublock)) { cl_lock_get_trust(sublock); if (parent->cll_state == CLS_QUEUING && - lck->lls_sub[idx].sub_lock == NULL) { + !lck->lls_sub[idx].sub_lock) { lov_sublock_adopt(env, lck, sublock, idx, link); } else { kmem_cache_free(lov_lock_link_kmem, link); /* other thread allocated sub-lock, or enqueue is no - * longer going on */ + * longer going on + */ cl_lock_mutex_put(env, parent); cl_lock_unhold(env, sublock, "lov-parent", parent); cl_lock_mutex_get(env, parent); @@ -574,10 +575,11 @@ static int lov_lock_enqueue(const struct lu_env *env, * Sub-lock might have been canceled, while top-lock was * cached. */ - if (sub == NULL) { + if (!sub) { result = lov_sublock_fill(env, lock, io, lck, i); /* lov_sublock_fill() released @lock mutex, - * restart. */ + * restart. + */ break; } sublock = sub->lss_cl.cls_lock; @@ -605,7 +607,8 @@ static int lov_lock_enqueue(const struct lu_env *env, /* take recursive mutex of sublock */ cl_lock_mutex_get(env, sublock); /* need to release all locks in closure - * otherwise it may deadlock. LU-2683.*/ + * otherwise it may deadlock. LU-2683. + */ lov_sublock_unlock(env, sub, closure, subenv); /* sublock and parent are held. 
*/ @@ -620,7 +623,7 @@ static int lov_lock_enqueue(const struct lu_env *env, break; } } else { - LASSERT(sublock->cll_conflict == NULL); + LASSERT(!sublock->cll_conflict); lov_sublock_unlock(env, sub, closure, subenv); } } @@ -649,11 +652,12 @@ static int lov_lock_unuse(const struct lu_env *env, /* top-lock state cannot change concurrently, because single * thread (one that released the last hold) carries unlocking - * to the completion. */ + * to the completion. + */ LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT); lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) + if (!sub) continue; sublock = sub->lss_cl.cls_lock; @@ -679,7 +683,7 @@ static int lov_lock_unuse(const struct lu_env *env, } static void lov_lock_cancel(const struct lu_env *env, - const struct cl_lock_slice *slice) + const struct cl_lock_slice *slice) { struct lov_lock *lck = cl2lov_lock(slice); struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock); @@ -695,10 +699,11 @@ static void lov_lock_cancel(const struct lu_env *env, /* top-lock state cannot change concurrently, because single * thread (one that released the last hold) carries unlocking - * to the completion. */ + * to the completion. + */ lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) + if (!sub) continue; sublock = sub->lss_cl.cls_lock; @@ -757,7 +762,6 @@ again: lls = &lck->lls_sub[i]; sub = lls->sub_lock; - LASSERT(sub != NULL); sublock = sub->lss_cl.cls_lock; rc = lov_sublock_lock(env, lck, lls, closure, &subenv); if (rc == 0) { @@ -776,8 +780,9 @@ again: if (result != 0) break; } - /* Each sublock only can be reenqueued once, so will not loop for - * ever. */ + /* Each sublock only can be reenqueued once, so will not loop + * forever. + */ if (result == 0 && reenqueued != 0) goto again; cl_lock_closure_fini(closure); @@ -805,7 +810,7 @@ static int lov_lock_use(const struct lu_env *env, lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) { + if (!sub) { /* * Sub-lock might have been canceled, while top-lock was * cached. @@ -826,7 +831,8 @@ static int lov_lock_use(const struct lu_env *env, i, 1, rc); } else if (sublock->cll_state == CLS_NEW) { /* Sub-lock might have been canceled, while - * top-lock was cached. */ + * top-lock was cached. 
+ */ result = -ESTALE; lov_sublock_release(env, lck, i, 1, result); } @@ -852,45 +858,6 @@ static int lov_lock_use(const struct lu_env *env, return result; } -#if 0 -static int lock_lock_multi_match() -{ - struct cl_lock *lock = slice->cls_lock; - struct cl_lock_descr *subneed = &lov_env_info(env)->lti_ldescr; - struct lov_object *loo = cl2lov(lov->lls_cl.cls_obj); - struct lov_layout_raid0 *r0 = lov_r0(loo); - struct lov_lock_sub *sub; - struct cl_object *subobj; - u64 fstart; - u64 fend; - u64 start; - u64 end; - int i; - - fstart = cl_offset(need->cld_obj, need->cld_start); - fend = cl_offset(need->cld_obj, need->cld_end + 1) - 1; - subneed->cld_mode = need->cld_mode; - cl_lock_mutex_get(env, lock); - for (i = 0; i < lov->lls_nr; ++i) { - sub = &lov->lls_sub[i]; - if (sub->sub_lock == NULL) - continue; - subobj = sub->sub_descr.cld_obj; - if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe, - fstart, fend, &start, &end)) - continue; - subneed->cld_start = cl_index(subobj, start); - subneed->cld_end = cl_index(subobj, end); - subneed->cld_obj = subobj; - if (!cl_lock_ext_match(&sub->sub_got, subneed)) { - result = 0; - break; - } - } - cl_lock_mutex_put(env, lock); -} -#endif - /** * Check if the extent region \a descr is covered by \a child against the * specific \a stripe. @@ -922,10 +889,10 @@ static int lov_lock_stripe_is_matching(const struct lu_env *env, idx = lov_stripe_number(lsm, start); if (idx == stripe || - unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) { + unlikely(!lov_r0(lov)->lo_sub[idx])) { idx = lov_stripe_number(lsm, end); if (idx == stripe || - unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) + unlikely(!lov_r0(lov)->lo_sub[idx])) result = 1; } } @@ -970,7 +937,8 @@ static int lov_lock_fits_into(const struct lu_env *env, LASSERT(lov->lls_nr > 0); /* for top lock, it's necessary to match enq flags otherwise it will - * run into problem if a sublock is missing and reenqueue. */ + * run into problem if a sublock is missing and reenqueue. 
+ */ if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags) return 0; @@ -1074,7 +1042,7 @@ static void lov_lock_delete(const struct lu_env *env, struct lov_lock_sub *lls = &lck->lls_sub[i]; struct lovsub_lock *lsl = lls->sub_lock; - if (lsl == NULL) /* already removed */ + if (!lsl) /* already removed */ continue; rc = lov_sublock_lock(env, lck, lls, closure, NULL); @@ -1090,9 +1058,9 @@ static void lov_lock_delete(const struct lu_env *env, lov_sublock_release(env, lck, i, 1, 0); link = lov_lock_link_find(env, lck, lsl); - LASSERT(link != NULL); + LASSERT(link); lov_lock_unlink(env, link, lsl); - LASSERT(lck->lls_sub[i].sub_lock == NULL); + LASSERT(!lck->lls_sub[i].sub_lock); lov_sublock_unlock(env, lsl, closure, NULL); } @@ -1112,7 +1080,7 @@ static int lov_lock_print(const struct lu_env *env, void *cookie, sub = &lck->lls_sub[i]; (*p)(env, cookie, " %d %x: ", i, sub->sub_flags); - if (sub->sub_lock != NULL) + if (sub->sub_lock) cl_lock_print(env, cookie, p, sub->sub_lock->lss_cl.cls_lock); else @@ -1139,8 +1107,8 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, struct lov_lock *lck; int result; - lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lck != NULL) { + lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); + if (lck) { cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops); result = lov_lock_sub_init(env, lck, io); } else @@ -1157,7 +1125,8 @@ static void lov_empty_lock_fini(const struct lu_env *env, } static int lov_empty_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct cl_lock_slice *slice) + lu_printer_t p, + const struct cl_lock_slice *slice) { (*p)(env, cookie, "empty\n"); return 0; @@ -1170,13 +1139,13 @@ static const struct cl_lock_operations lov_empty_lock_ops = { }; int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io) + struct cl_lock *lock, const struct cl_io *io) { struct lov_lock *lck; int result = -ENOMEM; - lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lck != NULL) { + lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); + if (lck) { cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops); lck->lls_orig = lock->cll_descr; result = 0; diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c index 97115bec7cca..029cd4d62796 100644 --- a/drivers/staging/lustre/lustre/lov/lov_merge.c +++ b/drivers/staging/lustre/lustre/lov/lov_merge.c @@ -129,7 +129,8 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm, "stripe %d KMS %sing %llu->%llu\n", stripe, kms > loi->loi_kms ? "increase":"shrink", loi->loi_kms, kms); - loi_kms_set(loi, loi->loi_lvb.lvb_size = kms); + loi->loi_lvb.lvb_size = kms; + loi_kms_set(loi, loi->loi_lvb.lvb_size); } return 0; } diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c index 6c2bdfe9cdcf..5daa7faf4dda 100644 --- a/drivers/staging/lustre/lustre/lov/lov_obd.c +++ b/drivers/staging/lustre/lustre/lov/lov_obd.c @@ -61,7 +61,8 @@ #include "lov_internal.h" /* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion. - Any function that expects lov_tgts to remain stationary must take a ref. */ + * Any function that expects lov_tgts to remain stationary must take a ref. 
+ */ static void lov_getref(struct obd_device *obd) { struct lov_obd *lov = &obd->u.lov; @@ -96,7 +97,8 @@ static void lov_putref(struct obd_device *obd) list_add(&tgt->ltd_kill, &kill); /* XXX - right now there is a dependency on ld_tgt_count * being the maximum tgt index for computing the - * mds_max_easize. So we can't shrink it. */ + * mds_max_easize. So we can't shrink it. + */ lov_ost_pool_remove(&lov->lov_packed, i); lov->lov_tgts[i] = NULL; lov->lov_death_row--; @@ -158,7 +160,8 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate, if (activate) { tgt_obd->obd_no_recov = 0; /* FIXME this is probably supposed to be - ptlrpc_set_import_active. Horrible naming. */ + * ptlrpc_set_import_active. Horrible naming. + */ ptlrpc_activate_import(imp); } @@ -262,7 +265,7 @@ static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt) osc_obd = class_exp2obd(tgt->ltd_exp); CDEBUG(D_CONFIG, "%s: disconnecting target %s\n", - obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL"); + obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL"); if (tgt->ltd_active) { tgt->ltd_active = 0; @@ -315,7 +318,8 @@ static int lov_disconnect(struct obd_export *exp) } /* Let's hold another reference so lov_del_obd doesn't spin through - putref every time */ + * putref every time + */ obd_getref(obd); for (i = 0; i < lov->desc.ld_tgt_count; i++) { @@ -358,7 +362,7 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid, * LU-642, initially inactive OSC could miss the obd_connect, * we make up for it here. */ - if (ev == OBD_NOTIFY_ACTIVATE && tgt->ltd_exp == NULL && + if (ev == OBD_NOTIFY_ACTIVATE && !tgt->ltd_exp && obd_uuid_equals(uuid, &tgt->ltd_uuid)) { struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"}; @@ -399,10 +403,9 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid, CDEBUG(D_INFO, "OSC %s already %sactive!\n", uuid->uuid, active ? "" : "in"); goto out; - } else { - CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n", - obd_uuid2str(uuid), active ? "" : "in"); } + CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n", + obd_uuid2str(uuid), active ? 
"" : "in"); lov->lov_tgts[index]->ltd_active = active; if (active) { @@ -481,7 +484,8 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched, continue; /* don't send sync event if target not - * connected/activated */ + * connected/activated + */ if (is_sync && !lov->lov_tgts[i]->ltd_active) continue; @@ -521,12 +525,12 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME, &obd->obd_uuid); - if (tgt_obd == NULL) + if (!tgt_obd) return -EINVAL; mutex_lock(&lov->lov_lock); - if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) { + if ((index < lov->lov_tgt_size) && lov->lov_tgts[index]) { tgt = lov->lov_tgts[index]; CERROR("UUID %s already assigned at LOV target index %d\n", obd_uuid2str(&tgt->ltd_uuid), index); @@ -543,7 +547,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, while (newsize < index + 1) newsize <<= 1; newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); - if (newtgts == NULL) { + if (!newtgts) { mutex_unlock(&lov->lov_lock); return -ENOMEM; } @@ -590,14 +594,15 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, mutex_unlock(&lov->lov_lock); CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n", - index, tgt->ltd_gen, lov->desc.ld_tgt_count); + index, tgt->ltd_gen, lov->desc.ld_tgt_count); rc = obd_notify(obd, tgt_obd, OBD_NOTIFY_CREATE, &index); if (lov->lov_connects == 0) { /* lov_connect hasn't been called yet. We'll do the - lov_connect_obd on this target when that fn first runs, - because we don't know the connect flags yet. */ + * lov_connect_obd on this target when that fn first runs, + * because we don't know the connect flags yet. + */ return 0; } @@ -613,11 +618,11 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, goto out; } - if (lov->lov_cache != NULL) { + if (lov->lov_cache) { rc = obd_set_info_async(NULL, tgt->ltd_exp, - sizeof(KEY_CACHE_SET), KEY_CACHE_SET, - sizeof(struct cl_client_cache), lov->lov_cache, - NULL); + sizeof(KEY_CACHE_SET), KEY_CACHE_SET, + sizeof(struct cl_client_cache), + lov->lov_cache, NULL); if (rc < 0) goto out; } @@ -702,8 +707,9 @@ static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt) kfree(tgt); /* Manual cleanup - no cleanup logs to clean up the osc's. We must - do it ourselves. And we can't do it from lov_cleanup, - because we just lost our only reference to it. */ + * do it ourselves. And we can't do it from lov_cleanup, + * because we just lost our only reference to it. 
+ */ if (osc_obd) class_manual_cleanup(osc_obd); } @@ -773,9 +779,9 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg) if (desc->ld_magic != LOV_DESC_MAGIC) { if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) { - CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n", - obd->obd_name, desc); - lustre_swab_lov_desc(desc); + CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n", + obd->obd_name, desc); + lustre_swab_lov_desc(desc); } else { CERROR("%s: Bad lov desc magic: %#x\n", obd->obd_name, desc->ld_magic); @@ -859,7 +865,8 @@ static int lov_cleanup(struct obd_device *obd) /* free pool structs */ CDEBUG(D_INFO, "delete pool %p\n", pool); /* In the function below, .hs_keycmp resolves to - * pool_hashkey_keycmp() */ + * pool_hashkey_keycmp() + */ /* coverity[overrun-buffer-val] */ lov_pool_del(obd, pool->pool_name); } @@ -879,8 +886,9 @@ static int lov_cleanup(struct obd_device *obd) if (lov->lov_tgts[i]->ltd_active || atomic_read(&lov->lov_refcount)) /* We should never get here - these - should have been removed in the - disconnect. */ + * should have been removed in the + * disconnect. + */ CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n", i, lov->lov_death_row, atomic_read(&lov->lov_refcount)); @@ -981,7 +989,7 @@ static int lov_recreate(struct obd_export *exp, struct obdo *src_oa, ost_idx = src_oa->o_nlink; lsm = *ea; - if (lsm == NULL) { + if (!lsm) { rc = -EINVAL; goto out; } @@ -1025,8 +1033,8 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, struct lov_obd *lov; int rc = 0; - LASSERT(ea != NULL); - if (exp == NULL) + LASSERT(ea); + if (!exp) return -EINVAL; if ((src_oa->o_valid & OBD_MD_FLFLAGS) && @@ -1043,7 +1051,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, /* Recreate a specific object id at the given OST index */ if ((src_oa->o_valid & OBD_MD_FLFLAGS) && (src_oa->o_flags & OBD_FL_RECREATE_OBJS)) { - rc = lov_recreate(exp, src_oa, ea, oti); + rc = lov_recreate(exp, src_oa, ea, oti); } obd_putref(exp->exp_obd); @@ -1052,7 +1060,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, #define ASSERT_LSM_MAGIC(lsmp) \ do { \ - LASSERT((lsmp) != NULL); \ + LASSERT((lsmp)); \ LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \ (lsmp)->lsm_magic == LOV_MAGIC_V3), \ "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \ @@ -1065,7 +1073,6 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, struct lov_request_set *set; struct obd_info oinfo; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0, err = 0; @@ -1085,9 +1092,7 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, if (rc) goto out; - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (oa->o_valid & OBD_MD_FLCOOKIE) oti->oti_logcookies = set->set_cookies + req->rq_stripe; @@ -1105,10 +1110,9 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, } } - if (rc == 0) { - LASSERT(lsm_op_find(lsm->lsm_magic) != NULL); + if (rc == 0) rc = lsm_op_find(lsm->lsm_magic)->lsm_destroy(lsm, oa, md_exp); - } + err = lov_fini_destroy_set(set); out: obd_putref(exp->exp_obd); @@ -1129,11 +1133,10 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset, } static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, - struct ptlrpc_request_set *rqset) + struct ptlrpc_request_set *rqset) { struct lov_request_set *lovset; struct lov_obd *lov; - struct 
list_head *pos; struct lov_request *req; int rc = 0, err; @@ -1153,9 +1156,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count, oinfo->oi_md->lsm_stripe_size); - list_for_each(pos, &lovset->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &lovset->set_list, rq_link) { CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe, POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx); @@ -1174,7 +1175,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, if (!list_empty(&rqset->set_requests)) { LASSERT(rc == 0); - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_getattr_interpret; rqset->set_arg = (void *)lovset; return rc; @@ -1199,14 +1200,14 @@ static int lov_setattr_interpret(struct ptlrpc_request_set *rqset, } /* If @oti is given, the request goes from MDS and responses from OSTs are not - needed. Otherwise, a client is waiting for responses. */ + * needed. Otherwise, a client is waiting for responses. + */ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_trans_info *oti, struct ptlrpc_request_set *rqset) { struct lov_request_set *set; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0; @@ -1230,9 +1231,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, oinfo->oi_md->lsm_stripe_count, oinfo->oi_md->lsm_stripe_size); - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) oti->oti_logcookies = set->set_cookies + req->rq_stripe; @@ -1262,7 +1261,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, return rc ? rc : err; } - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_setattr_interpret; rqset->set_arg = (void *)set; @@ -1272,7 +1271,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, /* find any ldlm lock of the inode in lov * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int lov_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, ldlm_iterator_t it, void *data) @@ -1326,20 +1326,17 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_device *obd = class_exp2obd(exp); struct lov_request_set *set; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0; - LASSERT(oinfo != NULL); - LASSERT(oinfo->oi_osfs != NULL); + LASSERT(oinfo->oi_osfs); lov = &obd->u.lov; rc = lov_prep_statfs_set(obd, oinfo, &set); if (rc) return rc; - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); + list_for_each_entry(req, &set->set_list, rq_link) { rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp, &req->rq_oi, max_age, rqset); if (rc) @@ -1355,7 +1352,7 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo, return rc ? 
rc : err; } - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_statfs_interpret; rqset->set_arg = (void *)set; return 0; @@ -1369,9 +1366,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp, int rc = 0; /* for obdclass we forbid using obd_statfs_rqset, but prefer using async - * statfs requests */ + * statfs requests + */ set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; oinfo.oi_osfs = osfs; @@ -1385,7 +1383,7 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp, } static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lov_obd *lov = &obddev->u.lov; @@ -1416,11 +1414,13 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) + min((int)data->ioc_plen2, + (int)sizeof(struct obd_uuid)))) return -EFAULT; - flags = uarg ? *(__u32 *)uarg : 0; + memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32)); + flags = flags & LL_STATFS_NODELAY ? OBD_STATFS_NODELAY : 0; + /* got statfs data */ rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf, cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), @@ -1428,8 +1428,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, if (rc) return rc; if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int) data->ioc_plen1, - (int) sizeof(stat_buf)))) + min((int)data->ioc_plen1, + (int)sizeof(stat_buf)))) return -EFAULT; break; } @@ -1501,7 +1501,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, &qctl->obd_uuid)) continue; - if (tgt->ltd_exp == NULL) + if (!tgt->ltd_exp) return -EINVAL; break; @@ -1543,14 +1543,15 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, continue; /* ll_umount_begin() sets force flag but for lov, not - * osc. Let's pass it through */ + * osc. Let's pass it through + */ osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp); osc_obd->obd_force = obddev->obd_force; err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp, len, karg, uarg); - if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) { + if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) return err; - } else if (err) { + if (err) { if (lov->lov_tgts[i]->ltd_active) { CDEBUG(err == -ENOTTY ? D_IOCTL : D_WARNING, @@ -1620,7 +1621,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap, return -EINVAL; /* If we have finished mapping on previous device, shift logical - * offset to start of next device */ + * offset to start of next device + */ if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end, &lun_start, &lun_end)) != 0 && local_end < lun_end) { @@ -1628,7 +1630,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap, *start_stripe = stripe_no; } else { /* This is a special value to indicate that caller should - * calculate offset in next stripe. */ + * calculate offset in next stripe. 
+ */ fm_end_offset = 0; *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count; } @@ -1739,7 +1742,7 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count); fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS); - if (fm_local == NULL) { + if (!fm_local) { rc = -ENOMEM; goto out; } @@ -1759,7 +1762,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, fm_end = fm_key->oa.o_size; last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end, - actual_start_stripe, &stripe_count); + actual_start_stripe, + &stripe_count); fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end, &start_stripe); @@ -1796,7 +1800,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, /* If this is a continuation FIEMAP call and we are on * starting stripe then lun_start needs to be set to - * fm_end_offset */ + * fm_end_offset + */ if (fm_end_offset != 0 && cur_stripe == start_stripe) lun_start = fm_end_offset; @@ -1818,7 +1823,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, len_mapped_single_call = 0; /* If the output buffer is very large and the objects have many - * extents we may need to loop on a single OST repeatedly */ + * extents we may need to loop on a single OST repeatedly + */ ost_eof = 0; ost_done = 0; do { @@ -1874,7 +1880,8 @@ inactive_tgt: if (ext_count == 0) { ost_done = 1; /* If last stripe has hole at the end, - * then we need to return */ + * then we need to return + */ if (cur_stripe_wrap == last_stripe) { fiemap->fm_mapped_extents = 0; goto finish; @@ -1896,7 +1903,8 @@ inactive_tgt: ost_done = 1; /* Clear the EXTENT_LAST flag which can be present on - * last extent */ + * last extent + */ if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST) lcl_fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST; @@ -1925,7 +1933,8 @@ inactive_tgt: finish: /* Indicate that we are returning device offsets unless file just has - * single stripe */ + * single stripe + */ if (lsm->lsm_stripe_count > 1) fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER; @@ -1933,7 +1942,8 @@ finish: goto skip_last_device_calc; /* Check if we have reached the last stripe and whether mapping for that - * stripe is done. */ + * stripe is done. + */ if (cur_stripe_wrap == last_stripe) { if (ost_done || ost_eof) fiemap->fm_extents[current_extent - 1].fe_flags |= @@ -1978,10 +1988,12 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp, /* XXX This is another one of those bits that will need to * change if we ever actually support nested LOVs. It uses - * the lock's export to find out which stripe it is. */ + * the lock's export to find out which stripe it is. + */ /* XXX - it's assumed all the locks for deleted OSTs have * been cancelled. Also, the export for deleted OSTs will - * be NULL and won't match the lock's export. */ + * be NULL and won't match the lock's export. 
+ */ for (i = 0; i < lsm->lsm_stripe_count; i++) { loi = lsm->lsm_oinfo[i]; if (lov_oinfo_is_dummy(loi)) @@ -2070,7 +2082,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, unsigned next_id = 0, mds_con = 0; incr = check_uuid = do_inactive = no_set = 0; - if (set == NULL) { + if (!set) { no_set = 1; set = ptlrpc_prep_set(); if (!set) @@ -2093,7 +2105,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, } else if (KEY_IS(KEY_MDS_CONN)) { mds_con = 1; } else if (KEY_IS(KEY_CACHE_SET)) { - LASSERT(lov->lov_cache == NULL); + LASSERT(!lov->lov_cache); lov->lov_cache = val; do_inactive = 1; } @@ -2119,12 +2131,12 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, /* Only want a specific OSC */ if (mgi->uuid && !obd_uuid_equals(mgi->uuid, - &tgt->ltd_uuid)) + &tgt->ltd_uuid)) continue; err = obd_set_info_async(env, tgt->ltd_exp, - keylen, key, sizeof(int), - &mgi->group, set); + keylen, key, sizeof(int), + &mgi->group, set); } else if (next_id) { err = obd_set_info_async(env, tgt->ltd_exp, keylen, key, vallen, @@ -2136,7 +2148,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, continue; err = obd_set_info_async(env, tgt->ltd_exp, - keylen, key, vallen, val, set); + keylen, key, vallen, val, set); } if (!rc) @@ -2187,7 +2199,7 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp, oqctl->qc_cmd != Q_INITQUOTA && oqctl->qc_cmd != LUSTRE_Q_SETQUOTA && oqctl->qc_cmd != Q_FINVALIDATE) { - CERROR("bad quota opc %x for lov obd", oqctl->qc_cmd); + CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd); return -EFAULT; } @@ -2317,7 +2329,8 @@ static int __init lov_init(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. + */ CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches); rc = lu_kmem_init(lov_caches); @@ -2325,9 +2338,9 @@ static int __init lov_init(void) return rc; lov_oinfo_slab = kmem_cache_create("lov_oinfo", - sizeof(struct lov_oinfo), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (lov_oinfo_slab == NULL) { + sizeof(struct lov_oinfo), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!lov_oinfo_slab) { lu_kmem_fini(lov_caches); return -ENOMEM; } @@ -2353,7 +2366,7 @@ static void /*__exit*/ lov_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver"); +MODULE_DESCRIPTION("Lustre Logical Object Volume"); MODULE_LICENSE("GPL"); MODULE_VERSION(LUSTRE_VERSION_STRING); diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c index 3b79ebc8eccf..1f8ed95a6d89 100644 --- a/drivers/staging/lustre/lustre/lov/lov_object.c +++ b/drivers/staging/lustre/lustre/lov/lov_object.c @@ -59,7 +59,7 @@ struct lov_layout_operations { const struct cl_object_conf *conf, union lov_layout_state *state); int (*llo_delete)(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state); + union lov_layout_state *state); void (*llo_fini)(const struct lu_env *env, struct lov_object *lov, union lov_layout_state *state); void (*llo_install)(const struct lu_env *env, struct lov_object *lov, @@ -67,7 +67,7 @@ struct lov_layout_operations { int (*llo_print)(const struct lu_env *env, void *cookie, lu_printer_t p, const struct lu_object *o); int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage); + struct cl_page *page, struct page *vmpage); int (*llo_lock_init)(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); @@ -135,7 +135,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, * Do not leave the object in cache to avoid accessing * freed memory. This is because osc_object is referring to * lov_oinfo of lsm_stripe_data which will be freed due to - * this failure. */ + * this failure. + */ cl_object_kill(env, stripe); cl_object_put(env, stripe); return -EIO; @@ -154,7 +155,7 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, /* reuse ->coh_attr_guard to protect coh_parent change */ spin_lock(&subhdr->coh_attr_guard); parent = subhdr->coh_parent; - if (parent == NULL) { + if (!parent) { subhdr->coh_parent = hdr; spin_unlock(&subhdr->coh_attr_guard); subhdr->coh_nesting = hdr->coh_nesting + 1; @@ -170,11 +171,12 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, spin_unlock(&subhdr->coh_attr_guard); old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type); - LASSERT(old_obj != NULL); + LASSERT(old_obj); old_lov = cl2lov(lu2cl(old_obj)); if (old_lov->lo_layout_invalid) { /* the object's layout has already changed but isn't - * refreshed */ + * refreshed + */ lu_object_unhash(env, &stripe->co_lu); result = -EAGAIN; } else { @@ -212,14 +214,14 @@ static int lov_init_raid0(const struct lu_env *env, LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic); } - LASSERT(lov->lo_lsm == NULL); + LASSERT(!lov->lo_lsm); lov->lo_lsm = lsm_addref(lsm); r0->lo_nr = lsm->lsm_stripe_count; LASSERT(r0->lo_nr <= lov_targets_nr(dev)); r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]), GFP_NOFS); - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { result = 0; subconf->coc_inode = conf->coc_inode; spin_lock_init(&r0->lo_sub_lock); @@ -241,9 +243,10 @@ static int lov_init_raid0(const struct lu_env *env, subdev = lovsub2cl_dev(dev->ld_target[ost_idx]); subconf->u.coc_oinfo = oinfo; - LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx); + LASSERTF(subdev, "not init ost %d\n", ost_idx); /* In the function below, .hs_keycmp resolves to - * lu_obj_hop_keycmp() */ + * lu_obj_hop_keycmp() + */ /* coverity[overrun-buffer-val] */ stripe = lov_sub_find(env, subdev, ofid, subconf); if (!IS_ERR(stripe)) { @@ -263,15 +266,15 @@ out: } static int 
lov_init_released(const struct lu_env *env, - struct lov_device *dev, struct lov_object *lov, - const struct cl_object_conf *conf, - union lov_layout_state *state) + struct lov_device *dev, struct lov_object *lov, + const struct cl_object_conf *conf, + union lov_layout_state *state) { struct lov_stripe_md *lsm = conf->u.coc_md->lsm; - LASSERT(lsm != NULL); + LASSERT(lsm); LASSERT(lsm_is_released(lsm)); - LASSERT(lov->lo_lsm == NULL); + LASSERT(!lov->lo_lsm); lov->lo_lsm = lsm_addref(lsm); return 0; @@ -310,7 +313,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, cl_object_put(env, sub); /* ... wait until it is actually destroyed---sub-object clears its - * ->lo_sub[] slot in lovsub_object_fini() */ + * ->lo_sub[] slot in lovsub_object_fini() + */ if (r0->lo_sub[idx] == los) { waiter = &lov_env_info(env)->lti_waiter; init_waitqueue_entry(waiter, current); @@ -318,7 +322,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, set_current_state(TASK_UNINTERRUPTIBLE); while (1) { /* this wait-queue is signaled at the end of - * lu_object_free(). */ + * lu_object_free(). + */ set_current_state(TASK_UNINTERRUPTIBLE); spin_lock(&r0->lo_sub_lock); if (r0->lo_sub[idx] == los) { @@ -332,7 +337,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, } remove_wait_queue(&bkt->lsb_marche_funebre, waiter); } - LASSERT(r0->lo_sub[idx] == NULL); + LASSERT(!r0->lo_sub[idx]); } static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov, @@ -345,11 +350,11 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov, dump_lsm(D_INODE, lsm); lov_layout_wait(env, lov); - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { for (i = 0; i < r0->lo_nr; ++i) { struct lovsub_object *los = r0->lo_sub[i]; - if (los != NULL) { + if (los) { cl_locks_prune(env, &los->lso_cl, 1); /* * If top-level object is to be evicted from @@ -374,7 +379,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov, { struct lov_layout_raid0 *r0 = &state->raid0; - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { kvfree(r0->lo_sub); r0->lo_sub = NULL; } @@ -384,7 +389,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov, } static void lov_fini_released(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) + union lov_layout_state *state) { dump_lsm(D_INODE, lov->lo_lsm); lov_free_memmd(&lov->lo_lsm); @@ -406,13 +411,13 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie, int i; (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n", - r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm, - lsm->lsm_magic, atomic_read(&lsm->lsm_refc), - lsm->lsm_stripe_count, lsm->lsm_layout_gen); + r0->lo_nr, lov->lo_layout_invalid ? 
"invalid" : "valid", lsm, + lsm->lsm_magic, atomic_read(&lsm->lsm_refc), + lsm->lsm_stripe_count, lsm->lsm_layout_gen); for (i = 0; i < r0->lo_nr; ++i) { struct lu_object *sub; - if (r0->lo_sub[i] != NULL) { + if (r0->lo_sub[i]) { sub = lovsub2lu(r0->lo_sub[i]); lu_object_print(env, cookie, p, sub); } else { @@ -423,16 +428,16 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie, } static int lov_print_released(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) + lu_printer_t p, const struct lu_object *o) { struct lov_object *lov = lu2lov(o); struct lov_stripe_md *lsm = lov->lo_lsm; (*p)(env, cookie, - "released: %s, lsm{%p 0x%08X %d %u %u}:\n", - lov->lo_layout_invalid ? "invalid" : "valid", lsm, - lsm->lsm_magic, atomic_read(&lsm->lsm_refc), - lsm->lsm_stripe_count, lsm->lsm_layout_gen); + "released: %s, lsm{%p 0x%08X %d %u %u}:\n", + lov->lo_layout_invalid ? "invalid" : "valid", lsm, + lsm->lsm_magic, atomic_read(&lsm->lsm_refc), + lsm->lsm_stripe_count, lsm->lsm_layout_gen); return 0; } @@ -465,7 +470,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj, * context, and this function is called in ccc_lock_state(), it will * hit this assertion. * Anyway, it's still okay to call attr_get w/o type guard as layout - * can't go if locks exist. */ + * can't go if locks exist. + */ /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */ if (!r0->lo_attr_valid) { @@ -475,7 +481,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj, memset(lvb, 0, sizeof(*lvb)); /* XXX: timestamps can be negative by sanity:test_39m, - * how can it be? */ + * how can it be? + */ lvb->lvb_atime = LLONG_MIN; lvb->lvb_ctime = LLONG_MIN; lvb->lvb_mtime = LLONG_MIN; @@ -569,7 +576,7 @@ static const struct lov_layout_operations lov_dispatch[] = { */ static enum lov_layout_type lov_type(struct lov_stripe_md *lsm) { - if (lsm == NULL) + if (!lsm) return LLT_EMPTY; if (lsm_is_released(lsm)) return LLT_RELEASED; @@ -624,7 +631,7 @@ static void lov_conf_lock(struct lov_object *lov) { LASSERT(lov->lo_owner != current); down_write(&lov->lo_type_guard); - LASSERT(lov->lo_owner == NULL); + LASSERT(!lov->lo_owner); lov->lo_owner = current; } @@ -639,9 +646,9 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov) struct l_wait_info lwi = { 0 }; while (atomic_read(&lov->lo_active_ios) > 0) { - CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n", - PFID(lu_object_fid(lov2lu(lov))), - atomic_read(&lov->lo_active_ios)); + CDEBUG(D_INODE, "file:" DFID " wait for active IO, now: %d.\n", + PFID(lu_object_fid(lov2lu(lov))), + atomic_read(&lov->lo_active_ios)); l_wait_event(lov->lo_waitq, atomic_read(&lov->lo_active_ios) == 0, &lwi); @@ -666,7 +673,7 @@ static int lov_layout_change(const struct lu_env *unused, LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch)); - if (conf->u.coc_md != NULL) + if (conf->u.coc_md) llt = lov_type(conf->u.coc_md->lsm); LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch)); @@ -689,7 +696,7 @@ static int lov_layout_change(const struct lu_env *unused, old_ops->llo_fini(env, lov, &lov->u); LASSERT(atomic_read(&lov->lo_active_ios) == 0); - LASSERT(hdr->coh_tree.rnode == NULL); + LASSERT(!hdr->coh_tree.rnode); LASSERT(hdr->coh_pages == 0); lov->lo_type = LLT_EMPTY; @@ -767,10 +774,10 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, LASSERT(conf->coc_opc == OBJECT_CONF_SET); - if (conf->u.coc_md != NULL) + if (conf->u.coc_md) lsm = 
conf->u.coc_md->lsm; - if ((lsm == NULL && lov->lo_lsm == NULL) || - ((lsm != NULL && lov->lo_lsm != NULL) && + if ((!lsm && !lov->lo_lsm) || + ((lsm && lov->lo_lsm) && (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) && (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) { /* same version of layout */ @@ -818,7 +825,7 @@ static int lov_object_print(const struct lu_env *env, void *cookie, } int lov_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page, vmpage); @@ -845,7 +852,8 @@ static int lov_attr_get(const struct lu_env *env, struct cl_object *obj, struct cl_attr *attr) { /* do not take lock, as this function is called under a - * spin-lock. Layout is protected from changing by ongoing IO. */ + * spin-lock. Layout is protected from changing by ongoing IO. + */ return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr); } @@ -891,8 +899,8 @@ struct lu_object *lov_object_alloc(const struct lu_env *env, struct lov_object *lov; struct lu_object *obj; - lov = kmem_cache_alloc(lov_object_kmem, GFP_NOFS | __GFP_ZERO); - if (lov != NULL) { + lov = kmem_cache_zalloc(lov_object_kmem, GFP_NOFS); + if (lov) { obj = lov2lu(lov); lu_object_init(obj, NULL, dev); lov->lo_cl.co_ops = &lov_ops; @@ -913,11 +921,11 @@ static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov) struct lov_stripe_md *lsm = NULL; lov_conf_freeze(lov); - if (lov->lo_lsm != NULL) { + if (lov->lo_lsm) { lsm = lsm_addref(lov->lo_lsm); CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n", - lsm, atomic_read(&lsm->lsm_refc), - lov->lo_layout_invalid, current); + lsm, atomic_read(&lsm->lsm_refc), + lov->lo_layout_invalid, current); } lov_conf_thaw(lov); return lsm; @@ -928,12 +936,12 @@ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj) struct lu_object *luobj; struct lov_stripe_md *lsm = NULL; - if (clobj == NULL) + if (!clobj) return NULL; luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu, &lov_device_type); - if (luobj != NULL) + if (luobj) lsm = lov_lsm_addref(lu2lov(luobj)); return lsm; } @@ -941,7 +949,7 @@ EXPORT_SYMBOL(lov_lsm_get); void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm) { - if (lsm != NULL) + if (lsm) lov_free_memmd(&lsm); } EXPORT_SYMBOL(lov_lsm_put); @@ -953,7 +961,7 @@ int lov_read_and_clear_async_rc(struct cl_object *clob) luobj = lu_object_locate(&cl_object_header(clob)->coh_lu, &lov_device_type); - if (luobj != NULL) { + if (luobj) { struct lov_object *lov = lu2lov(luobj); lov_conf_freeze(lov); @@ -963,7 +971,6 @@ int lov_read_and_clear_async_rc(struct cl_object *clob) int i; lsm = lov->lo_lsm; - LASSERT(lsm != NULL); for (i = 0; i < lsm->lsm_stripe_count; i++) { struct lov_oinfo *loi = lsm->lsm_oinfo[i]; diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c index aa520aa76e09..ae83eb0f6f36 100644 --- a/drivers/staging/lustre/lustre/lov/lov_offset.c +++ b/drivers/staging/lustre/lustre/lov/lov_offset.c @@ -43,8 +43,7 @@ #include "lov_internal.h" /* compute object size given "stripeno" and the ost size */ -u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, - int stripeno) +u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno) { unsigned long ssize = lsm->lsm_stripe_size; unsigned long stripe_size; @@ -55,7 +54,6 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, if (ost_size == 0) return 0; - 
LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth); /* lov_do_div64(a, b) returns a % b, and a = a / b */ @@ -115,7 +113,8 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, * this function returns < 0 when the offset was "before" the stripe and * was moved forward to the start of the stripe in question; 0 when it * falls in the stripe and no shifting was done; > 0 when the offset - * was outside the stripe and was pulled back to its final byte. */ + * was outside the stripe and was pulled back to its final byte. + */ int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, int stripeno, u64 *obdoff) { @@ -129,8 +128,6 @@ int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, return 0; } - LASSERT(lsm_op_find(magic) != NULL); - lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off, &swidth); @@ -183,7 +180,6 @@ u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, if (file_size == OBD_OBJECT_EOF) return OBD_OBJECT_EOF; - LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size, &swidth); @@ -213,7 +209,8 @@ u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, /* given an extent in an lov and a stripe, calculate the extent of the stripe * that is contained within the lov extent. this returns true if the given - * stripe does intersect with the lov extent. */ + * stripe does intersect with the lov extent. + */ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, u64 start, u64 end, u64 *obd_start, u64 *obd_end) { @@ -227,7 +224,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, /* this stripe doesn't intersect the file extent when neither * start or the end intersected the stripe and obd_start and - * obd_end got rounded up to the save value. */ + * obd_end got rounded up to the save value. + */ if (start_side != 0 && end_side != 0 && *obd_start == *obd_end) return 0; @@ -238,7 +236,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, * in the wrong direction and touch it up. * interestingly, this can't underflow since end must be > start * if we passed through the previous check. - * (should we assert for that somewhere?) */ + * (should we assert for that somewhere?) + */ if (end_side != 0) (*obd_end)--; @@ -252,7 +251,6 @@ int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off) u64 stripe_off, swidth; int magic = lsm->lsm_magic; - LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth); stripe_off = lov_do_div64(lov_off, swidth); diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c index 6b2d1007192b..3925633a99ec 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pack.c +++ b/drivers/staging/lustre/lustre/lov/lov_pack.c @@ -134,17 +134,18 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, if ((lmm_magic != LOV_MAGIC_V1) && (lmm_magic != LOV_MAGIC_V3)) { CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", - lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); + lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); return -EINVAL; } if (lsm) { /* If we are just sizing the EA, limit the stripe count - * to the actual number of OSTs in this filesystem. */ + * to the actual number of OSTs in this filesystem. 
+ */ if (!lmmp) { stripe_count = lov_get_stripecnt(lov, lmm_magic, - lsm->lsm_stripe_count); + lsm->lsm_stripe_count); lsm->lsm_stripe_count = stripe_count; } else if (!lsm_is_released(lsm)) { stripe_count = lsm->lsm_stripe_count; @@ -155,7 +156,8 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, /* No need to allocate more than maximum supported stripes. * Anyway, this is pretty inaccurate since ld_tgt_count now * represents max index and we should rely on the actual number - * of OSTs instead */ + * of OSTs instead + */ stripe_count = lov_mds_md_max_stripe_count( lov->lov_ocd.ocd_max_easize, lmm_magic); @@ -183,7 +185,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, return -ENOMEM; } - CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n", + CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n", lmm_magic, lmm_size); lmmv1 = *lmmp; @@ -241,7 +243,8 @@ __u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count) stripe_count = 1; /* stripe count is based on whether ldiskfs can handle - * larger EA sizes */ + * larger EA sizes + */ if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE && lov->lov_ocd.ocd_max_easize) max_stripes = lov_mds_md_max_stripe_count( @@ -257,14 +260,15 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count) { int rc; - if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) { + if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) { CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n", le32_to_cpu(*(__u32 *)lmm), lmm_bytes); CERROR("%*phN\n", lmm_bytes, lmm); return -EINVAL; } rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm, - lmm_bytes, stripe_count); + lmm_bytes, + stripe_count); return rc; } @@ -306,10 +310,9 @@ int lov_free_memmd(struct lov_stripe_md **lsmp) *lsmp = NULL; LASSERT(atomic_read(&lsm->lsm_refc) > 0); refc = atomic_dec_return(&lsm->lsm_refc); - if (refc == 0) { - LASSERT(lsm_op_find(lsm->lsm_magic) != NULL); + if (refc == 0) lsm_op_find(lsm->lsm_magic)->lsm_free(lsm); - } + return refc; } @@ -359,7 +362,6 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, if (!lmm) return lsm_size; - LASSERT(lsm_op_find(magic) != NULL); rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm); if (rc) { lov_free_memmd(lsmp); @@ -376,7 +378,7 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, * lmm_magic must be LOV_USER_MAGIC. */ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, - struct lov_user_md *lump) + struct lov_user_md __user *lump) { /* * XXX huge struct allocated on stack. 
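Most of the hunks in this series are mechanical checkpatch cleanups rather than functional changes. A minimal sketch of the two conventions they enforce: block comments closed on their own line, and pointer tests written in boolean style. The function and table below are illustrative only, not taken from the patch.

static void *tbl_lookup(void *tbl[], int idx)
{
        /* Block comments now put the closing marker on its own line,
         * matching the preferred kernel style applied throughout the series.
         */
        if (!tbl[idx])          /* rather than "tbl[idx] == NULL" */
                return NULL;

        return tbl[idx];
}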
@@ -399,13 +401,15 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, set_fs(KERNEL_DS); /* we only need the header part from user space to get lmm_magic and - * lmm_stripe_count, (the header part is common to v1 and v3) */ + * lmm_stripe_count, (the header part is common to v1 and v3) + */ lum_size = sizeof(struct lov_user_md_v1); if (copy_from_user(&lum, lump, lum_size)) { rc = -EFAULT; goto out_set; - } else if ((lum.lmm_magic != LOV_USER_MAGIC) && - (lum.lmm_magic != LOV_USER_MAGIC_V3)) { + } + if ((lum.lmm_magic != LOV_USER_MAGIC) && + (lum.lmm_magic != LOV_USER_MAGIC_V3)) { rc = -EINVAL; goto out_set; } diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c index 037ae91b74e7..fdcaf8047ad8 100644 --- a/drivers/staging/lustre/lustre/lov/lov_page.c +++ b/drivers/staging/lustre/lustre/lov/lov_page.c @@ -57,7 +57,7 @@ static int lov_page_invariant(const struct cl_page_slice *slice) const struct cl_page *page = slice->cpl_page; const struct cl_page *sub = lov_sub_page(slice); - return ergo(sub != NULL, + return ergo(sub, page->cp_child == sub && sub->cp_parent == page && page->cp_state == sub->cp_state); @@ -70,7 +70,7 @@ static void lov_page_fini(const struct lu_env *env, LINVRNT(lov_page_invariant(slice)); - if (sub != NULL) { + if (sub) { LASSERT(sub->cp_state == CPS_FREEING); lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent); sub->cp_parent = NULL; @@ -151,7 +151,7 @@ static const struct cl_page_operations lov_page_ops = { static void lov_empty_page_fini(const struct lu_env *env, struct cl_page_slice *slice) { - LASSERT(slice->cpl_page->cp_child == NULL); + LASSERT(!slice->cpl_page->cp_child); } int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, @@ -172,8 +172,7 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, offset = cl_offset(obj, page->cp_index); stripe = lov_stripe_number(loo->lo_lsm, offset); LASSERT(stripe < r0->lo_nr); - rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, - &suboff); + rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff); LASSERT(rc == 0); lpg->lps_invalid = 1; diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c index b43ce6cd64c2..9ae1d6f42d6e 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pool.c +++ b/drivers/staging/lustre/lustre/lov/lov_pool.c @@ -64,7 +64,7 @@ void lov_pool_putref(struct pool_desc *pool) if (atomic_dec_and_test(&pool->pool_refcount)) { LASSERT(hlist_unhashed(&pool->pool_hash)); LASSERT(list_empty(&pool->pool_list)); - LASSERT(pool->pool_debugfs_entry == NULL); + LASSERT(!pool->pool_debugfs_entry); lov_ost_pool_free(&(pool->pool_rr.lqr_pool)); lov_ost_pool_free(&(pool->pool_obds)); kfree(pool); @@ -152,9 +152,8 @@ struct cfs_hash_ops pool_hash_operations = { }; -/* ifdef needed for liblustre support */ /* - * pool /proc seq_file methods + * pool debugfs seq_file methods */ /* * iterator is used to go through the target pool entries @@ -174,7 +173,7 @@ static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos) struct pool_iterator *iter = (struct pool_iterator *)s->private; int prev_idx; - LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); + LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic); /* test if end of file */ if (*pos >= pool_tgt_count(iter->pool)) @@ -204,7 +203,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos) if ((pool_tgt_count(pool) == 0) || (*pos >= pool_tgt_count(pool))) { /* iter is not 
created, so stop() has no way to - * find pool to dec ref */ + * find pool to dec ref + */ lov_pool_putref(pool); return NULL; } @@ -217,7 +217,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos) iter->idx = 0; /* we use seq_file private field to memorized iterator so - * we can free it at stop() */ + * we can free it at stop() + */ /* /!\ do not forget to restore it to pool before freeing it */ s->private = iter; if (*pos > 0) { @@ -226,8 +227,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos) i = 0; do { - ptr = pool_proc_next(s, &iter, &i); - } while ((i < *pos) && (ptr != NULL)); + ptr = pool_proc_next(s, &iter, &i); + } while ((i < *pos) && ptr); return ptr; } return iter; @@ -239,15 +240,16 @@ static void pool_proc_stop(struct seq_file *s, void *v) /* in some cases stop() method is called 2 times, without * calling start() method (see seq_read() from fs/seq_file.c) - * we have to free only if s->private is an iterator */ + * we have to free only if s->private is an iterator + */ if ((iter) && (iter->magic == POOL_IT_MAGIC)) { /* we restore s->private so next call to pool_proc_start() - * will work */ + * will work + */ s->private = iter->pool; lov_pool_putref(iter->pool); kfree(iter); } - return; } static int pool_proc_show(struct seq_file *s, void *v) @@ -255,8 +257,8 @@ static int pool_proc_show(struct seq_file *s, void *v) struct pool_iterator *iter = (struct pool_iterator *)v; struct lov_tgt_desc *tgt; - LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); - LASSERT(iter->pool != NULL); + LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic); + LASSERT(iter->pool); LASSERT(iter->idx <= pool_tgt_count(iter->pool)); down_read(&pool_tgt_rw_sem(iter->pool)); @@ -305,7 +307,7 @@ int lov_ost_pool_init(struct ost_pool *op, unsigned int count) init_rwsem(&op->op_rw_sem); op->op_size = count; op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS); - if (op->op_array == NULL) { + if (!op->op_array) { op->op_size = 0; return -ENOMEM; } @@ -325,7 +327,7 @@ int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count) new_size = max(min_count, 2 * op->op_size); new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS); - if (new == NULL) + if (!new) return -ENOMEM; /* copy old array to new one */ @@ -429,8 +431,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname) INIT_HLIST_NODE(&new_pool->pool_hash); - /* we need this assert seq_file is not implemented for liblustre */ - /* get ref for /proc file */ + /* get ref for debugfs file */ lov_pool_getref(new_pool); new_pool->pool_debugfs_entry = ldebugfs_add_simple( lov->lov_pool_debugfs_entry, @@ -443,7 +444,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname) lov_pool_putref(new_pool); } CDEBUG(D_INFO, "pool %p - proc %p\n", - new_pool, new_pool->pool_debugfs_entry); + new_pool, new_pool->pool_debugfs_entry); spin_lock(&obd->obd_dev_lock); list_add_tail(&new_pool->pool_list, &lov->lov_pool_list); @@ -487,7 +488,7 @@ int lov_pool_del(struct obd_device *obd, char *poolname) /* lookup and kill hash reference */ pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) { @@ -518,7 +519,7 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname) lov = &(obd->u.lov); pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; obd_str2uuid(&ost_uuid, ostname); @@ -564,7 +565,7 @@ int 
lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname) lov = &(obd->u.lov); pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; obd_str2uuid(&ost_uuid, ostname); @@ -632,12 +633,12 @@ struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname) pool = NULL; if (poolname[0] != '\0') { pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n", poolname); - if ((pool != NULL) && (pool_tgt_count(pool) == 0)) { + if (pool && (pool_tgt_count(pool) == 0)) { CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n", - poolname); + poolname); /* pool is ignored, so we remove ref on it */ lov_pool_putref(pool); pool = NULL; diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c index 42deda71f577..7178a02d6267 100644 --- a/drivers/staging/lustre/lustre/lov/lov_request.c +++ b/drivers/staging/lustre/lustre/lov/lov_request.c @@ -156,7 +156,7 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) tgt = lov->lov_tgts[ost_idx]; - if (unlikely(tgt == NULL)) { + if (unlikely(!tgt)) { rc = 0; goto out; } @@ -178,7 +178,7 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) cfs_time_seconds(1), NULL, NULL); rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi); - if (tgt != NULL && tgt->ltd_active) + if (tgt->ltd_active) return 1; return 0; @@ -190,28 +190,23 @@ out: static int common_attr_done(struct lov_request_set *set) { - struct list_head *pos; struct lov_request *req; struct obdo *tmp_oa; int rc = 0, attrset = 0; - LASSERT(set->set_oi != NULL); - - if (set->set_oi->oi_oa == NULL) + if (!set->set_oi->oi_oa) return 0; if (!atomic_read(&set->set_success)) return -EIO; - tmp_oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (tmp_oa == NULL) { + tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!tmp_oa) { rc = -ENOMEM; goto out; } - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (!req->rq_complete || req->rq_rc) continue; if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */ @@ -227,7 +222,8 @@ static int common_attr_done(struct lov_request_set *set) if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) && (set->set_oi->oi_md->lsm_stripe_count != attrset)) { /* When we take attributes of some epoch, we require all the - * ost to be active. */ + * ost to be active. + */ CERROR("Not all the stripes had valid attrs\n"); rc = -EIO; goto out; @@ -246,7 +242,7 @@ int lov_fini_getattr_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) @@ -258,7 +254,8 @@ int lov_fini_getattr_set(struct lov_request_set *set) } /* The callback for osc_getattr_async that finalizes a request info when a - * response is received. */ + * response is received. 
+ */ static int cb_getattr_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -310,9 +307,8 @@ int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -337,7 +333,7 @@ out_set: int lov_fini_destroy_set(struct lov_request_set *set) { - if (set == NULL) + if (!set) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) { @@ -368,7 +364,7 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo, set->set_oi->oi_md = lsm; set->set_oi->oi_oa = src_oa; set->set_oti = oti; - if (oti != NULL && src_oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE) set->set_cookies = oti->oti_logcookies; for (i = 0; i < lsm->lsm_stripe_count; i++) { @@ -393,9 +389,8 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -419,7 +414,7 @@ int lov_fini_setattr_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) { @@ -460,7 +455,8 @@ int lov_update_setattr_set(struct lov_request_set *set, } /* The callback for osc_setattr_async that finalizes a request info when a - * response is received. */ + * response is received. + */ static int cb_setattr_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -486,7 +482,7 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, set->set_exp = exp; set->set_oti = oti; set->set_oi = oinfo; - if (oti != NULL && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) set->set_cookies = oti->oti_logcookies; for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) { @@ -509,9 +505,8 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -581,7 +576,7 @@ int lov_fini_statfs_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) return 0; if (atomic_read(&set->set_completes)) { @@ -648,7 +643,8 @@ static void lov_update_statfs(struct obd_statfs *osfs, } /* The callback for osc_statfs_async that finalizes a request info when a - * response is received. */ + * response is received. + */ static int cb_statfs_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -668,7 +664,8 @@ static int cb_statfs_update(void *cookie, int rc) lov_sfs = oinfo->oi_osfs; success = atomic_read(&set->set_success); /* XXX: the same is done in lov_update_common_set, however - lovset->set_exp is not initialized. */ + * lovset->set_exp is not initialized. 
+ */ lov_update_set(set, lovreq, rc); if (rc) goto out; @@ -718,7 +715,7 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, for (i = 0; i < lov->desc.ld_tgt_count; i++) { struct lov_request *req; - if (lov->lov_tgts[i] == NULL || + if (!lov->lov_tgts[i] || (!lov_check_and_wait_active(lov, i) && (oinfo->oi_flags & OBD_STATFS_NODELAY))) { CDEBUG(D_HA, "lov idx %d inactive\n", i); @@ -726,7 +723,8 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, } /* skip targets that have been explicitly disabled by the - * administrator */ + * administrator + */ if (!lov->lov_tgts[i]->ltd_exp) { CDEBUG(D_HA, "lov idx %d administratively disabled\n", i); continue; diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c index f1795c3e2db5..c335c020f4f4 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c @@ -101,7 +101,6 @@ static int lovsub_device_init(const struct lu_env *env, struct lu_device *d, next->ld_site = d->ld_site; ldt = next->ld_type; - LASSERT(ldt != NULL); rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL); if (rc) { next->ld_site = NULL; @@ -148,8 +147,8 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev, struct lovsub_req *lsr; int result; - lsr = kmem_cache_alloc(lovsub_req_kmem, GFP_NOFS | __GFP_ZERO); - if (lsr != NULL) { + lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS); + if (lsr) { cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops); result = 0; } else @@ -175,7 +174,7 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env, struct lovsub_device *lsd; lsd = kzalloc(sizeof(*lsd), GFP_NOFS); - if (lsd != NULL) { + if (lsd) { int result; result = cl_device_init(&lsd->acid_cl, t); diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c index 1a3e30a14895..3bb0c9068a90 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c @@ -148,7 +148,8 @@ static void lovsub_lock_descr_map(const struct cl_lock_descr *in, { pgoff_t size; /* stripe size in pages */ pgoff_t skip; /* how many pages in every stripe are occupied by - * "other" stripes */ + * "other" stripes + */ pgoff_t start; pgoff_t end; @@ -284,7 +285,8 @@ static int lovsub_lock_delete_one(const struct lu_env *env, switch (parent->cll_state) { case CLS_ENQUEUED: /* See LU-1355 for the case that a glimpse lock is - * interrupted by signal */ + * interrupted by signal + */ LASSERT(parent->cll_flags & CLF_CANCELLED); break; case CLS_QUEUING: @@ -402,7 +404,7 @@ static void lovsub_lock_delete(const struct lu_env *env, restart = 0; list_for_each_entry_safe(scan, temp, - &sub->lss_parents, lll_list) { + &sub->lss_parents, lll_list) { lov = scan->lll_super; subdata = &lov->lls_sub[scan->lll_idx]; lovsub_parent_lock(env, lov); @@ -429,7 +431,7 @@ static int lovsub_lock_print(const struct lu_env *env, void *cookie, list_for_each_entry(scan, &sub->lss_parents, lll_list) { lov = scan->lll_super; (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov); - if (lov != NULL) + if (lov) cl_lock_descr_print(env, cookie, p, &lov->lls_cl.cls_lock->cll_descr); (*p)(env, cookie, "] "); @@ -453,8 +455,8 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, struct lovsub_lock *lsk; int result; - lsk = kmem_cache_alloc(lovsub_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lsk != NULL) { + lsk = 
kmem_cache_zalloc(lovsub_lock_kmem, GFP_NOFS); + if (lsk) { INIT_LIST_HEAD(&lsk->lss_parents); cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); result = 0; diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c index 5ba5ee1b8681..6c5430d938d0 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_object.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c @@ -63,7 +63,7 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, under = &dev->acid_next->cd_lu_dev; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below != NULL) { + if (below) { lu_object_add(obj, below); cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page)); result = 0; @@ -143,8 +143,8 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env, struct lovsub_object *los; struct lu_object *obj; - los = kmem_cache_alloc(lovsub_object_kmem, GFP_NOFS | __GFP_ZERO); - if (los != NULL) { + los = kmem_cache_zalloc(lovsub_object_kmem, GFP_NOFS); + if (los) { struct cl_object_header *hdr; obj = lovsub2lu(los); diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c index 3f00ce9677b7..2d945532b78e 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_page.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c @@ -60,7 +60,7 @@ static const struct cl_page_operations lovsub_page_ops = { }; int lovsub_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *unused) + struct cl_page *page, struct page *unused) { struct lovsub_page *lsb = cl_object_page_slice(obj, page); diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c index 337241d84980..0dcb6b6a7782 100644 --- a/drivers/staging/lustre/lustre/lov/lproc_lov.c +++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c @@ -46,22 +46,22 @@ static int lov_stripesize_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%llu\n", desc->ld_default_stripe_size); return 0; } static ssize_t lov_stripesize_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; __u64 val; int rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_u64_helper(buffer, count, &val); if (rc) @@ -79,22 +79,22 @@ static int lov_stripeoffset_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%llu\n", desc->ld_default_stripe_offset); return 0; } static ssize_t lov_stripeoffset_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; __u64 val; int rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_u64_helper(buffer, count, &val); if (rc) @@ -111,21 +111,21 @@ static int lov_stripetype_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); 
+ LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%u\n", desc->ld_pattern); return 0; } static ssize_t lov_stripetype_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; int val, rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_helper(buffer, count, &val); if (rc) @@ -143,21 +143,21 @@ static int lov_stripecount_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1); return 0; } static ssize_t lov_stripecount_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; int val, rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_helper(buffer, count, &val); if (rc) @@ -199,7 +199,7 @@ static int lov_desc_uuid_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_obd *lov; - LASSERT(dev != NULL); + LASSERT(dev); lov = &dev->u.lov; seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid); return 0; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h index 3d2997a161b6..c5519aeb0d8a 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h +++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h @@ -53,7 +53,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size, void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags, struct md_op_data *data, int ea_size); void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - void *ea, int ealen, void *ea2, int ea2len); + void *ea, int ealen, void *ea2, int ea2len); void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, const void *data, int datalen, __u32 mode, __u32 uid, __u32 gid, cfs_cap_t capability, __u64 rdev); @@ -90,7 +90,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, struct ptlrpc_request **req, __u64 extra_lock_flags); int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, - struct list_head *cancels, ldlm_mode_t mode, + struct list_head *cancels, enum ldlm_mode mode, __u64 bits); /* mdc/mdc_request.c */ int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid, @@ -119,8 +119,8 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request **request); int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - ldlm_cancel_flags_t flags, void *opaque); + ldlm_policy_data_t *policy, enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque); int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, struct lu_fid *fid, __u64 *bits); @@ -129,10 +129,10 @@ int mdc_intent_getattr_async(struct obd_export *exp, struct md_enqueue_info *minfo, struct ldlm_enqueue_info *einfo); -ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, 
- ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh); +enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, enum ldlm_type type, + ldlm_policy_data_t *policy, enum ldlm_mode mode, + struct lustre_handle *lockh); static inline int mdc_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, int opc, diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c index 7218532ffea3..b3bfdcb73670 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c @@ -41,8 +41,6 @@ static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid) { - LASSERT(b != NULL); - b->suppgid = suppgid; b->uid = from_kuid(&init_user_ns, current_uid()); b->gid = from_kgid(&init_user_ns, current_gid()); @@ -83,7 +81,6 @@ void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid, { struct mdt_body *b = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(b != NULL); b->valid = valid; b->eadatasize = ea_size; b->flags = flags; @@ -323,7 +320,7 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, return; lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); - if (ea == NULL) { /* Remove LOV EA */ + if (!ea) { /* Remove LOV EA */ lum->lmm_magic = LOV_USER_MAGIC_V1; lum->lmm_stripe_size = 0; lum->lmm_stripe_count = 0; @@ -346,7 +343,6 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink)); rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - LASSERT(rec != NULL); rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ? REINT_RMENTRY : REINT_UNLINK; @@ -362,7 +358,7 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) rec->ul_bias = op_data->op_bias; tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); - LASSERT(tmp != NULL); + LASSERT(tmp); LOGL0(op_data->op_name, op_data->op_namelen, tmp); } @@ -373,7 +369,6 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data) CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link)); rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - LASSERT(rec != NULL); rec->lk_opcode = REINT_LINK; rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid; */ @@ -456,10 +451,9 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req, struct ldlm_lock *lock; data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA); - LASSERT(data != NULL); lock = ldlm_handle2lock(&op_data->op_lease_handle); - if (lock != NULL) { + if (lock) { data->cd_handle = lock->l_remote_handle; ldlm_lock_put(lock); } @@ -495,7 +489,8 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw) /* We record requests in flight in cli->cl_r_in_flight here. * There is only one write rpc possible in mdc anyway. If this to change - * in the future - the code may need to be revisited. */ + * in the future - the code may need to be revisited. 
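The prototype changes in mdc_internal.h swap the ldlm_mode_t, ldlm_type_t and ldlm_cancel_flags_t typedefs for their underlying enum tags, in line with the usual kernel preference against typedef'd enums. A short illustration with made-up names, not the actual Lustre definitions:

enum widget_mode {
	WIDGET_MODE_OFF,
	WIDGET_MODE_ON,
};

/* Discouraged: a typedef such as widget_mode_t hiding the tag, then
 *   widget_mode_t widget_get_mode(int widget_id);
 * Preferred: spell the enum tag out in the prototype, as the mdc
 * declarations now do for enum ldlm_mode and friends.
 */
enum widget_mode widget_get_mode(int widget_id);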
+ */ int mdc_enter_request(struct client_obd *cli) { int rc = 0; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c index ef9a1e124ea4..958a164f620d 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c @@ -129,7 +129,7 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, lock = ldlm_handle2lock((struct lustre_handle *)lockh); - LASSERT(lock != NULL); + LASSERT(lock); lock_res_and_lock(lock); if (lock->l_resource->lr_lvb_inode && lock->l_resource->lr_lvb_inode != data) { @@ -151,13 +151,13 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, return 0; } -ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh) +enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, enum ldlm_type type, + ldlm_policy_data_t *policy, enum ldlm_mode mode, + struct lustre_handle *lockh) { struct ldlm_res_id res_id; - ldlm_mode_t rc; + enum ldlm_mode rc; fid_build_reg_res_name(fid, &res_id); /* LU-4405: Clear bits not supported by server */ @@ -170,8 +170,8 @@ ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_res_id res_id; @@ -191,12 +191,12 @@ int mdc_null_inode(struct obd_export *exp, struct ldlm_resource *res; struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace; - LASSERTF(ns != NULL, "no namespace passed\n"); + LASSERTF(ns, "no namespace passed\n"); fid_build_reg_res_name(fid, &res_id); res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; lock_res(res); @@ -210,7 +210,8 @@ int mdc_null_inode(struct obd_export *exp, /* find any ldlm lock of the inode in mdc * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ int mdc_find_cbdata(struct obd_export *exp, const struct lu_fid *fid, ldlm_iterator_t it, void *data) @@ -252,7 +253,8 @@ static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc) * OOM here may cause recovery failure if lmm is needed (only for the * original open if the MDS crashed just when this client also OOM'd) * but this is incredibly unlikely, and questionable whether the client - * could do MDS recovery under OOM anyways... */ + * could do MDS recovery under OOM anyways... 
+ */ static void mdc_realloc_openmsg(struct ptlrpc_request *req, struct mdt_body *body) { @@ -317,7 +319,7 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_OPEN); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return ERR_PTR(-ENOMEM); } @@ -364,8 +366,8 @@ mdc_intent_getxattr_pack(struct obd_export *exp, LIST_HEAD(cancels); req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_GETXATTR); - if (req == NULL) + &RQF_LDLM_INTENT_GETXATTR); + if (!req) return ERR_PTR(-ENOMEM); rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); @@ -384,14 +386,12 @@ mdc_intent_getxattr_pack(struct obd_export *exp, mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, maxdata, -1, 0); - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, - RCL_SERVER, maxdata); + req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, maxdata); - req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, - RCL_SERVER, maxdata); + req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, RCL_SERVER, maxdata); req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS, - RCL_SERVER, maxdata); + RCL_SERVER, maxdata); ptlrpc_request_set_replen(req); @@ -409,7 +409,7 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_UNLINK); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -437,8 +437,8 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, } static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, - struct lookup_intent *it, - struct md_op_data *op_data) + struct lookup_intent *it, + struct md_op_data *op_data) { struct ptlrpc_request *req; struct obd_device *obddev = class_exp2obd(exp); @@ -453,7 +453,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_GETATTR); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -496,8 +496,8 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_LAYOUT); - if (req == NULL) + &RQF_LDLM_INTENT_LAYOUT); + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0); @@ -514,7 +514,8 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp, /* pack the layout intent request */ layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT); /* LAYOUT_INTENT_ACCESS is generic, specific operation will be - * set for replication */ + * set for replication + */ layout->li_opc = LAYOUT_INTENT_ACCESS; req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, @@ -530,7 +531,7 @@ mdc_enqueue_pack(struct obd_export *exp, int lvb_len) int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); @@ -561,7 +562,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, LASSERT(rc >= 0); /* Similarly, if we're going to replay this request, we don't want to - * actually get a lock, just perform the intent. */ + * actually get a lock, just perform the intent. 
+ */ if (req->rq_transno || req->rq_replay) { lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ); lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY); @@ -573,10 +575,10 @@ static int mdc_finish_enqueue(struct obd_export *exp, rc = 0; } else { /* rc = 0 */ lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); /* If the server gave us back a different lock mode, we should - * fix up our variables. */ + * fix up our variables. + */ if (lock->l_req_mode != einfo->ei_mode) { ldlm_lock_addref(lockh, lock->l_req_mode); ldlm_lock_decref(lockh, einfo->ei_mode); @@ -586,7 +588,6 @@ static int mdc_finish_enqueue(struct obd_export *exp, } lockrep = req_capsule_server_get(pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */ intent->it_disposition = (int)lockrep->lock_policy_res1; intent->it_status = (int)lockrep->lock_policy_res2; @@ -595,7 +596,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, intent->it_data = req; /* Technically speaking rq_transno must already be zero if - * it_status is in error, so the check is a bit redundant */ + * it_status is in error, so the check is a bit redundant + */ if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay) mdc_clear_replay_flag(req, intent->it_status); @@ -605,7 +607,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, * * It's important that we do this first! Otherwise we might exit the * function without doing so, and try to replay a failed create - * (bug 3440) */ + * (bug 3440) + */ if (it->it_op & IT_OPEN && req->rq_replay && (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0)) mdc_clear_replay_flag(req, intent->it_status); @@ -618,7 +621,7 @@ static int mdc_finish_enqueue(struct obd_export *exp, struct mdt_body *body; body = req_capsule_server_get(pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { CERROR("Can't swab mdt_body\n"); return -EPROTO; } @@ -645,11 +648,12 @@ static int mdc_finish_enqueue(struct obd_export *exp, */ eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, body->eadatasize); - if (eadata == NULL) + if (!eadata) return -EPROTO; /* save lvb data and length in case this is for layout - * lock */ + * lock + */ lvb_data = eadata; lvb_len = body->eadatasize; @@ -690,31 +694,32 @@ static int mdc_finish_enqueue(struct obd_export *exp, LASSERT(client_is_remote(exp)); perm = req_capsule_server_swab_get(pill, &RMF_ACL, lustre_swab_mdt_remote_perm); - if (perm == NULL) + if (!perm) return -EPROTO; } } else if (it->it_op & IT_LAYOUT) { /* maybe the lock was granted right away and layout - * is packed into RMF_DLM_LVB of req */ + * is packed into RMF_DLM_LVB of req + */ lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER); if (lvb_len > 0) { lvb_data = req_capsule_server_sized_get(pill, &RMF_DLM_LVB, lvb_len); - if (lvb_data == NULL) + if (!lvb_data) return -EPROTO; } } /* fill in stripe data for layout lock */ lock = ldlm_handle2lock(lockh); - if (lock != NULL && ldlm_has_layout(lock) && lvb_data != NULL) { + if (lock && ldlm_has_layout(lock) && lvb_data) { void *lmm; LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n", - ldlm_it2str(it->it_op), lvb_len); + ldlm_it2str(it->it_op), lvb_len); lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS); - if (lmm == NULL) { + if (!lmm) { LDLM_LOCK_PUT(lock); return -ENOMEM; } @@ -722,24 +727,25 @@ static int mdc_finish_enqueue(struct obd_export *exp, /* install lvb_data */ lock_res_and_lock(lock); - if (lock->l_lvb_data == NULL) { + if (!lock->l_lvb_data) { lock->l_lvb_type = LVB_T_LAYOUT; 
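A large share of the hunks in this file only reflow block comments so the closing */ sits on a line of its own, which is the multi-line comment style checkpatch asks for in this part of the tree. For reference, assuming an arbitrary comment:

/* Style the patch moves away from: the terminator shares
 * the last line of text. */

/* Style the patch converts to: the text is unchanged and
 * the terminator gets its own line.
 */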
lock->l_lvb_data = lmm; lock->l_lvb_len = lvb_len; lmm = NULL; } unlock_res_and_lock(lock); - if (lmm != NULL) + if (lmm) kvfree(lmm); } - if (lock != NULL) + if (lock) LDLM_LOCK_PUT(lock); return rc; } /* We always reserve enough space in the reply packet for a stripe MD, because - * we don't know in advance the file type. */ + * we don't know in advance the file type. + */ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, struct lookup_intent *it, struct md_op_data *op_data, struct lustre_handle *lockh, void *lmm, int lmmsize, @@ -782,14 +788,15 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, policy = &getxattr_policy; } - LASSERT(reqp == NULL); + LASSERT(!reqp); generation = obddev->u.cli.cl_import->imp_generation; resend: flags = saved_flags; if (!it) { /* The only way right now is FLOCK, in this case we hide flock - policy as lmm, but lmmsize is 0 */ + * policy as lmm, but lmmsize is 0 + */ LASSERT(lmm && lmmsize == 0); LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n", einfo->ei_type); @@ -823,9 +830,10 @@ resend: if (IS_ERR(req)) return PTR_ERR(req); - if (req != NULL && it && it->it_op & IT_CREAT) + if (req && it && it->it_op & IT_CREAT) /* ask ptlrpc not to resend on EINPROGRESS since we have our own - * retry logic */ + * retry logic + */ req->rq_no_retry_einprogress = 1; if (resends) { @@ -836,7 +844,8 @@ resend: /* It is important to obtain rpc_lock first (if applicable), so that * threads that are serialised with rpc_lock are not polluting our - * rpcs in flight counter. We do not do flock request limiting, though*/ + * rpcs in flight counter. We do not do flock request limiting, though + */ if (it) { mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it); rc = mdc_enter_request(&obddev->u.cli); @@ -852,13 +861,14 @@ resend: 0, lvb_type, lockh, 0); if (!it) { /* For flock requests we immediately return without further - delay and let caller deal with the rest, since rest of - this function metadata processing makes no sense for flock - requests anyway. But in case of problem during comms with - Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we - can not rely on caller and this mainly for F_UNLCKs - (explicits or automatically generated by Kernel to clean - current FLocks upon exit) that can't be trashed */ + * delay and let caller deal with the rest, since rest of + * this function metadata processing makes no sense for flock + * requests anyway. But in case of problem during comms with + * Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we + * can not rely on caller and this mainly for F_UNLCKs + * (explicits or automatically generated by Kernel to clean + * current FLocks upon exit) that can't be trashed + */ if ((rc == -EINTR) || (rc == -ETIMEDOUT)) goto resend; return rc; @@ -878,13 +888,13 @@ resend: } lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); lockrep->lock_policy_res2 = ptlrpc_status_ntoh(lockrep->lock_policy_res2); /* Retry the create infinitely when we get -EINPROGRESS from - * server. This is required by the new quota design. */ + * server. This is required by the new quota design. 
+ */ if (it->it_op & IT_CREAT && (int)lockrep->lock_policy_res2 == -EINPROGRESS) { mdc_clear_replay_flag(req, rc); @@ -930,13 +940,13 @@ static int mdc_finish_intent_lock(struct obd_export *exp, struct ldlm_lock *lock; int rc; - LASSERT(request != NULL); LASSERT(request != LP_POISON); LASSERT(request->rq_repmsg != LP_POISON); if (!it_disposition(it, DISP_IT_EXECD)) { /* The server failed before it even started executing the - * intent, i.e. because it couldn't unpack the request. */ + * intent, i.e. because it couldn't unpack the request. + */ LASSERT(it->d.lustre.it_status != 0); return it->d.lustre.it_status; } @@ -945,10 +955,11 @@ static int mdc_finish_intent_lock(struct obd_export *exp, return rc; mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); - LASSERT(mdt_body != NULL); /* mdc_enqueue checked */ + LASSERT(mdt_body); /* mdc_enqueue checked */ /* If we were revalidating a fid/name pair, mark the intent in - * case we fail and get called again from lookup */ + * case we fail and get called again from lookup + */ if (fid_is_sane(&op_data->op_fid2) && it->it_create_mode & M_CHECK_STALE && it->it_op != IT_GETATTR) { @@ -957,7 +968,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp, /* sever can return one of two fids: * op_fid2 - new allocated fid - if file is created. * op_fid3 - existent fid - if file only open. - * op_fid3 is saved in lmv_intent_open */ + * op_fid3 is saved in lmv_intent_open + */ if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) && (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) { CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID @@ -1001,7 +1013,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp, * one. We have to set the data here instead of in * mdc_enqueue, because we need to use the child's inode as * the l_ast_data to match, and that's not available until - * intent_finish has performed the iget().) */ + * intent_finish has performed the iget().) + */ lock = ldlm_handle2lock(lockh); if (lock) { ldlm_policy_data_t policy = lock->l_policy_data; @@ -1036,11 +1049,12 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, { /* We could just return 1 immediately, but since we should only * be called in revalidate_it if we already have a lock, let's - * verify that. */ + * verify that. + */ struct ldlm_res_id res_id; struct lustre_handle lockh; ldlm_policy_data_t policy; - ldlm_mode_t mode; + enum ldlm_mode mode; if (it->d.lustre.it_lock_handle) { lockh.cookie = it->d.lustre.it_lock_handle; @@ -1059,10 +1073,12 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, * Unfortunately, if the bits are split across multiple * locks, there's no easy way to match all of them here, * so an extra RPC would be performed to fetch all - * of those bits at once for now. */ + * of those bits at once for now. + */ /* For new MDTs(> 2.4), UPDATE|PERM should be enough, * but for old MDTs (< 2.4), permission is covered - * by LOOKUP lock, so it needs to match all bits here.*/ + * by LOOKUP lock, so it needs to match all bits here. 
+ */ policy.l_inodebits.bits = MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM; @@ -1076,7 +1092,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, } mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid, - LDLM_IBITS, &policy, + LDLM_IBITS, &policy, LCK_CR | LCK_CW | LCK_PR | LCK_PW, &lockh); } @@ -1147,11 +1163,13 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data, (it->it_op & (IT_LOOKUP | IT_GETATTR))) { /* We could just return 1 immediately, but since we should only * be called in revalidate_it if we already have a lock, let's - * verify that. */ + * verify that. + */ it->d.lustre.it_lock_handle = 0; rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL); /* Only return failure if it was not GETATTR by cfid - (from inode_revalidate) */ + * (from inode_revalidate) + */ if (rc || op_data->op_namelen != 0) return rc; } @@ -1206,7 +1224,6 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env, } lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); lockrep->lock_policy_res2 = ptlrpc_status_ntoh(lockrep->lock_policy_res2); @@ -1235,7 +1252,8 @@ int mdc_intent_getattr_async(struct obd_export *exp, struct ldlm_res_id res_id; /*XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed * for statahead currently. Consider CMD in future, such two bits - * maybe managed by different MDS, should be adjusted then. */ + * maybe managed by different MDS, should be adjusted then. + */ ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE } @@ -1244,9 +1262,9 @@ int mdc_intent_getattr_async(struct obd_export *exp, __u64 flags = LDLM_FL_HAS_INTENT; CDEBUG(D_DLMTRACE, - "name: %.*s in inode "DFID", intent: %s flags %#Lo\n", - op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), - ldlm_it2str(it->it_op), it->it_flags); + "name: %.*s in inode " DFID ", intent: %s flags %#Lo\n", + op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), + ldlm_it2str(it->it_op), it->it_flags); fid_build_reg_res_name(&op_data->op_fid1, &res_id); req = mdc_intent_getattr_pack(exp, it, op_data); diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c index ac7695a10753..4ef3db147f87 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c @@ -65,9 +65,10 @@ static int mdc_reint(struct ptlrpc_request *request, /* Find and cancel locally locks matched by inode @bits & @mode in the resource * found by @fid. Found locks are added into @cancel list. Returns the amount of - * locks added to @cancels list. */ + * locks added to @cancels list. + */ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, - struct list_head *cancels, ldlm_mode_t mode, + struct list_head *cancels, enum ldlm_mode mode, __u64 bits) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; @@ -81,14 +82,15 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, * * This distinguishes from a case when ELC is not supported originally, * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. */ + * locally, without sending any RPC. 
+ */ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) return 0; fid_build_reg_res_name(fid, &res_id); res = ldlm_resource_get(exp->exp_obd->obd_namespace, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); /* Initialize ibits lock policy. */ @@ -111,8 +113,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, int count = 0, rc; __u64 bits; - LASSERT(op_data != NULL); - bits = MDS_INODELOCK_UPDATE; if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) bits |= MDS_INODELOCK_LOOKUP; @@ -123,7 +123,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, &cancels, LCK_EX, bits); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_SETATTR); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -151,10 +151,10 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, ptlrpc_request_set_replen(req); if (mod && (op_data->op_flags & MF_EPOCH_OPEN) && req->rq_import->imp_replayable) { - LASSERT(*mod == NULL); + LASSERT(!*mod); *mod = obd_mod_alloc(); - if (*mod == NULL) { + if (!*mod) { DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data"); } else { req->rq_replay = 1; @@ -181,8 +181,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(epoch != NULL); - LASSERT(body != NULL); epoch->handle = body->handle; epoch->ioepoch = body->ioepoch; req->rq_replay_cb = mdc_replay_open; @@ -195,7 +193,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, *request = req; if (rc && req->rq_commit_cb) { /* Put an extra reference on \var mod on error case. */ - if (mod != NULL && *mod != NULL) + if (mod && *mod) obd_mod_put(*mod); req->rq_commit_cb(req); } @@ -237,7 +235,7 @@ rebuild: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_CREATE_RMT_ACL); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -262,7 +260,8 @@ rebuild: ptlrpc_request_set_replen(req); /* ask ptlrpc not to resend on EINPROGRESS since we have our own retry - * logic here */ + * logic here + */ req->rq_no_retry_einprogress = 1; if (resends) { @@ -280,7 +279,8 @@ rebuild: goto resend; } else if (rc == -EINPROGRESS) { /* Retry create infinitely until succeed or get other - * error code. */ + * error code. 
+ */ ptlrpc_req_finished(req); resends++; @@ -308,7 +308,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request *req = *request; int count = 0, rc; - LASSERT(req == NULL); + LASSERT(!req); if ((op_data->op_flags & MF_MDC_CANCEL_FID1) && (fid_is_sane(&op_data->op_fid1)) && @@ -324,7 +324,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, MDS_INODELOCK_FULL); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_UNLINK); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -373,7 +373,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data, MDS_INODELOCK_UPDATE); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -422,14 +422,14 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, &cancels, LCK_EX, MDS_INODELOCK_LOOKUP); if ((op_data->op_flags & MF_MDC_CANCEL_FID4) && - (fid_is_sane(&op_data->op_fid4))) + (fid_is_sane(&op_data->op_fid4))) count += mdc_resource_get_unused(exp, &op_data->op_fid4, &cancels, LCK_EX, MDS_INODELOCK_FULL); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_RENAME); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index 57e0fc1e8549..55dd8ef9525b 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -48,6 +48,7 @@ #include "../include/lprocfs_status.h" #include "../include/lustre_param.h" #include "../include/lustre_log.h" +#include "../include/lustre_kernelcomm.h" #include "mdc_internal.h" @@ -62,7 +63,8 @@ static inline int mdc_queue_wait(struct ptlrpc_request *req) /* mdc_enter_request() ensures that this client has no more * than cl_max_rpcs_in_flight RPCs simultaneously inf light - * against an MDT. */ + * against an MDT. 
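The comment above describes mdc_enter_request() as the throttle that keeps a client from having more than cl_max_rpcs_in_flight metadata RPCs outstanding against one MDT. The general shape is a counting limiter; a rough generic sketch of that idea (not the mdc implementation) using a plain kernel semaphore and an illustrative limit:

#include <linux/semaphore.h>

#define EXAMPLE_MAX_RPCS_IN_FLIGHT 8	/* illustrative limit */

static struct semaphore example_rpc_slots =
	__SEMAPHORE_INITIALIZER(example_rpc_slots, EXAMPLE_MAX_RPCS_IN_FLIGHT);

/* Block until one of the slots is free; -EINTR if a signal arrives. */
static int example_enter_request(void)
{
	return down_interruptible(&example_rpc_slots);
}

/* Release the slot once the RPC has completed. */
static void example_exit_request(void)
{
	up(&example_rpc_slots);
}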
+ */ rc = mdc_enter_request(cli); if (rc != 0) return rc; @@ -82,7 +84,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid) req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETSTATUS, LUSTRE_MDS_VERSION, MDS_GETSTATUS); - if (req == NULL) + if (!req) return -ENOMEM; mdc_pack_body(req, NULL, 0, 0, -1, 0); @@ -95,7 +97,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid) goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -135,7 +137,7 @@ static int mdc_getattr_common(struct obd_export *exp, /* sanity check for the reply */ body = req_capsule_server_get(pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; CDEBUG(D_NET, "mode: %o\n", body->mode); @@ -145,7 +147,7 @@ static int mdc_getattr_common(struct obd_export *exp, eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, body->eadatasize); - if (eadata == NULL) + if (!eadata) return -EPROTO; } @@ -155,7 +157,7 @@ static int mdc_getattr_common(struct obd_export *exp, LASSERT(client_is_remote(exp)); perm = req_capsule_server_swab_get(pill, &RMF_ACL, lustre_swab_mdt_remote_perm); - if (perm == NULL) + if (!perm) return -EPROTO; } @@ -163,7 +165,7 @@ static int mdc_getattr_common(struct obd_export *exp, } static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) + struct ptlrpc_request **request) { struct ptlrpc_request *req; int rc; @@ -175,7 +177,7 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, } *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); @@ -205,7 +207,7 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, } static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) + struct ptlrpc_request **request) { struct ptlrpc_request *req; int rc; @@ -213,7 +215,7 @@ static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR_NAME); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -260,7 +262,7 @@ static int mdc_is_subdir(struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION, MDS_IS_SUBDIR); - if (req == NULL) + if (!req) return -ENOMEM; mdc_is_subdir_pack(req, pfid, cfid, 0); @@ -289,7 +291,7 @@ static int mdc_xattr_common(struct obd_export *exp, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt); - if (req == NULL) + if (!req) return -ENOMEM; if (xattr_name) { @@ -424,7 +426,7 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md) return -EPROTO; acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize); - if (acl == NULL) + if (!acl) return 0; if (IS_ERR(acl)) { @@ -460,7 +462,6 @@ static int mdc_get_lustre_md(struct obd_export *exp, memset(md, 0, sizeof(*md)); md->body = req_capsule_server_get(pill, &RMF_MDT_BODY); - LASSERT(md->body != NULL); if (md->body->valid & OBD_MD_FLEASIZE) { int lmmsize; @@ -592,17 +593,16 @@ void mdc_replay_open(struct ptlrpc_request *req) struct lustre_handle old; struct mdt_body *body; - if (mod == NULL) { + if (!mod) { DEBUG_REQ(D_ERROR, req, "Can't properly replay 
without open data."); return; } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); och = mod->mod_och; - if (och != NULL) { + if (och) { struct lustre_handle *file_fh; LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC); @@ -614,7 +614,7 @@ void mdc_replay_open(struct ptlrpc_request *req) *file_fh = body->handle; } close_req = mod->mod_close_req; - if (close_req != NULL) { + if (close_req) { __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg); struct mdt_ioepoch *epoch; @@ -623,7 +623,7 @@ void mdc_replay_open(struct ptlrpc_request *req) &RMF_MDT_EPOCH); LASSERT(epoch); - if (och != NULL) + if (och) LASSERT(!memcmp(&old, &epoch->handle, sizeof(old))); DEBUG_REQ(D_HA, close_req, "updating close body with new fh"); epoch->handle = body->handle; @@ -634,7 +634,7 @@ void mdc_commit_open(struct ptlrpc_request *req) { struct md_open_data *mod = req->rq_cb_data; - if (mod == NULL) + if (!mod) return; /** @@ -674,15 +674,15 @@ int mdc_set_open_replay_data(struct obd_export *exp, rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT); body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); - LASSERT(rec != NULL); + LASSERT(rec); /* Incoming message in my byte order (it's been swabbed). */ /* Outgoing messages always in my byte order. */ - LASSERT(body != NULL); + LASSERT(body); /* Only if the import is replayable, we set replay_open data */ if (och && imp->imp_replayable) { mod = obd_mod_alloc(); - if (mod == NULL) { + if (!mod) { DEBUG_REQ(D_ERROR, open_req, "Can't allocate md_open_data"); return 0; @@ -748,11 +748,11 @@ static int mdc_clear_open_replay_data(struct obd_export *exp, * It is possible to not have \var mod in a case of eviction between * lookup and ll_file_open(). **/ - if (mod == NULL) + if (!mod) return 0; LASSERT(mod != LP_POISON); - LASSERT(mod->mod_open_req != NULL); + LASSERT(mod->mod_open_req); mdc_free_open(mod); mod->mod_och = NULL; @@ -803,7 +803,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE); @@ -814,13 +814,14 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, /* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a * portal whose threads are not taking any DLM locks and are therefore - * always progressing */ + * always progressing + */ req->rq_request_portal = MDS_READPAGE_PORTAL; ptlrpc_at_set_req_timeout(req); /* Ensure that this close's handle is fixed up during replay. */ - if (likely(mod != NULL)) { - LASSERTF(mod->mod_open_req != NULL && + if (likely(mod)) { + LASSERTF(mod->mod_open_req && mod->mod_open_req->rq_type != LI_POISON, "POISONED open %p!\n", mod->mod_open_req); @@ -828,7 +829,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, DEBUG_REQ(D_HA, mod->mod_open_req, "matched open"); /* We no longer want to preserve this open for replay even - * though the open was committed. b=3632, b=3633 */ + * though the open was committed. 
b=3632, b=3633 + */ spin_lock(&mod->mod_open_req->rq_lock); mod->mod_open_req->rq_replay = 0; spin_unlock(&mod->mod_open_req->rq_lock); @@ -850,7 +852,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, rc = ptlrpc_queue_wait(req); mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL); - if (req->rq_repmsg == NULL) { + if (!req->rq_repmsg) { CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req, req->rq_status); if (rc == 0) @@ -866,7 +868,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, rc = -rc; } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) rc = -EPROTO; } else if (rc == -ESTALE) { /** @@ -876,7 +878,6 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, */ if (mod) { DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc); - LASSERT(mod->mod_open_req != NULL); if (mod->mod_open_req->rq_committed) rc = 0; } @@ -886,7 +887,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, if (rc != 0) mod->mod_close_req = NULL; /* Since now, mod is accessed through open_req only, - * thus close req does not keep a reference on mod anymore. */ + * thus close req does not keep a reference on mod anymore. + */ obd_mod_put(mod); } *request = req; @@ -903,7 +905,7 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_DONE_WRITING); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING); @@ -912,15 +914,16 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, return rc; } - if (mod != NULL) { - LASSERTF(mod->mod_open_req != NULL && + if (mod) { + LASSERTF(mod->mod_open_req && mod->mod_open_req->rq_type != LI_POISON, "POISONED setattr %p!\n", mod->mod_open_req); mod->mod_close_req = req; DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr"); /* We no longer want to preserve this setattr for replay even - * though the open was committed. b=3632, b=3633 */ + * though the open was committed. b=3632, b=3633 + */ spin_lock(&mod->mod_open_req->rq_lock); mod->mod_open_req->rq_replay = 0; spin_unlock(&mod->mod_open_req->rq_lock); @@ -940,7 +943,6 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, * Let's check if mod exists and return no error in that case */ if (mod) { - LASSERT(mod->mod_open_req != NULL); if (mod->mod_open_req->rq_committed) rc = 0; } @@ -949,11 +951,12 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, if (mod) { if (rc != 0) mod->mod_close_req = NULL; - LASSERT(mod->mod_open_req != NULL); + LASSERT(mod->mod_open_req); mdc_free_open(mod); /* Since now, mod is accessed through setattr req only, - * thus DW req does not keep a reference on mod anymore. */ + * thus DW req does not keep a reference on mod anymore. 
+ */ obd_mod_put(mod); } @@ -978,7 +981,7 @@ static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data, restart_bulk: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE); @@ -992,7 +995,7 @@ restart_bulk: desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK, MDS_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { ptlrpc_request_free(req); return -ENOMEM; } @@ -1033,8 +1036,8 @@ restart_bulk: if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { CERROR("Unexpected # bytes transferred: %d (%ld expected)\n", - req->rq_bulk->bd_nob_transferred, - PAGE_CACHE_SIZE * op_data->op_npages); + req->rq_bulk->bd_nob_transferred, + PAGE_CACHE_SIZE * op_data->op_npages); ptlrpc_req_finished(req); return -EPROTO; } @@ -1066,7 +1069,7 @@ static int mdc_statfs(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS, LUSTRE_MDS_VERSION, MDS_STATFS); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto output; } @@ -1088,7 +1091,7 @@ static int mdc_statfs(const struct lu_env *env, } msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -1161,7 +1164,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp, req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS, LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1170,7 +1173,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp, /* Copy hsm_progress struct */ req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS); - if (req_hpk == NULL) { + if (!req_hpk) { rc = -EPROTO; goto out; } @@ -1195,7 +1198,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives) req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER, LUSTRE_MDS_VERSION, MDS_HSM_CT_REGISTER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1205,7 +1208,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives) /* Copy hsm_progress struct */ archive_mask = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_ARCHIVE); - if (archive_mask == NULL) { + if (!archive_mask) { rc = -EPROTO; goto out; } @@ -1230,7 +1233,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_ACTION); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION); @@ -1250,7 +1253,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp, req_hca = req_capsule_server_get(&req->rq_pill, &RMF_MDS_HSM_CURRENT_ACTION); - if (req_hca == NULL) { + if (!req_hca) { rc = -EPROTO; goto out; } @@ -1270,7 +1273,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp) req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER, LUSTRE_MDS_VERSION, MDS_HSM_CT_UNREGISTER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1295,7 +1298,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_STATE_GET); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET); @@ -1314,7 +1317,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp, goto out; req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE); - if (req_hus == NULL) { + if (!req_hus) { 
rc = -EPROTO; goto out; } @@ -1336,7 +1339,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_STATE_SET); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET); @@ -1350,7 +1353,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp, /* Copy states */ req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET); - if (req_hss == NULL) { + if (!req_hss) { rc = -EPROTO; goto out; } @@ -1375,7 +1378,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, int rc; req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1396,7 +1399,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy hsm_request struct */ req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST); - if (req_hr == NULL) { + if (!req_hr) { rc = -EPROTO; goto out; } @@ -1404,7 +1407,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy hsm_user_item structs */ req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM); - if (req_hui == NULL) { + if (!req_hui) { rc = -EPROTO; goto out; } @@ -1413,7 +1416,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy opaque field */ req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA); - if (req_opaque == NULL) { + if (!req_opaque) { rc = -EPROTO; goto out; } @@ -1512,7 +1515,7 @@ static int mdc_changelog_send_thread(void *csdata) /* Set up the remote catalog handle */ ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT); - if (ctxt == NULL) { + if (!ctxt) { rc = -ENOENT; goto out; } @@ -1553,6 +1556,7 @@ static int mdc_ioc_changelog_send(struct obd_device *obd, struct ioc_changelog *icc) { struct changelog_show *cs; + struct task_struct *task; int rc; /* Freed in mdc_changelog_send_thread */ @@ -1570,15 +1574,20 @@ static int mdc_ioc_changelog_send(struct obd_device *obd, * New thread because we should return to user app before * writing into our pipe */ - rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs, - "mdc_clg_send_thread")); - if (!IS_ERR_VALUE(rc)) { - CDEBUG(D_CHANGELOG, "start changelog thread\n"); - return 0; + task = kthread_run(mdc_changelog_send_thread, cs, + "mdc_clg_send_thread"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: can't start changelog thread: rc = %d\n", + obd->obd_name, rc); + kfree(cs); + } else { + rc = 0; + CDEBUG(D_CHANGELOG, "%s: started changelog thread\n", + obd->obd_name); } CERROR("Failed to start changelog thread: %d\n", rc); - kfree(cs); return rc; } @@ -1596,7 +1605,7 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION, MDS_QUOTACHECK); - if (req == NULL) + if (!req) return -ENOMEM; body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -1605,7 +1614,8 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp, ptlrpc_request_set_replen(req); /* the next poll will find -ENODATA, that means quotacheck is - * going on */ + * going on + */ cli->cl_qchk_stat = -ENODATA; rc = ptlrpc_queue_wait(req); if (rc) @@ -1640,7 +1650,7 @@ static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION, MDS_QUOTACTL); - if (req == NULL) + if (!req) return -ENOMEM; oqc = 
req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -1694,7 +1704,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SWAP_LAYOUTS); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -1721,7 +1731,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp, } static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obd = exp->exp_obd; struct obd_ioctl_data *data = karg; @@ -1729,7 +1739,8 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, int rc; if (!try_module_get(THIS_MODULE)) { - CERROR("Can't get module. Is it alive?"); + CERROR("%s: cannot get module '%s'\n", obd->obd_name, + module_name(THIS_MODULE)); return -EINVAL; } switch (cmd) { @@ -1805,7 +1816,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd), min_t(size_t, data->ioc_plen2, - sizeof(struct obd_uuid)))) { + sizeof(struct obd_uuid)))) { rc = -EFAULT; goto out; } @@ -1818,7 +1829,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, if (copy_to_user(data->ioc_pbuf1, &stat_buf, min_t(size_t, data->ioc_plen1, - sizeof(stat_buf)))) { + sizeof(stat_buf)))) { rc = -EFAULT; goto out; } @@ -1880,7 +1891,7 @@ static int mdc_get_info_rpc(struct obd_export *exp, int rc = -EINVAL; req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, @@ -1905,7 +1916,8 @@ static int mdc_get_info_rpc(struct obd_export *exp, rc = ptlrpc_queue_wait(req); /* -EREMOTE means the get_info result is partial, and it needs to - * continue on another MDT, see fid2path part in lmv_iocontrol */ + * continue on another MDT, see fid2path part in lmv_iocontrol + */ if (rc == 0 || rc == -EREMOTE) { tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL); memcpy(val, tmp, vallen); @@ -2013,21 +2025,27 @@ static int mdc_hsm_copytool_send(int len, void *val) /** * callback function passed to kuc for re-registering each HSM copytool * running on MDC, after MDT shutdown/recovery. - * @param data archive id served by the copytool + * @param data copytool registration data * @param cb_arg callback argument (obd_import) */ -static int mdc_hsm_ct_reregister(__u32 data, void *cb_arg) +static int mdc_hsm_ct_reregister(void *data, void *cb_arg) { + struct kkuc_ct_data *kcd = data; struct obd_import *imp = (struct obd_import *)cb_arg; - __u32 archive = data; int rc; - CDEBUG(D_HA, "recover copytool registration to MDT (archive=%#x)\n", - archive); - rc = mdc_ioc_hsm_ct_register(imp, archive); + if (!kcd || kcd->kcd_magic != KKUC_CT_DATA_MAGIC) + return -EPROTO; + + if (!obd_uuid_equals(&kcd->kcd_uuid, &imp->imp_obd->obd_uuid)) + return 0; + + CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n", + imp->imp_obd->obd_name, kcd->kcd_archive); + rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_archive); /* ignore error if the copytool is already registered */ - return ((rc != 0) && (rc != -EEXIST)) ? rc : 0; + return (rc == -EEXIST) ? 
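The mdc_ioc_changelog_send() hunk above stops funnelling kthread_run()'s return value straight through PTR_ERR() into an int and instead keeps the task_struct pointer and tests it with IS_ERR(), which is the conventional pattern and avoids round-tripping a pointer through an integer. A minimal sketch of that pattern with a placeholder thread function:

#include <linux/kthread.h>
#include <linux/err.h>

static int example_thread_fn(void *data)
{
	return 0;	/* placeholder body */
}

static int example_start_thread(void *data)
{
	struct task_struct *task;

	task = kthread_run(example_thread_fn, data, "example_thread");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* e.g. -ENOMEM */
	return 0;
}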
0 : rc; } static int mdc_set_info_async(const struct lu_env *env, @@ -2133,7 +2151,7 @@ static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC); @@ -2175,7 +2193,7 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp, * Flush current sequence to make client obtain new one * from server in case of disconnect/reconnect. */ - if (cli->cl_seq != NULL) + if (cli->cl_seq) seq_client_flush(cli->cl_seq); rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL); @@ -2238,7 +2256,8 @@ static int mdc_cancel_for_recovery(struct ldlm_lock *lock) /* FIXME: if we ever get into a situation where there are too many * opened files with open locks on a single node, then we really - * should replay these open locks to reget it */ + * should replay these open locks to reget it + */ if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN) return 0; @@ -2422,7 +2441,7 @@ static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); @@ -2519,6 +2538,7 @@ static void /*__exit*/ mdc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); MODULE_DESCRIPTION("Lustre Metadata Client"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(mdc_init); diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c index ab4800c20a95..65caffe8c42e 100644 --- a/drivers/staging/lustre/lustre/mgc/mgc_request.c +++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c @@ -90,7 +90,8 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id, int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type) { /* fsname is at most 8 chars long, maybe contain "-". - * e.g. "lustre", "SUN-000" */ + * e.g. "lustre", "SUN-000" + */ return mgc_name2resid(fsname, strlen(fsname), res_id, type); } EXPORT_SYMBOL(mgc_fsname2resid); @@ -102,7 +103,8 @@ static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type /* logname consists of "fsname-nodetype". * e.g. "lustre-MDT0001", "SUN-000-client" - * there is an exception: llog "params" */ + * there is an exception: llog "params" + */ name_end = strrchr(logname, '-'); if (!name_end) len = strlen(logname); @@ -125,7 +127,8 @@ static int config_log_get(struct config_llog_data *cld) } /* Drop a reference to a config log. When no longer referenced, - we can free the config log data */ + * we can free the config log data + */ static void config_log_put(struct config_llog_data *cld) { CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname, @@ -162,7 +165,7 @@ struct config_llog_data *config_log_find(char *logname, struct config_llog_data *found = NULL; void *instance; - LASSERT(logname != NULL); + LASSERT(logname); instance = cfg ? 
cfg->cfg_instance : NULL; spin_lock(&config_list_lock); @@ -242,17 +245,18 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd, return cld; } -static struct config_llog_data *config_recover_log_add(struct obd_device *obd, - char *fsname, - struct config_llog_instance *cfg, - struct super_block *sb) +static struct config_llog_data * +config_recover_log_add(struct obd_device *obd, char *fsname, + struct config_llog_instance *cfg, + struct super_block *sb) { struct config_llog_instance lcfg = *cfg; struct config_llog_data *cld; char logname[32]; /* we have to use different llog for clients and mdts for cmd - * where only clients are notified if one of cmd server restarts */ + * where only clients are notified if one of cmd server restarts + */ LASSERT(strlen(fsname) < sizeof(logname) / 2); strcpy(logname, fsname); LASSERT(lcfg.cfg_instance); @@ -262,8 +266,9 @@ static struct config_llog_data *config_recover_log_add(struct obd_device *obd, return cld; } -static struct config_llog_data *config_params_log_add(struct obd_device *obd, - struct config_llog_instance *cfg, struct super_block *sb) +static struct config_llog_data * +config_params_log_add(struct obd_device *obd, + struct config_llog_instance *cfg, struct super_block *sb) { struct config_llog_instance lcfg = *cfg; struct config_llog_data *cld; @@ -300,7 +305,7 @@ static int config_log_add(struct obd_device *obd, char *logname, * <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log. */ ptr = strrchr(logname, '-'); - if (ptr == NULL || ptr - logname > 8) { + if (!ptr || ptr - logname > 8) { CERROR("logname %s is too long\n", logname); return -EINVAL; } @@ -309,7 +314,7 @@ static int config_log_add(struct obd_device *obd, char *logname, strcpy(seclogname + (ptr - logname), "-sptlrpc"); sptlrpc_cld = config_log_find(seclogname, NULL); - if (sptlrpc_cld == NULL) { + if (!sptlrpc_cld) { sptlrpc_cld = do_config_log_add(obd, seclogname, CONFIG_T_SPTLRPC, NULL, NULL); if (IS_ERR(sptlrpc_cld)) { @@ -339,7 +344,16 @@ static int config_log_add(struct obd_device *obd, char *logname, LASSERT(lsi->lsi_lmd); if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) { struct config_llog_data *recover_cld; - *strrchr(seclogname, '-') = 0; + + ptr = strrchr(seclogname, '-'); + if (ptr) { + *ptr = 0; + } else { + CERROR("%s: sptlrpc log name not correct, %s: rc = %d\n", + obd->obd_name, seclogname, -EINVAL); + config_log_put(cld); + return -EINVAL; + } recover_cld = config_recover_log_add(obd, seclogname, cfg, sb); if (IS_ERR(recover_cld)) { rc = PTR_ERR(recover_cld); @@ -376,7 +390,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg) int rc = 0; cld = config_log_find(logname, cfg); - if (cld == NULL) + if (!cld) return -ENOENT; mutex_lock(&cld->cld_lock); @@ -450,16 +464,16 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data) ocd = &imp->imp_connect_data; seq_printf(m, "imperative_recovery: %s\n", - OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED"); + OCD_HAS_FLAG(ocd, IMP_RECOV) ? 
"ENABLED" : "DISABLED"); seq_printf(m, "client_state:\n"); spin_lock(&config_list_lock); list_for_each_entry(cld, &config_llog_list, cld_list_chain) { - if (cld->cld_recover == NULL) + if (!cld->cld_recover) continue; - seq_printf(m, " - { client: %s, nidtbl_version: %u }\n", - cld->cld_logname, - cld->cld_recover->cld_cfg.cfg_last_idx); + seq_printf(m, " - { client: %s, nidtbl_version: %u }\n", + cld->cld_logname, + cld->cld_recover->cld_cfg.cfg_last_idx); } spin_unlock(&config_list_lock); @@ -483,8 +497,9 @@ static void do_requeue(struct config_llog_data *cld) LASSERT(atomic_read(&cld->cld_refcount) > 0); /* Do not run mgc_process_log on a disconnected export or an - export which is being disconnected. Take the client - semaphore to make the check non-racy. */ + * export which is being disconnected. Take the client + * semaphore to make the check non-racy. + */ down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem); if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) { CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname); @@ -529,8 +544,9 @@ static int mgc_requeue_thread(void *data) } /* Always wait a few seconds to allow the server who - caused the lock revocation to finish its setup, plus some - random so everyone doesn't try to reconnect at once. */ + * caused the lock revocation to finish its setup, plus some + * random so everyone doesn't try to reconnect at once. + */ to = MGC_TIMEOUT_MIN_SECONDS * HZ; to += rand * HZ / 100; /* rand is centi-seconds */ lwi = LWI_TIMEOUT(to, NULL, NULL); @@ -549,8 +565,7 @@ static int mgc_requeue_thread(void *data) spin_lock(&config_list_lock); rq_state &= ~RQ_PRECLEANUP; - list_for_each_entry(cld, &config_llog_list, - cld_list_chain) { + list_for_each_entry(cld, &config_llog_list, cld_list_chain) { if (!cld->cld_lostlock) continue; @@ -559,7 +574,8 @@ static int mgc_requeue_thread(void *data) LASSERT(atomic_read(&cld->cld_refcount) > 0); /* Whether we enqueued again or not in mgc_process_log, - * we're done with the ref from the old enqueue */ + * we're done with the ref from the old enqueue + */ if (cld_prev) config_log_put(cld_prev); cld_prev = cld; @@ -575,7 +591,8 @@ static int mgc_requeue_thread(void *data) config_log_put(cld_prev); /* break after scanning the list so that we can drop - * refcount to losing lock clds */ + * refcount to losing lock clds + */ if (unlikely(stopped)) { spin_lock(&config_list_lock); break; @@ -598,7 +615,8 @@ static int mgc_requeue_thread(void *data) } /* Add a cld to the list to requeue. Start the requeue thread if needed. - We are responsible for dropping the config log reference from here on out. */ + * We are responsible for dropping the config log reference from here on out. 
+ */ static void mgc_requeue_add(struct config_llog_data *cld) { CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n", @@ -635,7 +653,8 @@ static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd) int rc; /* setup only remote ctxt, the local disk context is switched per each - * filesystem during mgc_fs_setup() */ + * filesystem during mgc_fs_setup() + */ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd, &llog_client_ops); if (rc) @@ -697,7 +716,8 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) static int mgc_cleanup(struct obd_device *obd) { /* COMPAT_146 - old config logs may have added profiles we don't - know about */ + * know about + */ if (obd->obd_type->typ_refcnt <= 1) /* Only for the last mgc */ class_del_profiles(); @@ -711,6 +731,7 @@ static int mgc_cleanup(struct obd_device *obd) static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) { struct lprocfs_static_vars lvars = { NULL }; + struct task_struct *task; int rc; ptlrpcd_addref(); @@ -734,10 +755,10 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) init_waitqueue_head(&rq_waitq); /* start requeue thread */ - rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL, - "ll_cfg_requeue")); - if (IS_ERR_VALUE(rc)) { - CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n", + task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: cannot start requeue thread: rc = %d; no more log updates\n", obd->obd_name, rc); goto err_cleanup; } @@ -793,7 +814,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, break; } /* Make sure not to re-enqueue when the mgc is stopping - (we get called from client_disconnect_export) */ + * (we get called from client_disconnect_export) + */ if (!lock->l_conn_export || !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) { CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n", @@ -815,7 +837,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, /* Not sure where this should go... */ /* This is the timeout value for MGS_CONNECT request plus a ping interval, such - * that we can have a chance to try the secondary MGS if any. */ + * that we can have a chance to try the secondary MGS if any. + */ #define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \ + PING_INTERVAL) #define MGC_TARGET_REG_LIMIT 10 @@ -879,11 +902,12 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, cld->cld_resid.name[0]); /* We need a callback for every lockholder, so don't try to - ldlm_lock_match (see rev 1.1.2.11.2.47) */ + * ldlm_lock_match (see rev 1.1.2.11.2.47) + */ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0); @@ -894,7 +918,8 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags, NULL, 0, LVB_T_NONE, lockh, 0); /* A failed enqueue should still call the mgc_blocking_ast, - where it will be requeued if needed ("grant failed"). */ + * where it will be requeued if needed ("grant failed"). 
+ */ ptlrpc_req_finished(req); return rc; } @@ -921,7 +946,7 @@ static int mgc_target_register(struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION, MGS_TARGET_REG); - if (req == NULL) + if (!req) return -ENOMEM; req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO); @@ -950,8 +975,8 @@ static int mgc_target_register(struct obd_export *exp, } static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, - u32 keylen, void *key, u32 vallen, - void *val, struct ptlrpc_request_set *set) + u32 keylen, void *key, u32 vallen, + void *val, struct ptlrpc_request_set *set) { int rc = -EINVAL; @@ -1109,7 +1134,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, int rc = 0; int off = 0; - LASSERT(cfg->cfg_instance != NULL); + LASSERT(cfg->cfg_instance); LASSERT(cfg->cfg_sb == cfg->cfg_instance); inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); @@ -1195,7 +1220,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* lustre-OST0001-osc-<instance #> */ strcpy(obdname, cld->cld_logname); cname = strrchr(obdname, '-'); - if (cname == NULL) { + if (!cname) { CERROR("mgc %s: invalid logname %s\n", mgc->obd_name, obdname); break; @@ -1212,7 +1237,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* find the obd by obdname */ obd = class_name2obd(obdname); - if (obd == NULL) { + if (!obd) { CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n", mgc->obd_name, obdname); rc = 0; @@ -1227,7 +1252,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, uuid = buf + pos; down_read(&obd->u.cli.cl_sem); - if (obd->u.cli.cl_import == NULL) { + if (!obd->u.cli.cl_import) { /* client does not connect to the OST yet */ up_read(&obd->u.cli.cl_sem); rc = 0; @@ -1257,7 +1282,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, rc = -ENOMEM; lcfg = lustre_cfg_new(LCFG_PARAM, &bufs); - if (lcfg == NULL) { + if (!lcfg) { CERROR("mgc: cannot allocate memory\n"); break; } @@ -1309,14 +1334,14 @@ static int mgc_process_recover_log(struct obd_device *obd, nrpages = CONFIG_READ_NRPAGES_INIT; pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL); - if (pages == NULL) { + if (!pages) { rc = -ENOMEM; goto out; } for (i = 0; i < nrpages; i++) { pages[i] = alloc_page(GFP_KERNEL); - if (pages[i] == NULL) { + if (!pages[i]) { rc = -ENOMEM; goto out; } @@ -1327,7 +1352,7 @@ again: LASSERT(mutex_is_locked(&cld->cld_lock)); req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp), &RQF_MGS_CONFIG_READ); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1338,7 +1363,6 @@ again: /* pack request */ body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY); - LASSERT(body != NULL); LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname)); if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name)) >= sizeof(body->mcb_name)) { @@ -1353,7 +1377,7 @@ again: /* allocate bulk transfer descriptor */ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK, MGS_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { rc = -ENOMEM; goto out; } @@ -1373,7 +1397,8 @@ again: } /* always update the index even though it might have errors with - * handling the recover logs */ + * handling the recover logs + */ cfg->cfg_last_idx = res->mcr_offset; eof = res->mcr_offset == res->mcr_size; @@ -1400,7 +1425,8 @@ again: mne_swab = !!ptlrpc_rep_need_swab(req); #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0) /* This import flag means the server did an extra swab of IR MNE - * records 
(fixed in LU-1252), reverse it here if needed. LU-1644 */ + * records (fixed in LU-1252), reverse it here if needed. LU-1644 + */ if (unlikely(req->rq_import->imp_need_mne_swab)) mne_swab = !mne_swab; #else @@ -1434,7 +1460,7 @@ out: if (pages) { for (i = 0; i < nrpages; i++) { - if (pages[i] == NULL) + if (!pages[i]) break; __free_page(pages[i]); } @@ -1489,7 +1515,8 @@ static int mgc_process_cfg_log(struct obd_device *mgc, /* logname and instance info should be the same, so use our * copy of the instance for the update. The cfg_last_idx will - * be updated here. */ + * be updated here. + */ rc = class_config_parse_llog(env, ctxt, cld->cld_logname, &cld->cld_cfg); @@ -1529,9 +1556,10 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) LASSERT(cld); /* I don't want multiple processes running process_log at once -- - sounds like badness. It actually might be fine, as long as - we're not trying to update from the same log - simultaneously (in which case we should use a per-log sem.) */ + * sounds like badness. It actually might be fine, as long as + * we're not trying to update from the same log + * simultaneously (in which case we should use a per-log sem.) + */ mutex_lock(&cld->cld_lock); if (cld->cld_stopping) { mutex_unlock(&cld->cld_lock); @@ -1556,7 +1584,8 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl); /* mark cld_lostlock so that it will requeue - * after MGC becomes available. */ + * after MGC becomes available. + */ cld->cld_lostlock = 1; /* Get extra reference, it will be put in requeue thread */ config_log_get(cld); @@ -1635,18 +1664,19 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf) if (rc) break; cld = config_log_find(logname, cfg); - if (cld == NULL) { + if (!cld) { rc = -ENOENT; break; } /* COMPAT_146 */ /* FIXME only set this for old logs! Right now this forces - us to always skip the "inside markers" check */ + * us to always skip the "inside markers" check + */ cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146; rc = mgc_process_log(obd, cld); - if (rc == 0 && cld->cld_recover != NULL) { + if (rc == 0 && cld->cld_recover) { if (OCD_HAS_FLAG(&obd->u.cli.cl_import-> imp_connect_data, IMP_RECOV)) { rc = mgc_process_log(obd, cld->cld_recover); @@ -1660,7 +1690,7 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf) CERROR("Cannot process recover llog %d\n", rc); } - if (rc == 0 && cld->cld_params != NULL) { + if (rc == 0 && cld->cld_params) { rc = mgc_process_log(obd, cld->cld_params); if (rc == -ENOENT) { CDEBUG(D_MGC, @@ -1727,6 +1757,7 @@ static void /*__exit*/ mgc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); MODULE_DESCRIPTION("Lustre Management Client"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(mgc_init); diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile index acc685712ce9..c404eb3864ff 100644 --- a/drivers/staging/lustre/lustre/obdclass/Makefile +++ b/drivers/staging/lustre/lustre/obdclass/Makefile @@ -2,8 +2,8 @@ obj-$(CONFIG_LUSTRE_FS) += obdclass.o obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \ llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \ - genops.o uuid.o lprocfs_status.o \ - lustre_handles.o lustre_peer.o \ - statfs_pack.o obdo.o obd_config.o obd_mount.o \ - lu_object.o cl_object.o \ - cl_page.o cl_lock.o cl_io.o lu_ref.o acl.o lprocfs_counters.o + genops.o uuid.o lprocfs_status.o lprocfs_counters.o \ + lustre_handles.o lustre_peer.o statfs_pack.o \ + obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \ + cl_object.o cl_page.o cl_lock.o cl_io.o \ + acl.o kernelcomm.o diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c index 49ba8851c8ac..0e02ae97b7ed 100644 --- a/drivers/staging/lustre/lustre/obdclass/acl.c +++ b/drivers/staging/lustre/lustre/obdclass/acl.c @@ -104,7 +104,7 @@ static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header, return old_size; new = kmemdup(*header, new_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; kfree(*header); @@ -124,7 +124,7 @@ static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header, return 0; new = kmemdup(*header, ext_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; kfree(*header); @@ -149,7 +149,7 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size) count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr); esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr); new = kzalloc(esize, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return ERR_PTR(-ENOMEM); new->a_count = cpu_to_le32(count); @@ -180,7 +180,7 @@ int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size, return -EINVAL; new = kzalloc(size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION); @@ -300,7 +300,7 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size, ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr); new = kzalloc(ext_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return ERR_PTR(-ENOMEM); for (i = 0, j = 0; i < posix_count; i++) { diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c index 63246ba36798..f5128b4f176f 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_io.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c @@ -44,6 +44,7 @@ #include "../include/obd_support.h" #include "../include/lustre_fid.h" #include <linux/list.h> +#include <linux/sched.h> #include "../include/cl_object.h" #include "cl_internal.h" @@ -93,7 +94,7 @@ static int cl_io_invariant(const struct cl_io *io) * CIS_IO_GOING. 
*/ ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING || - (io->ci_state == CIS_LOCKED && up != NULL)); + (io->ci_state == CIS_LOCKED && up)); } /** @@ -111,7 +112,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io) slice = container_of(io->ci_layers.prev, struct cl_io_slice, cis_linkage); list_del_init(&slice->cis_linkage); - if (slice->cis_iop->op[io->ci_type].cio_fini != NULL) + if (slice->cis_iop->op[io->ci_type].cio_fini) slice->cis_iop->op[io->ci_type].cio_fini(env, slice); /* * Invalidate slice to catch use after free. This assumes that @@ -138,7 +139,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io) case CIT_MISC: /* Check ignore layout change conf */ LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout, - !io->ci_need_restart)); + !io->ci_need_restart)); break; default: LBUG(); @@ -164,7 +165,7 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io, result = 0; cl_object_for_each(scan, obj) { - if (scan->co_ops->coo_io_init != NULL) { + if (scan->co_ops->coo_io_init) { result = scan->co_ops->coo_io_init(env, scan, io); if (result != 0) break; @@ -186,7 +187,7 @@ int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, struct cl_thread_info *info = cl_env_info(env); LASSERT(obj != cl_object_top(obj)); - if (info->clt_current_io == NULL) + if (!info->clt_current_io) info->clt_current_io = io; return cl_io_init0(env, io, iot, obj); } @@ -208,7 +209,7 @@ int cl_io_init(const struct lu_env *env, struct cl_io *io, struct cl_thread_info *info = cl_env_info(env); LASSERT(obj == cl_object_top(obj)); - LASSERT(info->clt_current_io == NULL); + LASSERT(!info->clt_current_io); info->clt_current_io = io; return cl_io_init0(env, io, iot, obj); @@ -224,7 +225,7 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, enum cl_io_type iot, loff_t pos, size_t count) { LINVRNT(iot == CIT_READ || iot == CIT_WRITE); - LINVRNT(io->ci_obj != NULL); + LINVRNT(io->ci_obj); LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu, "io range: %u [%llu, %llu) %u %u\n", @@ -290,11 +291,11 @@ static void cl_io_locks_sort(struct cl_io *io) prev = NULL; list_for_each_entry_safe(curr, temp, - &io->ci_lockset.cls_todo, - cill_linkage) { - if (prev != NULL) { + &io->ci_lockset.cls_todo, + cill_linkage) { + if (prev) { switch (cl_lock_descr_sort(&prev->cill_descr, - &curr->cill_descr)) { + &curr->cill_descr)) { case 0: /* * IMPOSSIBLE: Identical locks are @@ -305,10 +306,11 @@ static void cl_io_locks_sort(struct cl_io *io) LBUG(); case 1: list_move_tail(&curr->cill_linkage, - &prev->cill_linkage); + &prev->cill_linkage); done = 0; continue; /* don't change prev: it's - * still "previous" */ + * still "previous" + */ case -1: /* already in order */ break; } @@ -327,32 +329,31 @@ static void cl_io_locks_sort(struct cl_io *io) int cl_queue_match(const struct list_head *queue, const struct cl_lock_descr *need) { - struct cl_io_lock_link *scan; + struct cl_io_lock_link *scan; - list_for_each_entry(scan, queue, cill_linkage) { - if (cl_lock_descr_match(&scan->cill_descr, need)) - return 1; - } - return 0; + list_for_each_entry(scan, queue, cill_linkage) { + if (cl_lock_descr_match(&scan->cill_descr, need)) + return 1; + } + return 0; } EXPORT_SYMBOL(cl_queue_match); static int cl_queue_merge(const struct list_head *queue, const struct cl_lock_descr *need) { - struct cl_io_lock_link *scan; - - list_for_each_entry(scan, queue, cill_linkage) { - if (cl_lock_descr_cmp(&scan->cill_descr, need)) - continue; - cl_lock_descr_merge(&scan->cill_descr, need); - 
CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", - scan->cill_descr.cld_mode, scan->cill_descr.cld_start, - scan->cill_descr.cld_end); - return 1; - } - return 0; + struct cl_io_lock_link *scan; + list_for_each_entry(scan, queue, cill_linkage) { + if (cl_lock_descr_cmp(&scan->cill_descr, need)) + continue; + cl_lock_descr_merge(&scan->cill_descr, need); + CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", + scan->cill_descr.cld_mode, scan->cill_descr.cld_start, + scan->cill_descr.cld_end); + return 1; + } + return 0; } static int cl_lockset_match(const struct cl_lockset *set, @@ -384,8 +385,7 @@ static int cl_lockset_lock_one(const struct lu_env *env, if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) { result = cl_wait(env, lock); if (result == 0) - list_move(&link->cill_linkage, - &set->cls_done); + list_move(&link->cill_linkage, &set->cls_done); } else result = 0; } else @@ -399,11 +399,11 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io, struct cl_lock *lock = link->cill_lock; list_del_init(&link->cill_linkage); - if (lock != NULL) { + if (lock) { cl_lock_release(env, lock, "io", io); link->cill_lock = NULL; } - if (link->cill_fini != NULL) + if (link->cill_fini) link->cill_fini(env, link); } @@ -419,7 +419,8 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) { if (!cl_lockset_match(set, &link->cill_descr)) { /* XXX some locking to guarantee that locks aren't - * expanded in between. */ + * expanded in between. + */ result = cl_lockset_lock_one(env, io, set, link); if (result != 0) break; @@ -428,12 +429,11 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, } if (result == 0) { list_for_each_entry_safe(link, temp, - &set->cls_curr, cill_linkage) { + &set->cls_curr, cill_linkage) { lock = link->cill_lock; result = cl_wait(env, lock); if (result == 0) - list_move(&link->cill_linkage, - &set->cls_done); + list_move(&link->cill_linkage, &set->cls_done); else break; } @@ -458,7 +458,7 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_lock == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_lock) continue; result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan); if (result != 0) @@ -503,7 +503,7 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io) cl_lock_link_fini(env, io, link); } cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL) + if (scan->cis_iop->op[io->ci_type].cio_unlock) scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); } io->ci_state = CIS_UNLOCKED; @@ -529,7 +529,7 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io) result = 0; cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_iter_init) continue; result = scan->cis_iop->op[io->ci_type].cio_iter_init(env, scan); @@ -556,7 +556,7 @@ void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL) + if (scan->cis_iop->op[io->ci_type].cio_iter_fini) scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan); } io->ci_state = CIS_IT_ENDED; @@ -581,7 +581,7 @@ static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, /* layers have to be notified. 
*/ cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_advance != NULL) + if (scan->cis_iop->op[io->ci_type].cio_advance) scan->cis_iop->op[io->ci_type].cio_advance(env, scan, nob); } @@ -621,7 +621,7 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, int result; link = kzalloc(sizeof(*link), GFP_NOFS); - if (link != NULL) { + if (link) { link->cill_descr = *descr; link->cill_fini = cl_free_io_lock_link; result = cl_io_lock_add(env, io, link); @@ -648,7 +648,7 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io) io->ci_state = CIS_IO_GOING; cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_start == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_start) continue; result = scan->cis_iop->op[io->ci_type].cio_start(env, scan); if (result != 0) @@ -673,7 +673,7 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_end != NULL) + if (scan->cis_iop->op[io->ci_type].cio_end) scan->cis_iop->op[io->ci_type].cio_end(env, scan); /* TODO: error handling. */ } @@ -687,7 +687,7 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page) const struct cl_page_slice *slice; slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type); - LINVRNT(slice != NULL); + LINVRNT(slice); return slice; } @@ -759,11 +759,11 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io, * "parallel io" (see CLO_REPEAT loops in cl_lock.c). */ cl_io_for_each(scan, io) { - if (scan->cis_iop->cio_read_page != NULL) { + if (scan->cis_iop->cio_read_page) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); - LINVRNT(slice != NULL); + LINVRNT(slice); result = scan->cis_iop->cio_read_page(env, scan, slice); if (result != 0) break; @@ -798,7 +798,7 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, LASSERT(cl_page_in_io(page, io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->cio_prepare_write != NULL) { + if (scan->cis_iop->cio_prepare_write) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); @@ -833,11 +833,11 @@ int cl_io_commit_write(const struct lu_env *env, struct cl_io *io, * state. Better (and more general) way of dealing with such situation * is needed. */ - LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL); + LASSERT(cl_page_is_owned(page, io) || page->cp_parent); LASSERT(cl_page_in_io(page, io)); cl_io_for_each(scan, io) { - if (scan->cis_iop->cio_commit_write != NULL) { + if (scan->cis_iop->cio_commit_write) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); @@ -872,7 +872,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op)); cl_io_for_each(scan, io) { - if (scan->cis_iop->req_op[crt].cio_submit == NULL) + if (!scan->cis_iop->req_op[crt].cio_submit) continue; result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt, queue); @@ -900,7 +900,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, int rc; cl_page_list_for_each(pg, &queue->c2_qin) { - LASSERT(pg->cp_sync_io == NULL); + LASSERT(!pg->cp_sync_io); pg->cp_sync_io = anchor; } @@ -913,14 +913,14 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, * clean pages), count them as completed to avoid infinite * wait. 
*/ - cl_page_list_for_each(pg, &queue->c2_qin) { + cl_page_list_for_each(pg, &queue->c2_qin) { pg->cp_sync_io = NULL; cl_sync_io_note(anchor, 1); - } + } - /* wait for the IO to be finished. */ - rc = cl_sync_io_wait(env, io, &queue->c2_qout, - anchor, timeout); + /* wait for the IO to be finished. */ + rc = cl_sync_io_wait(env, io, &queue->c2_qout, + anchor, timeout); } else { LASSERT(list_empty(&queue->c2_qout.pl_pages)); cl_page_list_for_each(pg, &queue->c2_qin) @@ -1026,7 +1026,7 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, { struct list_head *linkage = &slice->cis_linkage; - LASSERT((linkage->prev == NULL && linkage->next == NULL) || + LASSERT((!linkage->prev && !linkage->next) || list_empty(linkage)); list_add_tail(linkage, &io->ci_layers); @@ -1053,8 +1053,9 @@ EXPORT_SYMBOL(cl_page_list_init); void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page) { /* it would be better to check that page is owned by "current" io, but - * it is not passed here. */ - LASSERT(page->cp_owner != NULL); + * it is not passed here. + */ + LASSERT(page->cp_owner); LINVRNT(plist->pl_owner == current); lockdep_off(); @@ -1263,7 +1264,7 @@ EXPORT_SYMBOL(cl_2queue_init_page); */ struct cl_io *cl_io_top(struct cl_io *io) { - while (io->ci_parent != NULL) + while (io->ci_parent) io = io->ci_parent; return io; } @@ -1296,13 +1297,13 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req) LASSERT(list_empty(&req->crq_pages)); LASSERT(req->crq_nrpages == 0); LINVRNT(list_empty(&req->crq_layers)); - LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL)); + LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o)); - if (req->crq_o != NULL) { + if (req->crq_o) { for (i = 0; i < req->crq_nrobjs; ++i) { struct cl_object *obj = req->crq_o[i].ro_obj; - if (obj != NULL) { + if (obj) { lu_object_ref_del_at(&obj->co_lu, &req->crq_o[i].ro_obj_ref, "cl_req", req); @@ -1326,7 +1327,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req, do { list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev); - if (dev->cd_ops->cdo_req_init != NULL) { + if (dev->cd_ops->cdo_req_init) { result = dev->cd_ops->cdo_req_init(env, dev, req); if (result != 0) @@ -1334,7 +1335,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req, } } page = page->cp_child; - } while (page != NULL && result == 0); + } while (page && result == 0); return result; } @@ -1351,9 +1352,9 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc) */ while (!list_empty(&req->crq_layers)) { slice = list_entry(req->crq_layers.prev, - struct cl_req_slice, crs_linkage); + struct cl_req_slice, crs_linkage); list_del_init(&slice->crs_linkage); - if (slice->crs_ops->cro_completion != NULL) + if (slice->crs_ops->cro_completion) slice->crs_ops->cro_completion(env, slice, rc); } cl_req_free(env, req); @@ -1371,7 +1372,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, LINVRNT(nr_objects > 0); req = kzalloc(sizeof(*req), GFP_NOFS); - if (req != NULL) { + if (req) { int result; req->crq_type = crt; @@ -1380,7 +1381,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]), GFP_NOFS); - if (req->crq_o != NULL) { + if (req->crq_o) { req->crq_nrobjs = nr_objects; result = cl_req_init(env, req, page); } else @@ -1408,7 +1409,7 @@ void cl_req_page_add(const struct lu_env *env, page = cl_page_top(page); 
LASSERT(list_empty(&page->cp_flight)); - LASSERT(page->cp_req == NULL); + LASSERT(!page->cp_req); CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n", req, req->crq_type, req->crq_nrpages); @@ -1418,7 +1419,7 @@ void cl_req_page_add(const struct lu_env *env, page->cp_req = req; obj = cl_object_top(page->cp_obj); for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) { - if (rqo->ro_obj == NULL) { + if (!rqo->ro_obj) { rqo->ro_obj = obj; cl_object_get(obj); lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref, @@ -1463,11 +1464,11 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req) * of objects. */ for (i = 0; i < req->crq_nrobjs; ++i) - LASSERT(req->crq_o[i].ro_obj != NULL); + LASSERT(req->crq_o[i].ro_obj); result = 0; list_for_each_entry(slice, &req->crq_layers, crs_linkage) { - if (slice->crs_ops->cro_prep != NULL) { + if (slice->crs_ops->cro_prep) { result = slice->crs_ops->cro_prep(env, slice); if (result != 0) break; @@ -1501,9 +1502,8 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, scan = cl_page_at(page, slice->crs_dev->cd_lu_dev.ld_type); - LASSERT(scan != NULL); obj = scan->cpl_obj; - if (slice->crs_ops->cro_attr_set != NULL) + if (slice->crs_ops->cro_attr_set) slice->crs_ops->cro_attr_set(env, slice, obj, attr + i, flags); } @@ -1511,9 +1511,6 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, } EXPORT_SYMBOL(cl_req_attr_set); -/* XXX complete(), init_completion(), and wait_for_completion(), until they are - * implemented in libcfs. */ -# include <linux/sched.h> /** * Initialize synchronous io wait anchor, for transfer of \a nrpages pages. diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c index 1836dc01499a..aec644eb4db9 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c @@ -96,8 +96,8 @@ static int cl_lock_invariant(const struct lu_env *env, result = atomic_read(&lock->cll_ref) > 0 && cl_lock_invariant_trusted(env, lock); - if (!result && env != NULL) - CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken"); + if (!result && env) + CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n"); return result; } @@ -259,7 +259,7 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock) struct cl_lock_slice *slice; slice = list_entry(lock->cll_layers.next, - struct cl_lock_slice, cls_linkage); + struct cl_lock_slice, cls_linkage); list_del_init(lock->cll_layers.next); slice->cls_ops->clo_fini(env, slice); } @@ -288,7 +288,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock) LINVRNT(cl_lock_invariant(env, lock)); obj = lock->cll_descr.cld_obj; - LINVRNT(obj != NULL); + LINVRNT(obj); CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n", atomic_read(&lock->cll_ref), lock, RETIP); @@ -361,8 +361,8 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, struct cl_lock *lock; struct lu_object_header *head; - lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lock != NULL) { + lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS); + if (lock) { atomic_set(&lock->cll_ref, 1); lock->cll_descr = *descr; lock->cll_state = CLS_NEW; @@ -382,8 +382,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, CS_LOCK_INC(obj, total); CS_LOCK_INC(obj, create); cl_lock_lockdep_init(lock); - list_for_each_entry(obj, &head->loh_layers, - co_lu.lo_linkage) { + list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) { int err; err = 
obj->co_ops->coo_lock_init(env, obj, lock, io); @@ -461,7 +460,7 @@ static int cl_lock_fits_into(const struct lu_env *env, LINVRNT(cl_lock_invariant_trusted(env, lock)); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_fits_into != NULL && + if (slice->cls_ops->clo_fits_into && !slice->cls_ops->clo_fits_into(env, slice, need, io)) return 0; } @@ -524,17 +523,17 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env, lock = cl_lock_lookup(env, obj, io, need); spin_unlock(&head->coh_lock_guard); - if (lock == NULL) { + if (!lock) { lock = cl_lock_alloc(env, obj, io, need); if (!IS_ERR(lock)) { struct cl_lock *ghost; spin_lock(&head->coh_lock_guard); ghost = cl_lock_lookup(env, obj, io, need); - if (ghost == NULL) { + if (!ghost) { cl_lock_get_trust(lock); list_add_tail(&lock->cll_linkage, - &head->coh_locks); + &head->coh_locks); spin_unlock(&head->coh_lock_guard); CS_LOCK_INC(obj, busy); } else { @@ -572,7 +571,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, spin_lock(&head->coh_lock_guard); lock = cl_lock_lookup(env, obj, io, need); spin_unlock(&head->coh_lock_guard); - if (lock == NULL) + if (!lock) return NULL; cl_lock_mutex_get(env, lock); @@ -584,7 +583,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, cl_lock_put(env, lock); lock = NULL; } - } while (lock == NULL); + } while (!lock); cl_lock_hold_add(env, lock, scope, source); cl_lock_user_add(env, lock); @@ -774,8 +773,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock) lock->cll_flags |= CLF_CANCELLED; list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_cancel != NULL) + cls_linkage) { + if (slice->cls_ops->clo_cancel) slice->cls_ops->clo_cancel(env, slice); } } @@ -811,8 +810,8 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock) * by cl_lock_lookup(). */ list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_delete != NULL) + cls_linkage) { + if (slice->cls_ops->clo_delete) slice->cls_ops->clo_delete(env, slice); } /* @@ -935,7 +934,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) if (result == 0) { /* To avoid being interrupted by the 'non-fatal' signals * (SIGCHLD, for instance), we'd block them temporarily. 
- * LU-305 */ + * LU-305 + */ blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); init_waitqueue_entry(&waiter, current); @@ -946,7 +946,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) LASSERT(cl_lock_nr_mutexed(env) == 0); /* Returning ERESTARTSYS instead of EINTR so syscalls - * can be restarted if signals are pending here */ + * can be restarted if signals are pending here + */ result = -ERESTARTSYS; if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) { schedule(); @@ -974,7 +975,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock, LINVRNT(cl_lock_invariant(env, lock)); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) - if (slice->cls_ops->clo_state != NULL) + if (slice->cls_ops->clo_state) slice->cls_ops->clo_state(env, slice, state); wake_up_all(&lock->cll_wq); } @@ -1038,8 +1039,8 @@ static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock) result = -ENOSYS; list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_unuse != NULL) { + cls_linkage) { + if (slice->cls_ops->clo_unuse) { result = slice->cls_ops->clo_unuse(env, slice); if (result != 0) break; @@ -1072,7 +1073,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic) result = -ENOSYS; state = cl_lock_intransit(env, lock); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_use != NULL) { + if (slice->cls_ops->clo_use) { result = slice->cls_ops->clo_use(env, slice); if (result != 0) break; @@ -1125,7 +1126,7 @@ static int cl_enqueue_kick(const struct lu_env *env, result = -ENOSYS; list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_enqueue != NULL) { + if (slice->cls_ops->clo_enqueue) { result = slice->cls_ops->clo_enqueue(env, slice, io, flags); if (result != 0) @@ -1170,7 +1171,8 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, /* kick layers. */ result = cl_enqueue_kick(env, lock, io, flags); /* For AGL case, the cl_lock::cll_state may - * become CLS_HELD already. */ + * become CLS_HELD already. + */ if (result == 0 && lock->cll_state == CLS_QUEUING) cl_lock_state_set(env, lock, CLS_ENQUEUED); break; @@ -1215,7 +1217,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); LASSERT(lock->cll_state == CLS_QUEUING); - LASSERT(lock->cll_conflict != NULL); + LASSERT(lock->cll_conflict); conflict = lock->cll_conflict; lock->cll_conflict = NULL; @@ -1258,7 +1260,7 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock, do { result = cl_enqueue_try(env, lock, io, enqflags); if (result == CLO_WAIT) { - if (lock->cll_conflict != NULL) + if (lock->cll_conflict) result = cl_lock_enqueue_wait(env, lock, 1); else result = cl_lock_state_wait(env, lock); @@ -1300,7 +1302,8 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock) } /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold - * underlying resources. */ + * underlying resources. 
+ */ if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) { cl_lock_user_del(env, lock); return 0; @@ -1416,7 +1419,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock) result = -ENOSYS; list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_wait != NULL) { + if (slice->cls_ops->clo_wait) { result = slice->cls_ops->clo_wait(env, slice); if (result != 0) break; @@ -1449,7 +1452,7 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock) LINVRNT(cl_lock_invariant(env, lock)); LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD, - "Wrong state %d \n", lock->cll_state); + "Wrong state %d\n", lock->cll_state); LASSERT(lock->cll_holds > 0); do { @@ -1487,7 +1490,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock) pound = 0; list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_weigh != NULL) { + if (slice->cls_ops->clo_weigh) { ounce = slice->cls_ops->clo_weigh(env, slice); pound += ounce; if (pound < ounce) /* over-weight^Wflow */ @@ -1523,7 +1526,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, LINVRNT(cl_lock_invariant(env, lock)); list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_modify != NULL) { + if (slice->cls_ops->clo_modify) { result = slice->cls_ops->clo_modify(env, slice, desc); if (result != 0) return result; @@ -1584,7 +1587,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, result = cl_lock_enclosure(env, lock, closure); if (result == 0) { list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_closure != NULL) { + if (slice->cls_ops->clo_closure) { result = slice->cls_ops->clo_closure(env, slice, closure); if (result != 0) @@ -1654,7 +1657,7 @@ void cl_lock_disclosure(const struct lu_env *env, cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin); list_for_each_entry_safe(scan, temp, &closure->clc_list, - cll_inclosure){ + cll_inclosure) { list_del_init(&scan->cll_inclosure); cl_lock_mutex_put(env, scan); lu_ref_del(&scan->cll_reference, "closure", closure); @@ -1777,13 +1780,15 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, lock = NULL; need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but - * not PHANTOM */ + * not PHANTOM + */ need->cld_start = need->cld_end = index; need->cld_enq_flags = 0; spin_lock(&head->coh_lock_guard); /* It is fine to match any group lock since there could be only one - * with a uniq gid and it conflicts with all other lock modes too */ + * with a uniq gid and it conflicts with all other lock modes too + */ list_for_each_entry(scan, &head->coh_locks, cll_linkage) { if (scan != except && (scan->cll_descr.cld_mode == CLM_GROUP || @@ -1798,7 +1803,8 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, (canceld || !(scan->cll_flags & CLF_CANCELLED)) && (pending || !(scan->cll_flags & CLF_CANCELPEND))) { /* Don't increase cs_hit here since this - * is just a helper function. */ + * is just a helper function. 
+ */ cl_lock_get_trust(scan); lock = scan; break; @@ -1820,7 +1826,6 @@ static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock) dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type; slice = cl_page_at(page, dtype); - LASSERT(slice != NULL); return slice->cpl_page->cp_index; } @@ -1839,12 +1844,13 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io, /* refresh non-overlapped index */ tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index, - lock, 1, 0); - if (tmp != NULL) { + lock, 1, 0); + if (tmp) { /* Cache the first-non-overlapped index so as to skip * all pages within [index, clt_fn_index). This * is safe because if tmp lock is canceled, it will - * discard these pages. */ + * discard these pages. + */ info->clt_fn_index = tmp->cll_descr.cld_end + 1; if (tmp->cll_descr.cld_end == CL_PAGE_EOF) info->clt_fn_index = CL_PAGE_EOF; @@ -1950,7 +1956,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel) * already destroyed (as otherwise they will be left unprotected). */ LASSERT(ergo(!cancel, - head->coh_tree.rnode == NULL && head->coh_pages == 0)); + !head->coh_tree.rnode && head->coh_pages == 0)); spin_lock(&head->coh_lock_guard); while (!list_empty(&head->coh_locks)) { @@ -2166,8 +2172,8 @@ EXPORT_SYMBOL(cl_lock_mode_name); * Prints human readable representation of a lock description. */ void cl_lock_descr_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_lock_descr *descr) + lu_printer_t printer, + const struct cl_lock_descr *descr) { const struct lu_fid *fid; @@ -2194,7 +2200,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie, (*printer)(env, cookie, " %s@%p: ", slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name, slice); - if (slice->cls_ops->clo_print != NULL) + if (slice->cls_ops->clo_print) slice->cls_ops->clo_print(env, cookie, printer, slice); (*printer)(env, cookie, "\n"); } diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c index 57c8d5412bbd..43e299d4d416 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_object.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c @@ -152,7 +152,7 @@ struct cl_object *cl_object_top(struct cl_object *o) struct cl_object_header *hdr = cl_object_header(o); struct cl_object *top; - while (hdr->coh_parent != NULL) + while (hdr->coh_parent) hdr = hdr->coh_parent; top = lu2cl(lu_object_top(&hdr->coh_lu)); @@ -217,7 +217,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_get != NULL) { + if (obj->co_ops->coo_attr_get) { result = obj->co_ops->coo_attr_get(env, obj, attr); if (result != 0) { if (result > 0) @@ -247,9 +247,8 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; - list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_set != NULL) { + list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_attr_set) { result = obj->co_ops->coo_attr_set(env, obj, attr, v); if (result != 0) { if (result > 0) @@ -278,9 +277,8 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; - list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { - if (obj->co_ops->coo_glimpse != NULL) { + 
list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_glimpse) { result = obj->co_ops->coo_glimpse(env, obj, lvb); if (result != 0) break; @@ -306,7 +304,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_conf_set != NULL) { + if (obj->co_ops->coo_conf_set) { result = obj->co_ops->coo_conf_set(env, obj, conf); if (result != 0) break; @@ -328,7 +326,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj) struct cl_object_header *hdr; hdr = cl_object_header(obj); - LASSERT(hdr->coh_tree.rnode == NULL); + LASSERT(!hdr->coh_tree.rnode); LASSERT(hdr->coh_pages == 0); set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags); @@ -362,7 +360,8 @@ void cache_stats_init(struct cache_stats *cs, const char *name) atomic_set(&cs->cs_stats[i], 0); } -int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h) +static int cache_stats_print(const struct cache_stats *cs, + struct seq_file *m, int h) { int i; /* @@ -456,13 +455,13 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......] seq_printf(m, " ["); for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i) seq_printf(m, "%s: %u ", pstate[i], - atomic_read(&site->cs_pages_state[i])); + atomic_read(&site->cs_pages_state[i])); seq_printf(m, "]\n"); cache_stats_print(&site->cs_locks, m, 0); seq_printf(m, " ["); for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i) seq_printf(m, "%s: %u ", lstate[i], - atomic_read(&site->cs_locks_state[i])); + atomic_read(&site->cs_locks_state[i])); seq_printf(m, "]\n"); cache_stats_print(&cl_env_stats, m, 0); seq_printf(m, "\n"); @@ -482,7 +481,6 @@ EXPORT_SYMBOL(cl_site_stats_print); * because Lustre code may call into other fs which has certain assumptions * about journal_info. Currently following fields in task_struct are identified * can be used for this purpose: - * - cl_env: for liblustre. * - tux_info: only on RedHat kernel. * - ... * \note As long as we use task_struct to store cl_env, we assume that once @@ -540,7 +538,7 @@ static void cl_env_init0(struct cl_env *cle, void *debug) { LASSERT(cle->ce_ref == 0); LASSERT(cle->ce_magic == &cl_env_init0); - LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL); + LASSERT(!cle->ce_debug && !cle->ce_owner); cle->ce_ref = 1; cle->ce_debug = debug; @@ -575,7 +573,7 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn) { struct cl_env *cle = cl_env_hops_obj(hn); - LASSERT(cle->ce_owner != NULL); + LASSERT(cle->ce_owner); return (key == cle->ce_owner); } @@ -609,7 +607,7 @@ static inline void cl_env_attach(struct cl_env *cle) if (cle) { int rc; - LASSERT(cle->ce_owner == NULL); + LASSERT(!cle->ce_owner); cle->ce_owner = (void *) (long) current->pid; rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner, &cle->ce_node); @@ -637,7 +635,7 @@ static int cl_env_store_init(void) CFS_HASH_MAX_THETA, &cl_env_hops, CFS_HASH_RW_BKTLOCK); - return cl_env_hash != NULL ? 0 : -ENOMEM; + return cl_env_hash ? 
0 : -ENOMEM; } static void cl_env_store_fini(void) @@ -647,7 +645,7 @@ static void cl_env_store_fini(void) static inline struct cl_env *cl_env_detach(struct cl_env *cle) { - if (cle == NULL) + if (!cle) cle = cl_env_fetch(); if (cle && cle->ce_owner) @@ -661,8 +659,8 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) struct lu_env *env; struct cl_env *cle; - cle = kmem_cache_alloc(cl_env_kmem, GFP_NOFS | __GFP_ZERO); - if (cle != NULL) { + cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS); + if (cle) { int rc; INIT_LIST_HEAD(&cle->ce_linkage); @@ -716,7 +714,7 @@ static struct lu_env *cl_env_peek(int *refcheck) env = NULL; cle = cl_env_fetch(); - if (cle != NULL) { + if (cle) { CL_ENV_INC(hit); env = &cle->ce_lu; *refcheck = ++cle->ce_ref; @@ -741,7 +739,7 @@ struct lu_env *cl_env_get(int *refcheck) struct lu_env *env; env = cl_env_peek(refcheck); - if (env == NULL) { + if (!env) { env = cl_env_new(lu_context_tags_default, lu_session_tags_default, __builtin_return_address(0)); @@ -768,7 +766,7 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags) { struct lu_env *env; - LASSERT(cl_env_peek(refcheck) == NULL); + LASSERT(!cl_env_peek(refcheck)); env = cl_env_new(tags, tags, __builtin_return_address(0)); if (!IS_ERR(env)) { struct cl_env *cle; @@ -783,7 +781,7 @@ EXPORT_SYMBOL(cl_env_alloc); static void cl_env_exit(struct cl_env *cle) { - LASSERT(cle->ce_owner == NULL); + LASSERT(!cle->ce_owner); lu_context_exit(&cle->ce_lu.le_ctx); lu_context_exit(&cle->ce_ses); } @@ -802,7 +800,7 @@ void cl_env_put(struct lu_env *env, int *refcheck) cle = cl_env_container(env); LASSERT(cle->ce_ref > 0); - LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck)); + LASSERT(ergo(refcheck, cle->ce_ref == *refcheck)); CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); if (--cle->ce_ref == 0) { @@ -877,7 +875,7 @@ struct lu_env *cl_env_nested_get(struct cl_env_nest *nest) nest->cen_cookie = NULL; env = cl_env_peek(&nest->cen_refcheck); - if (env != NULL) { + if (env) { if (!cl_io_is_going(env)) return env; cl_env_put(env, &nest->cen_refcheck); @@ -929,14 +927,12 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, const char *typename; struct lu_device *d; - LASSERT(ldt != NULL); - typename = ldt->ldt_name; d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL); if (!IS_ERR(d)) { int rc; - if (site != NULL) + if (site) d->ld_site = site; rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next); if (rc == 0) { diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c index 61f28ebfc058..231a2f26c693 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_page.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c @@ -69,7 +69,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, */ static struct cl_page *cl_page_top_trusted(struct cl_page *page) { - while (page->cp_parent != NULL) + while (page->cp_parent) page = page->cp_parent; return page; } @@ -110,7 +110,7 @@ cl_page_at_trusted(const struct cl_page *page, return slice; } page = page->cp_child; - } while (page != NULL); + } while (page); return NULL; } @@ -127,7 +127,7 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index) assert_spin_locked(&hdr->coh_page_guard); page = radix_tree_lookup(&hdr->coh_tree, index); - if (page != NULL) + if (page) cl_page_get_trust(page); return page; } @@ -188,7 +188,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, * Pages for lsm-less file 
has no underneath sub-page * for osc, in case of ... */ - PASSERT(env, page, slice != NULL); + PASSERT(env, page, slice); page = slice->cpl_page; /* @@ -245,9 +245,9 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) struct cl_object *obj = page->cp_obj; PASSERT(env, page, list_empty(&page->cp_batch)); - PASSERT(env, page, page->cp_owner == NULL); - PASSERT(env, page, page->cp_req == NULL); - PASSERT(env, page, page->cp_parent == NULL); + PASSERT(env, page, !page->cp_owner); + PASSERT(env, page, !page->cp_req); + PASSERT(env, page, !page->cp_parent); PASSERT(env, page, page->cp_state == CPS_FREEING); might_sleep(); @@ -255,7 +255,7 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) struct cl_page_slice *slice; slice = list_entry(page->cp_layers.next, - struct cl_page_slice, cpl_linkage); + struct cl_page_slice, cpl_linkage); list_del_init(page->cp_layers.next); slice->cpl_ops->cpo_fini(env, slice); } @@ -277,14 +277,15 @@ static inline void cl_page_state_set_trust(struct cl_page *page, } static struct cl_page *cl_page_alloc(const struct lu_env *env, - struct cl_object *o, pgoff_t ind, struct page *vmpage, - enum cl_page_type type) + struct cl_object *o, pgoff_t ind, + struct page *vmpage, + enum cl_page_type type) { struct cl_page *page; struct lu_object_header *head; page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS); - if (page != NULL) { + if (page) { int result = 0; atomic_set(&page->cp_ref, 1); @@ -303,9 +304,8 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env, mutex_init(&page->cp_mutex); lu_ref_init(&page->cp_reference); head = o->co_lu.lo_header; - list_for_each_entry(o, &head->loh_layers, - co_lu.lo_linkage) { - if (o->co_ops->coo_page_init != NULL) { + list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) { + if (o->co_ops->coo_page_init) { result = o->co_ops->coo_page_init(env, o, page, vmpage); if (result != 0) { @@ -369,13 +369,13 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, */ page = cl_vmpage_page(vmpage, o); PINVRNT(env, page, - ergo(page != NULL, + ergo(page, cl_page_vmpage(env, page) == vmpage && (void *)radix_tree_lookup(&hdr->coh_tree, idx) == page)); } - if (page != NULL) + if (page) return page; /* allocate and initialize cl_page */ @@ -385,7 +385,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, if (type == CPT_TRANSIENT) { if (parent) { - LASSERT(page->cp_parent == NULL); + LASSERT(!page->cp_parent); page->cp_parent = parent; parent->cp_child = page; } @@ -418,7 +418,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, "fail to insert into radix tree: %d\n", err); } else { if (parent) { - LASSERT(page->cp_parent == NULL); + LASSERT(!page->cp_parent); page->cp_parent = parent; parent->cp_child = page; } @@ -426,7 +426,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, } spin_unlock(&hdr->coh_page_guard); - if (unlikely(ghost != NULL)) { + if (unlikely(ghost)) { cl_page_delete0(env, ghost, 0); cl_page_free(env, ghost); } @@ -467,14 +467,13 @@ static inline int cl_page_invariant(const struct cl_page *pg) owner = pg->cp_owner; return cl_page_in_use(pg) && - ergo(parent != NULL, parent->cp_child == pg) && - ergo(child != NULL, child->cp_parent == pg) && - ergo(child != NULL, pg->cp_obj != child->cp_obj) && - ergo(parent != NULL, pg->cp_obj != parent->cp_obj) && - ergo(owner != NULL && parent != NULL, + ergo(parent, parent->cp_child == pg) && + ergo(child, child->cp_parent == pg) && + ergo(child, pg->cp_obj != 
child->cp_obj) && + ergo(parent, pg->cp_obj != parent->cp_obj) && + ergo(owner && parent, parent->cp_owner == pg->cp_owner->ci_parent) && - ergo(owner != NULL && child != NULL, - child->cp_owner->ci_parent == owner) && + ergo(owner && child, child->cp_owner->ci_parent == owner) && /* * Either page is early in initialization (has neither child * nor parent yet), or it is in the object radix tree. @@ -482,7 +481,7 @@ static inline int cl_page_invariant(const struct cl_page *pg) ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE, (void *)radix_tree_lookup(&header->coh_tree, pg->cp_index) == pg || - (child == NULL && parent == NULL)); + (!child && !parent)); } static void cl_page_state_set0(const struct lu_env *env, @@ -535,10 +534,10 @@ static void cl_page_state_set0(const struct lu_env *env, old = page->cp_state; PASSERT(env, page, allowed_transitions[old][state]); CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state); - for (; page != NULL; page = page->cp_child) { + for (; page; page = page->cp_child) { PASSERT(env, page, page->cp_state == old); PASSERT(env, page, - equi(state == CPS_OWNED, page->cp_owner != NULL)); + equi(state == CPS_OWNED, page->cp_owner)); cl_page_state_set_trust(page, state); } @@ -584,7 +583,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page) LASSERT(page->cp_state == CPS_FREEING); LASSERT(atomic_read(&page->cp_ref) == 0); - PASSERT(env, page, page->cp_owner == NULL); + PASSERT(env, page, !page->cp_owner); PASSERT(env, page, list_empty(&page->cp_batch)); /* * Page is no longer reachable by other threads. Tear @@ -609,11 +608,11 @@ struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page) page = cl_page_top(page); do { list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { - if (slice->cpl_ops->cpo_vmpage != NULL) + if (slice->cpl_ops->cpo_vmpage) return slice->cpl_ops->cpo_vmpage(env, slice); } page = page->cp_child; - } while (page != NULL); + } while (page); LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */ } EXPORT_SYMBOL(cl_page_vmpage); @@ -639,10 +638,10 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj) * can be rectified easily. */ top = (struct cl_page *)vmpage->private; - if (top == NULL) + if (!top) return NULL; - for (page = top; page != NULL; page = page->cp_child) { + for (page = top; page; page = page->cp_child) { if (cl_object_same(page->cp_obj, obj)) { cl_page_get_trust(page); break; @@ -689,7 +688,7 @@ EXPORT_SYMBOL(cl_page_at); cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) { \ + if (__method) { \ __result = (*__method)(__env, __scan, \ ## __VA_ARGS__); \ if (__result != 0) \ @@ -697,7 +696,7 @@ EXPORT_SYMBOL(cl_page_at); } \ } \ __page = __page->cp_child; \ - } while (__page != NULL && __result == 0); \ + } while (__page && __result == 0); \ if (__result > 0) \ __result = 0; \ __result; \ @@ -717,12 +716,12 @@ do { \ cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) \ + if (__method) \ (*__method)(__env, __scan, \ ## __VA_ARGS__); \ } \ __page = __page->cp_child; \ - } while (__page != NULL); \ + } while (__page); \ } while (0) #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \ @@ -734,19 +733,19 @@ do { \ void (*__method)_proto; \ \ /* get to the bottom page. 
*/ \ - while (__page->cp_child != NULL) \ + while (__page->cp_child) \ __page = __page->cp_child; \ do { \ list_for_each_entry_reverse(__scan, &__page->cp_layers, \ cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) \ + if (__method) \ (*__method)(__env, __scan, \ ## __VA_ARGS__); \ } \ __page = __page->cp_parent; \ - } while (__page != NULL); \ + } while (__page); \ } while (0) static int cl_page_invoke(const struct lu_env *env, @@ -772,8 +771,8 @@ static void cl_page_invoid(const struct lu_env *env, static void cl_page_owner_clear(struct cl_page *page) { - for (page = cl_page_top(page); page != NULL; page = page->cp_child) { - if (page->cp_owner != NULL) { + for (page = cl_page_top(page); page; page = page->cp_child) { + if (page->cp_owner) { LASSERT(page->cp_owner->ci_owned_nr > 0); page->cp_owner->ci_owned_nr--; page->cp_owner = NULL; @@ -784,10 +783,8 @@ static void cl_page_owner_clear(struct cl_page *page) static void cl_page_owner_set(struct cl_page *page) { - for (page = cl_page_top(page); page != NULL; page = page->cp_child) { - LASSERT(page->cp_owner != NULL); + for (page = cl_page_top(page); page; page = page->cp_child) page->cp_owner->ci_owned_nr++; - } } void cl_page_disown0(const struct lu_env *env, @@ -862,8 +859,8 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io, struct cl_io *, int), io, nonblock); if (result == 0) { - PASSERT(env, pg, pg->cp_owner == NULL); - PASSERT(env, pg, pg->cp_req == NULL); + PASSERT(env, pg, !pg->cp_owner); + PASSERT(env, pg, !pg->cp_req); pg->cp_owner = io; pg->cp_task = current; cl_page_owner_set(pg); @@ -921,7 +918,7 @@ void cl_page_assume(const struct lu_env *env, io = cl_io_top(io); cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume)); - PASSERT(env, pg, pg->cp_owner == NULL); + PASSERT(env, pg, !pg->cp_owner); pg->cp_owner = io; pg->cp_task = current; cl_page_owner_set(pg); @@ -1037,7 +1034,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, * skip removing it. 
*/ tmp = pg->cp_child; - for (; tmp != NULL; tmp = tmp->cp_child) { + for (; tmp; tmp = tmp->cp_child) { void *value; struct cl_object_header *hdr; @@ -1135,7 +1132,7 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg) pg = cl_page_top_trusted((struct cl_page *)pg); slice = container_of(pg->cp_layers.next, const struct cl_page_slice, cpl_linkage); - PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL); + PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked); /* * Call ->cpo_is_vmlocked() directly instead of going through * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by @@ -1216,7 +1213,7 @@ void cl_page_completion(const struct lu_env *env, PASSERT(env, pg, crt < CRT_NR); /* cl_page::cp_req already cleared by the caller (osc_completion()) */ - PASSERT(env, pg, pg->cp_req == NULL); + PASSERT(env, pg, !pg->cp_req); PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt)); CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret); @@ -1304,7 +1301,7 @@ int cl_page_cache_add(const struct lu_env *env, struct cl_io *io, return -EINVAL; list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) { - if (scan->cpl_ops->io[crt].cpo_cache_add == NULL) + if (!scan->cpl_ops->io[crt].cpo_cache_add) continue; result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io); @@ -1450,8 +1447,8 @@ void cl_page_print(const struct lu_env *env, void *cookie, { struct cl_page *scan; - for (scan = cl_page_top((struct cl_page *)pg); - scan != NULL; scan = scan->cp_child) + for (scan = cl_page_top((struct cl_page *)pg); scan; + scan = scan->cp_child) cl_page_header_print(env, cookie, printer, scan); CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print), (const struct lu_env *env, diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c index 0975e443057c..1a938e1376f9 100644 --- a/drivers/staging/lustre/lustre/obdclass/class_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c @@ -42,7 +42,6 @@ #include "../../include/linux/lnet/lnetctl.h" #include "../include/lustre_debug.h" #include "../include/lprocfs_status.h" -#include "../include/lustre/lustre_build_version.h" #include <linux/list.h> #include "../include/cl_object.h" #include "llog_internal.h" @@ -52,7 +51,7 @@ EXPORT_SYMBOL(obd_devs); struct list_head obd_types; DEFINE_RWLOCK(obd_dev_lock); -/* The following are visible and mutable through /proc/sys/lustre/. */ +/* The following are visible and mutable through /sys/fs/lustre. 
*/ unsigned int obd_debug_peer_on_timeout; EXPORT_SYMBOL(obd_debug_peer_on_timeout); unsigned int obd_dump_on_timeout; @@ -67,7 +66,7 @@ unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */ EXPORT_SYMBOL(obd_timeout); unsigned int obd_timeout_set; EXPORT_SYMBOL(obd_timeout_set); -/* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */ +/* Adaptive timeout defs here instead of ptlrpc module for /sys/fs/ access */ unsigned int at_min; EXPORT_SYMBOL(at_min); unsigned int at_max = 600; @@ -180,7 +179,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) } CDEBUG(D_IOCTL, "cmd = %x\n", cmd); - if (obd_ioctl_getdata(&buf, &len, (void *)arg)) { + if (obd_ioctl_getdata(&buf, &len, (void __user *)arg)) { CERROR("OBD ioctl: data error\n"); return -EINVAL; } @@ -200,8 +199,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) err = -ENOMEM; goto out; } - err = copy_from_user(lcfg, data->ioc_pbuf1, - data->ioc_plen1); + err = copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1); if (!err) err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1); if (!err) @@ -218,16 +216,16 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - if (strlen(BUILD_VERSION) + 1 > data->ioc_inllen1) { + if (strlen(LUSTRE_VERSION_STRING) + 1 > data->ioc_inllen1) { CERROR("ioctl buffer too small to hold version\n"); err = -EINVAL; goto out; } - memcpy(data->ioc_bulk, BUILD_VERSION, - strlen(BUILD_VERSION) + 1); + memcpy(data->ioc_bulk, LUSTRE_VERSION_STRING, + strlen(LUSTRE_VERSION_STRING) + 1); - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); if (err) err = -EFAULT; goto out; @@ -246,7 +244,8 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - err = obd_ioctl_popdata((void *)arg, data, sizeof(*data)); + err = obd_ioctl_popdata((void __user *)arg, data, + sizeof(*data)); if (err) err = -EFAULT; goto out; @@ -283,7 +282,8 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1, dev); - err = obd_ioctl_popdata((void *)arg, data, sizeof(*data)); + err = obd_ioctl_popdata((void __user *)arg, data, + sizeof(*data)); if (err) err = -EFAULT; goto out; @@ -330,7 +330,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) (int)index, status, obd->obd_type->typ_name, obd->obd_name, obd->obd_uuid.uuid, atomic_read(&obd->obd_refcount)); - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); err = 0; goto out; @@ -339,7 +339,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) } if (data->ioc_dev == OBD_DEV_BY_DEVNAME) { - if (data->ioc_inllen4 <= 0 || data->ioc_inlbuf4 == NULL) { + if (data->ioc_inllen4 <= 0 || !data->ioc_inlbuf4) { err = -EINVAL; goto out; } @@ -356,7 +356,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - if (obd == NULL) { + if (!obd) { CERROR("OBD ioctl : No Device %d\n", data->ioc_dev); err = -EINVAL; goto out; @@ -388,7 +388,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) if (err) goto out; - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); if (err) err = -EFAULT; goto out; @@ -473,13 +473,13 @@ static int obd_init_checks(void) extern int class_procfs_init(void); extern int class_procfs_clean(void); -static int __init init_obdclass(void) +static int __init obdclass_init(void) { int i, err; int 
lustre_register_fs(void); - LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n"); + LCONSOLE_INFO("Lustre: Build Version: " LUSTRE_VERSION_STRING "\n"); spin_lock_init(&obd_types_lock); obd_zombie_impexp_init(); @@ -507,7 +507,8 @@ static int __init init_obdclass(void) /* Default the dirty page cache cap to 1/2 of system memory. * For clients with less memory, a larger fraction is needed - * for other purposes (mostly for BGL). */ + * for other purposes (mostly for BGL). + */ if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT)) obd_max_dirty_pages = totalram_pages / 4; else @@ -542,9 +543,7 @@ static int __init init_obdclass(void) return err; } -/* liblustre doesn't call cleanup_obdclass, apparently. we carry on in this - * ifdef to the end of the file to cover module and versioning goo.*/ -static void cleanup_obdclass(void) +static void obdclass_exit(void) { int i; @@ -577,9 +576,9 @@ static void cleanup_obdclass(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Lustre Class Driver"); MODULE_VERSION(LUSTRE_VERSION_STRING); +MODULE_LICENSE("GPL"); -module_init(init_obdclass); -module_exit(cleanup_obdclass); +module_init(obdclass_init); +module_exit(obdclass_exit); diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c index 228c44c37c4a..cf97b8f06764 100644 --- a/drivers/staging/lustre/lustre/obdclass/genops.c +++ b/drivers/staging/lustre/lustre/obdclass/genops.c @@ -42,6 +42,7 @@ #define DEBUG_SUBSYSTEM S_CLASS #include "../include/obd_class.h" #include "../include/lprocfs_status.h" +#include "../include/lustre_kernelcomm.h" spinlock_t obd_types_lock; @@ -68,18 +69,17 @@ static struct obd_device *obd_device_alloc(void) { struct obd_device *obd; - obd = kmem_cache_alloc(obd_device_cachep, GFP_NOFS | __GFP_ZERO); - if (obd != NULL) + obd = kmem_cache_zalloc(obd_device_cachep, GFP_NOFS); + if (obd) obd->obd_magic = OBD_DEVICE_MAGIC; return obd; } static void obd_device_free(struct obd_device *obd) { - LASSERT(obd != NULL); LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n", obd, obd->obd_magic, OBD_DEVICE_MAGIC); - if (obd->obd_namespace != NULL) { + if (obd->obd_namespace) { CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n", obd, obd->obd_namespace, obd->obd_force); LBUG(); @@ -112,15 +112,6 @@ static struct obd_type *class_get_type(const char *name) if (!type) { const char *modname = name; - if (strcmp(modname, "obdfilter") == 0) - modname = "ofd"; - - if (strcmp(modname, LUSTRE_LWP_NAME) == 0) - modname = LUSTRE_OSP_NAME; - - if (!strncmp(modname, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME))) - modname = LUSTRE_MDT_NAME; - if (!request_module("%s", modname)) { CDEBUG(D_INFO, "Loaded module '%s'\n", modname); type = class_search_type(name); @@ -202,7 +193,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops, goto failed; } - if (ldt != NULL) { + if (ldt) { type->typ_lu = ldt; rc = lu_device_type_init(ldt); if (rc != 0) @@ -364,7 +355,7 @@ void class_release_dev(struct obd_device *obd) obd, obd->obd_magic, OBD_DEVICE_MAGIC); LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n", obd, obd->obd_minor, obd_devs[obd->obd_minor]); - LASSERT(obd_type != NULL); + LASSERT(obd_type); CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n", obd->obd_name, obd->obd_minor, obd->obd_type->typ_name); @@ -390,7 
+381,8 @@ int class_name2dev(const char *name) if (obd && strcmp(name, obd->obd_name) == 0) { /* Make sure we finished attaching before we give - out any references */ + * out any references + */ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); if (obd->obd_attached) { read_unlock(&obd_dev_lock); @@ -465,11 +457,12 @@ struct obd_device *class_num2obd(int num) EXPORT_SYMBOL(class_num2obd); /* Search for a client OBD connected to tgt_uuid. If grp_uuid is - specified, then only the client with that uuid is returned, - otherwise any client connected to the tgt is returned. */ + * specified, then only the client with that uuid is returned, + * otherwise any client connected to the tgt is returned. + */ struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid) + const char *typ_name, + struct obd_uuid *grp_uuid) { int i; @@ -497,9 +490,10 @@ struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, EXPORT_SYMBOL(class_find_client_obd); /* Iterate the obd_device list looking devices have grp_uuid. Start - searching at *next, and if a device is found, the next index to look - at is saved in *next. If next is NULL, then the first matching device - will always be returned. */ + * searching at *next, and if a device is found, the next index to look + * at is saved in *next. If next is NULL, then the first matching device + * will always be returned. + */ struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next) { int i; @@ -588,21 +582,21 @@ int obd_init_caches(void) { LASSERT(!obd_device_cachep); obd_device_cachep = kmem_cache_create("ll_obd_dev_cache", - sizeof(struct obd_device), - 0, 0, NULL); + sizeof(struct obd_device), + 0, 0, NULL); if (!obd_device_cachep) goto out; LASSERT(!obdo_cachep); obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo), - 0, 0, NULL); + 0, 0, NULL); if (!obdo_cachep) goto out; LASSERT(!import_cachep); import_cachep = kmem_cache_create("ll_import_cache", - sizeof(struct obd_import), - 0, 0, NULL); + sizeof(struct obd_import), + 0, 0, NULL); if (!import_cachep) goto out; @@ -658,7 +652,7 @@ static void class_export_destroy(struct obd_export *exp) struct obd_device *obd = exp->exp_obd; LASSERT_ATOMIC_ZERO(&exp->exp_refcount); - LASSERT(obd != NULL); + LASSERT(obd); CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp, exp->exp_client_uuid.uuid, obd->obd_name); @@ -698,7 +692,6 @@ EXPORT_SYMBOL(class_export_get); void class_export_put(struct obd_export *exp) { - LASSERT(exp != NULL); LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON); CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp, atomic_read(&exp->exp_refcount) - 1); @@ -718,7 +711,8 @@ EXPORT_SYMBOL(class_export_put); /* Creates a new export, adds it to the hash table, and returns a * pointer to it. The refcount is 2: one for the hash reference, and - * one for the pointer returned by this function. */ + * one for the pointer returned by this function. 
+ */ struct obd_export *class_new_export(struct obd_device *obd, struct obd_uuid *cluuid) { @@ -834,7 +828,7 @@ EXPORT_SYMBOL(class_unlink_export); static void class_import_destroy(struct obd_import *imp) { CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp, - imp->imp_obd->obd_name); + imp->imp_obd->obd_name); LASSERT_ATOMIC_ZERO(&imp->imp_refcount); @@ -844,7 +838,7 @@ static void class_import_destroy(struct obd_import *imp) struct obd_import_conn *imp_conn; imp_conn = list_entry(imp->imp_conn_list.next, - struct obd_import_conn, oic_item); + struct obd_import_conn, oic_item); list_del_init(&imp_conn->oic_item); ptlrpc_put_connection_superhack(imp_conn->oic_conn); kfree(imp_conn); @@ -901,8 +895,9 @@ static void init_imp_at(struct imp_at *at) at_init(&at->iat_net_latency, 0, 0); for (i = 0; i < IMP_AT_MAX_PORTALS; i++) { /* max service estimates are tracked on the server side, so - don't use the AT history here, just use the last reported - val. (But keep hist for proc histogram, worst_ever) */ + * don't use the AT history here, just use the last reported + * val. (But keep hist for proc histogram, worst_ever) + */ at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT, AT_FLG_NOHIST); } @@ -941,7 +936,8 @@ struct obd_import *class_new_import(struct obd_device *obd) init_imp_at(&imp->imp_at); /* the default magic is V2, will be used in connect RPC, and - * then adjusted according to the flags in request/reply. */ + * then adjusted according to the flags in request/reply. + */ imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2; return imp; @@ -950,7 +946,7 @@ EXPORT_SYMBOL(class_new_import); void class_destroy_import(struct obd_import *import) { - LASSERT(import != NULL); + LASSERT(import); LASSERT(import != LP_POISON); class_handle_unhash(&import->imp_handle); @@ -970,8 +966,7 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock) LASSERT(lock->l_exp_refs_nr >= 0); - if (lock->l_exp_refs_target != NULL && - lock->l_exp_refs_target != exp) { + if (lock->l_exp_refs_target && lock->l_exp_refs_target != exp) { LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n", exp, lock, lock->l_exp_refs_target); } @@ -1005,17 +1000,18 @@ EXPORT_SYMBOL(__class_export_del_lock_ref); #endif /* A connection defines an export context in which preallocation can - be managed. This releases the export pointer reference, and returns - the export handle, so the export refcount is 1 when this function - returns. */ + * be managed. This releases the export pointer reference, and returns + * the export handle, so the export refcount is 1 when this function + * returns. + */ int class_connect(struct lustre_handle *conn, struct obd_device *obd, struct obd_uuid *cluuid) { struct obd_export *export; - LASSERT(conn != NULL); - LASSERT(obd != NULL); - LASSERT(cluuid != NULL); + LASSERT(conn); + LASSERT(obd); + LASSERT(cluuid); export = class_new_export(obd, cluuid); if (IS_ERR(export)) @@ -1035,7 +1031,8 @@ EXPORT_SYMBOL(class_connect); * and if disconnect really need * 2 - removing from hash * 3 - in client_unlink_export - * The export pointer passed to this function can destroyed */ + * The export pointer passed to this function can destroyed + */ int class_disconnect(struct obd_export *export) { int already_disconnected; @@ -1052,7 +1049,8 @@ int class_disconnect(struct obd_export *export) /* class_cleanup(), abort_recovery(), and class_fail_export() * all end up in here, and if any of them race we shouldn't - * call extra class_export_puts(). 
*/ + * call extra class_export_puts(). + */ if (already_disconnected) goto no_disconn; @@ -1092,7 +1090,8 @@ void class_fail_export(struct obd_export *exp) /* Most callers into obd_disconnect are removing their own reference * (request, for example) in addition to the one from the hash table. - * We don't have such a reference here, so make one. */ + * We don't have such a reference here, so make one. + */ class_export_get(exp); rc = obd_disconnect(exp); if (rc) @@ -1126,29 +1125,29 @@ static void obd_zombie_impexp_cull(void) import = NULL; if (!list_empty(&obd_zombie_imports)) { import = list_entry(obd_zombie_imports.next, - struct obd_import, - imp_zombie_chain); + struct obd_import, + imp_zombie_chain); list_del_init(&import->imp_zombie_chain); } export = NULL; if (!list_empty(&obd_zombie_exports)) { export = list_entry(obd_zombie_exports.next, - struct obd_export, - exp_obd_chain); + struct obd_export, + exp_obd_chain); list_del_init(&export->exp_obd_chain); } spin_unlock(&obd_zombie_impexp_lock); - if (import != NULL) { + if (import) { class_import_destroy(import); spin_lock(&obd_zombie_impexp_lock); zombies_count--; spin_unlock(&obd_zombie_impexp_lock); } - if (export != NULL) { + if (export) { class_export_destroy(export); spin_lock(&obd_zombie_impexp_lock); zombies_count--; @@ -1156,7 +1155,7 @@ static void obd_zombie_impexp_cull(void) } cond_resched(); - } while (import != NULL || export != NULL); + } while (import || export); } static struct completion obd_zombie_start; diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c index d8230aec9a2b..8405eccdac19 100644 --- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c +++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c @@ -42,9 +42,8 @@ #define DEBUG_SUBSYSTEM S_CLASS #define D_KUC D_OTHER -#include "../../include/linux/libcfs/libcfs.h" - -/* This is the kernel side (liblustre as well). */ +#include "../include/obd_support.h" +#include "../include/lustre_kernelcomm.h" /** * libcfs_kkuc_msg_put - send an message from kernel to userspace @@ -58,14 +57,14 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) ssize_t count = kuch->kuc_msglen; loff_t offset = 0; mm_segment_t fs; - int rc = -ENOSYS; + int rc = -ENXIO; - if (filp == NULL || IS_ERR(filp)) + if (IS_ERR_OR_NULL(filp)) return -EBADF; if (kuch->kuc_magic != KUC_MAGIC) { CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic); - return -ENOSYS; + return rc; } fs = get_fs(); @@ -90,18 +89,20 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) } EXPORT_SYMBOL(libcfs_kkuc_msg_put); -/* Broadcast groups are global across all mounted filesystems; +/* + * Broadcast groups are global across all mounted filesystems; * i.e. 
registering for a group on 1 fs will get messages for that - * group from any fs */ + * group from any fs + */ /** A single group registration has a uid and a file pointer */ struct kkuc_reg { - struct list_head kr_chain; - int kr_uid; + struct list_head kr_chain; + int kr_uid; struct file *kr_fp; - __u32 kr_data; + char kr_data[0]; }; -static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {}; +static struct list_head kkuc_groups[KUC_GRP_MAX + 1] = {}; /* Protect message sending against remove and adds */ static DECLARE_RWSEM(kg_sem); @@ -109,9 +110,10 @@ static DECLARE_RWSEM(kg_sem); * @param filp pipe to write into * @param uid identifier for this receiver * @param group group number + * @param data user data */ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, - __u32 data) + void *data, size_t data_len) { struct kkuc_reg *reg; @@ -121,20 +123,20 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, } /* fput in group_rem */ - if (filp == NULL) + if (!filp) return -EBADF; /* freed in group_rem */ - reg = kmalloc(sizeof(*reg), 0); - if (reg == NULL) + reg = kmalloc(sizeof(*reg) + data_len, 0); + if (!reg) return -ENOMEM; reg->kr_fp = filp; reg->kr_uid = uid; - reg->kr_data = data; + memcpy(reg->kr_data, data, data_len); down_write(&kg_sem); - if (kkuc_groups[group].next == NULL) + if (!kkuc_groups[group].next) INIT_LIST_HEAD(&kkuc_groups[group]); list_add(&reg->kr_chain, &kkuc_groups[group]); up_write(&kg_sem); @@ -145,14 +147,14 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, } EXPORT_SYMBOL(libcfs_kkuc_group_add); -int libcfs_kkuc_group_rem(int uid, int group) +int libcfs_kkuc_group_rem(int uid, unsigned int group) { struct kkuc_reg *reg, *next; - if (kkuc_groups[group].next == NULL) + if (!kkuc_groups[group].next) return 0; - if (uid == 0) { + if (!uid) { /* Broadcast a shutdown message */ struct kuc_hdr lh; @@ -165,11 +167,11 @@ int libcfs_kkuc_group_rem(int uid, int group) down_write(&kg_sem); list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { - if ((uid == 0) || (uid == reg->kr_uid)) { + if (!uid || (uid == reg->kr_uid)) { list_del(&reg->kr_chain); CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", reg->kr_uid, reg->kr_fp, group); - if (reg->kr_fp != NULL) + if (reg->kr_fp) fput(reg->kr_fp); kfree(reg); } @@ -180,28 +182,30 @@ int libcfs_kkuc_group_rem(int uid, int group) } EXPORT_SYMBOL(libcfs_kkuc_group_rem); -int libcfs_kkuc_group_put(int group, void *payload) +int libcfs_kkuc_group_put(unsigned int group, void *payload) { struct kkuc_reg *reg; - int rc = 0; + int rc = 0; int one_success = 0; - down_read(&kg_sem); + down_write(&kg_sem); list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp != NULL) { + if (reg->kr_fp) { rc = libcfs_kkuc_msg_put(reg->kr_fp, payload); - if (rc == 0) + if (!rc) { one_success = 1; - else if (rc == -EPIPE) { + } else if (rc == -EPIPE) { fput(reg->kr_fp); reg->kr_fp = NULL; } } } - up_read(&kg_sem); + up_write(&kg_sem); - /* don't return an error if the message has been delivered - * at least to one agent */ + /* + * don't return an error if the message has been delivered + * at least to one agent + */ if (one_success) rc = 0; @@ -213,9 +217,9 @@ EXPORT_SYMBOL(libcfs_kkuc_group_put); * Calls a callback function for each link of the given kuc group. * @param group the group to call the function on. * @param cb_func the function to be called. - * @param cb_arg iextra argument to be passed to the callback function. 
+ * @param cb_arg extra argument to be passed to the callback function. */ -int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, +int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, void *cb_arg) { struct kkuc_reg *reg; @@ -227,15 +231,15 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, } /* no link for this group */ - if (kkuc_groups[group].next == NULL) + if (!kkuc_groups[group].next) return 0; - down_write(&kg_sem); + down_read(&kg_sem); list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp != NULL) + if (reg->kr_fp) rc = cb_func(reg->kr_data, cb_arg); } - up_write(&kg_sem); + up_read(&kg_sem); return rc; } diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c index a055cbb4f162..8eddf206f1ed 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c @@ -59,7 +59,6 @@ #include <linux/highmem.h> #include <linux/io.h> #include <asm/ioctls.h> -#include <linux/poll.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/seq_file.h> @@ -71,17 +70,16 @@ #include "../../include/obd_class.h" #include "../../include/lprocfs_status.h" #include "../../include/lustre_ver.h" -#include "../../include/lustre/lustre_build_version.h" /* buffer MUST be at least the size of obd_ioctl_hdr */ -int obd_ioctl_getdata(char **buf, int *len, void *arg) +int obd_ioctl_getdata(char **buf, int *len, void __user *arg) { struct obd_ioctl_hdr hdr; struct obd_ioctl_data *data; int err; int offset = 0; - if (copy_from_user(&hdr, (void *)arg, sizeof(hdr))) + if (copy_from_user(&hdr, arg, sizeof(hdr))) return -EFAULT; if (hdr.ioc_version != OBD_IOCTL_VERSION) { @@ -104,9 +102,10 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) /* When there are lots of processes calling vmalloc on multi-core * system, the high lock contention will hurt performance badly, * obdfilter-survey is an example, which relies on ioctl. So we'd - * better avoid vmalloc on ioctl path. LU-66 */ + * better avoid vmalloc on ioctl path. 
LU-66 + */ *buf = libcfs_kvzalloc(hdr.ioc_len, GFP_NOFS); - if (*buf == NULL) { + if (!*buf) { CERROR("Cannot allocate control buffer of len %d\n", hdr.ioc_len); return -EINVAL; @@ -114,7 +113,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) *len = hdr.ioc_len; data = (struct obd_ioctl_data *)*buf; - if (copy_from_user(*buf, (void *)arg, hdr.ioc_len)) { + if (copy_from_user(*buf, arg, hdr.ioc_len)) { err = -EFAULT; goto free_buf; } @@ -144,9 +143,8 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) offset += cfs_size_round(data->ioc_inllen3); } - if (data->ioc_inllen4) { + if (data->ioc_inllen4) data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset; - } return 0; @@ -156,7 +154,7 @@ free_buf: } EXPORT_SYMBOL(obd_ioctl_getdata); -int obd_ioctl_popdata(void *arg, void *data, int len) +int obd_ioctl_popdata(void __user *arg, void *data, int len) { int err; @@ -240,7 +238,7 @@ static ssize_t health_show(struct kobject *kobj, struct attribute *attr, struct obd_device *obd; obd = class_num2obd(i); - if (obd == NULL || !obd->obd_attached || !obd->obd_set_up) + if (!obd || !obd->obd_attached || !obd->obd_set_up) continue; LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); @@ -250,9 +248,8 @@ static ssize_t health_show(struct kobject *kobj, struct attribute *attr, class_incref(obd, __func__, current); read_unlock(&obd_dev_lock); - if (obd_health_check(NULL, obd)) { + if (obd_health_check(NULL, obd)) healthy = false; - } class_decref(obd, __func__, current); read_lock(&obd_dev_lock); } @@ -360,7 +357,7 @@ static int obd_device_list_seq_show(struct seq_file *p, void *v) struct obd_device *obd = class_num2obd((int)index); char *status; - if (obd == NULL) + if (!obd) return 0; LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); @@ -424,7 +421,7 @@ int class_procfs_init(void) struct dentry *file; lustre_kobj = kobject_create_and_add("lustre", fs_kobj); - if (lustre_kobj == NULL) + if (!lustre_kobj) goto out; /* Create the files associated with this kobject */ @@ -456,8 +453,7 @@ out: int class_procfs_clean(void) { - if (debugfs_lustre_root != NULL) - debugfs_remove_recursive(debugfs_lustre_root); + debugfs_remove_recursive(debugfs_lustre_root); debugfs_lustre_root = NULL; diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c index 42fc26f4ae25..fd333b9e968c 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c @@ -62,8 +62,8 @@ struct static_lustre_uintvalue_attr { }; static ssize_t static_uintvalue_show(struct kobject *kobj, - struct attribute *attr, - char *buf) + struct attribute *attr, + char *buf) { struct static_lustre_uintvalue_attr *lattr = (void *)attr; @@ -71,8 +71,8 @@ static ssize_t static_uintvalue_show(struct kobject *kobj, } static ssize_t static_uintvalue_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, size_t count) + struct attribute *attr, + const char *buffer, size_t count) { struct static_lustre_uintvalue_attr *lattr = (void *)attr; int rc; diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c index f956d7ed6785..992573eae1b1 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog.c +++ b/drivers/staging/lustre/lustre/obdclass/llog.c @@ -76,8 +76,6 @@ static struct llog_handle *llog_alloc_handle(void) */ static void llog_free_handle(struct llog_handle *loghandle) { - LASSERT(loghandle != NULL); - /* failed llog_init_handle */ if 
(!loghandle->lgh_hdr) goto out; @@ -115,7 +113,7 @@ static int llog_read_header(const struct lu_env *env, if (rc) return rc; - if (lop->lop_read_header == NULL) + if (!lop->lop_read_header) return -EOPNOTSUPP; rc = lop->lop_read_header(env, handle); @@ -144,7 +142,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, struct llog_log_hdr *llh; int rc; - LASSERT(handle->lgh_hdr == NULL); + LASSERT(!handle->lgh_hdr); llh = kzalloc(sizeof(*llh), GFP_NOFS); if (!llh) @@ -228,11 +226,11 @@ static int llog_process_thread(void *arg) return 0; } - if (cd != NULL) { + if (cd) { last_called_index = cd->lpcd_first_idx; index = cd->lpcd_first_idx + 1; } - if (cd != NULL && cd->lpcd_last_idx) + if (cd && cd->lpcd_last_idx) last_index = cd->lpcd_last_idx; else last_index = LLOG_BITMAP_BYTES * 8 - 1; @@ -262,7 +260,8 @@ repeat: /* NB: when rec->lrh_len is accessed it is already swabbed * since it is used at the "end" of the loop and the rec - * swabbing is done at the beginning of the loop. */ + * swabbing is done at the beginning of the loop. + */ for (rec = (struct llog_rec_hdr *)buf; (char *)rec < buf + LLOG_CHUNK_SIZE; rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) { @@ -328,7 +327,7 @@ repeat: } out: - if (cd != NULL) + if (cd) cd->lpcd_last_idx = last_called_index; kfree(buf); @@ -366,27 +365,28 @@ int llog_process_or_fork(const struct lu_env *env, int rc; lpi = kzalloc(sizeof(*lpi), GFP_NOFS); - if (!lpi) { - CERROR("cannot alloc pointer\n"); + if (!lpi) return -ENOMEM; - } lpi->lpi_loghandle = loghandle; lpi->lpi_cb = cb; lpi->lpi_cbdata = data; lpi->lpi_catdata = catdata; if (fork) { + struct task_struct *task; + /* The new thread can't use parent env, - * init the new one in llog_process_thread_daemonize. */ + * init the new one in llog_process_thread_daemonize. 
+ */ lpi->lpi_env = NULL; init_completion(&lpi->lpi_completion); - rc = PTR_ERR(kthread_run(llog_process_thread_daemonize, lpi, - "llog_process_thread")); - if (IS_ERR_VALUE(rc)) { + task = kthread_run(llog_process_thread_daemonize, lpi, + "llog_process_thread"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); CERROR("%s: cannot start thread: rc = %d\n", loghandle->lgh_ctxt->loc_obd->obd_name, rc); - kfree(lpi); - return rc; + goto out_lpi; } wait_for_completion(&lpi->lpi_completion); } else { @@ -394,6 +394,7 @@ int llog_process_or_fork(const struct lu_env *env, llog_process_thread(lpi); } rc = lpi->lpi_rc; +out_lpi: kfree(lpi); return rc; } @@ -416,13 +417,13 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt, LASSERT(ctxt); LASSERT(ctxt->loc_logops); - if (ctxt->loc_logops->lop_open == NULL) { + if (!ctxt->loc_logops->lop_open) { *lgh = NULL; return -EOPNOTSUPP; } *lgh = llog_alloc_handle(); - if (*lgh == NULL) + if (!*lgh) return -ENOMEM; (*lgh)->lgh_ctxt = ctxt; (*lgh)->lgh_logops = ctxt->loc_logops; @@ -449,7 +450,7 @@ int llog_close(const struct lu_env *env, struct llog_handle *loghandle) rc = llog_handle2ops(loghandle, &lop); if (rc) goto out; - if (lop->lop_close == NULL) { + if (!lop->lop_close) { rc = -EOPNOTSUPP; goto out; } diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c index 0f05e9c4a5b2..c27d4ec1df9e 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c @@ -69,12 +69,12 @@ static int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *loghandle; int rc = 0; - if (cathandle == NULL) + if (!cathandle) return -EBADF; down_write(&cathandle->lgh_lock); list_for_each_entry(loghandle, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { + u.phd.phd_entry) { struct llog_logid *cgl = &loghandle->lgh_id; if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) && @@ -130,7 +130,7 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle) int rc; list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { + u.phd.phd_entry) { /* unlink open-not-created llogs */ list_del_init(&loghandle->u.phd.phd_entry); llog_close(env, loghandle); diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c index 9bc51998c05c..826623f528da 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c @@ -88,7 +88,8 @@ int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt) spin_unlock(&obd->obd_dev_lock); /* obd->obd_starting is needed for the case of cleanup - * in error case while obd is starting up. */ + * in error case while obd is starting up. 
+ */ LASSERTF(obd->obd_starting == 1 || obd->obd_stopping == 1 || obd->obd_set_up == 0, "wrong obd state: %d/%d/%d\n", !!obd->obd_starting, @@ -110,11 +111,8 @@ int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt) struct obd_llog_group *olg; int rc, idx; - LASSERT(ctxt != NULL); - LASSERT(ctxt != LP_POISON); - olg = ctxt->loc_olg; - LASSERT(olg != NULL); + LASSERT(olg); LASSERT(olg != LP_POISON); idx = ctxt->loc_idx; @@ -151,7 +149,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd, if (index < 0 || index >= LLOG_MAX_CTXTS) return -EINVAL; - LASSERT(olg != NULL); + LASSERT(olg); ctxt = llog_new_ctxt(obd); if (!ctxt) diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c index 3aa7393b20c3..967ba2e1bfcb 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c @@ -346,7 +346,6 @@ void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg) __swab32s(&lcfg->lcfg_buflens[i]); print_lustre_cfg(lcfg); - return; } EXPORT_SYMBOL(lustre_swab_lustre_cfg); @@ -387,7 +386,8 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size) * * Overwrite fields from the end first, so they are not * clobbered, and use memmove() instead of memcpy() because - * the source and target buffers overlap. bug 16771 */ + * the source and target buffers overlap. bug 16771 + */ createtime = cm32->cm_createtime; canceltime = cm32->cm_canceltime; memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32); @@ -406,7 +406,5 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size) __swab64s(&marker->cm_createtime); __swab64s(&marker->cm_canceltime); } - - return; } EXPORT_SYMBOL(lustre_swab_cfg_marker); diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c index 6acc4a10fde9..13aca5b93c6a 100644 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c +++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c @@ -48,14 +48,15 @@ void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount) int smp_id; unsigned long flags = 0; - if (stats == NULL) + if (!stats) return; LASSERTF(0 <= idx && idx < stats->ls_num, "idx %d, ls_num %hu\n", idx, stats->ls_num); /* With per-client stats, statistics are allocated only for - * single CPU area, so the smp_id should be 0 always. */ + * single CPU area, so the smp_id should be 0 always. + */ smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags); if (smp_id < 0) return; @@ -96,14 +97,15 @@ void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount) int smp_id; unsigned long flags = 0; - if (stats == NULL) + if (!stats) return; LASSERTF(0 <= idx && idx < stats->ls_num, "idx %d, ls_num %hu\n", idx, stats->ls_num); /* With per-client stats, statistics are allocated only for - * single CPU area, so the smp_id should be 0 always. */ + * single CPU area, so the smp_id should be 0 always. 
+ */ smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags); if (smp_id < 0) return; diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c index 51fe15f5d687..d93f42fee420 100644 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c +++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c @@ -109,7 +109,7 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep) __u64 mask = 1; int i, ret = 0; - for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) { + for (i = 0; obd_connect_names[i]; i++, mask <<= 1) { if (flags & mask) ret += snprintf(page + ret, count - ret, "%s%s", ret ? sep : "", obd_connect_names[i]); @@ -149,10 +149,10 @@ int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val, } /* * Need to think these cases : - * 1. #echo x.00 > /proc/xxx output result : x - * 2. #echo x.0x > /proc/xxx output result : x.0x - * 3. #echo x.x0 > /proc/xxx output result : x.x - * 4. #echo x.xx > /proc/xxx output result : x.xx + * 1. #echo x.00 > /sys/xxx output result : x + * 2. #echo x.0x > /sys/xxx output result : x.0x + * 3. #echo x.x0 > /sys/xxx output result : x.x + * 4. #echo x.xx > /sys/xxx output result : x.xx * Only reserved 2 bits fraction. */ for (i = 0; i < (5 - prtn); i++) @@ -199,7 +199,7 @@ int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count, if (pbuf == end) return -EINVAL; - if (end != NULL && *end == '.') { + if (end && *end == '.') { int temp_val, pow = 1; int i; @@ -247,7 +247,7 @@ struct dentry *ldebugfs_add_simple(struct dentry *root, struct dentry *entry; umode_t mode = 0; - if (root == NULL || name == NULL || fops == NULL) + if (!root || !name || !fops) return ERR_PTR(-EINVAL); if (fops->read) @@ -256,12 +256,12 @@ struct dentry *ldebugfs_add_simple(struct dentry *root, mode |= 0200; entry = debugfs_create_file(name, mode, root, data, fops); if (IS_ERR_OR_NULL(entry)) { - CERROR("LprocFS: No memory to create <debugfs> entry %s", name); + CERROR("LprocFS: No memory to create <debugfs> entry %s\n", name); return entry ?: ERR_PTR(-ENOMEM); } return entry; } -EXPORT_SYMBOL(ldebugfs_add_simple); +EXPORT_SYMBOL_GPL(ldebugfs_add_simple); static struct file_operations lprocfs_generic_fops = { }; @@ -272,7 +272,7 @@ int ldebugfs_add_vars(struct dentry *parent, if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list)) return -EINVAL; - while (list->name != NULL) { + while (list->name) { struct dentry *entry; umode_t mode = 0; @@ -294,14 +294,14 @@ int ldebugfs_add_vars(struct dentry *parent, } return 0; } -EXPORT_SYMBOL(ldebugfs_add_vars); +EXPORT_SYMBOL_GPL(ldebugfs_add_vars); void ldebugfs_remove(struct dentry **entryp) { debugfs_remove_recursive(*entryp); *entryp = NULL; } -EXPORT_SYMBOL(ldebugfs_remove); +EXPORT_SYMBOL_GPL(ldebugfs_remove); struct dentry *ldebugfs_register(const char *name, struct dentry *parent, @@ -327,7 +327,7 @@ struct dentry *ldebugfs_register(const char *name, out: return entry; } -EXPORT_SYMBOL(ldebugfs_register); +EXPORT_SYMBOL_GPL(ldebugfs_register); /* Generic callbacks */ int lprocfs_rd_uint(struct seq_file *m, void *data) @@ -491,7 +491,7 @@ int lprocfs_rd_server_uuid(struct seq_file *m, void *data) char *imp_state_name = NULL; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -514,7 +514,7 @@ int lprocfs_rd_conn_uuid(struct seq_file *m, void *data) struct ptlrpc_connection *conn; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = 
lprocfs_climp_check(obd); if (rc) @@ -543,7 +543,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, memset(cnt, 0, sizeof(*cnt)); - if (stats == NULL) { + if (!stats) { /* set count to 1 to avoid divide-by-zero errs in callers */ cnt->lc_count = 1; return; @@ -554,7 +554,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_entry; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; percpu_cntr = lprocfs_stats_counter_get(stats, i, idx); @@ -577,7 +577,7 @@ EXPORT_SYMBOL(lprocfs_stats_collect); #define flag2str(flag, first) \ do { \ if (imp->imp_##flag) \ - seq_printf(m, "%s" #flag, first ? "" : ", "); \ + seq_printf(m, "%s" #flag, first ? "" : ", "); \ } while (0) static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m) { @@ -604,16 +604,16 @@ static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep int i; bool first = true; - for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) { + for (i = 0; obd_connect_names[i]; i++, mask <<= 1) { if (flags & mask) { seq_printf(m, "%s%s", - first ? sep : "", obd_connect_names[i]); + first ? sep : "", obd_connect_names[i]); first = false; } } if (flags & ~(mask - 1)) seq_printf(m, "%sunknown flags %#llx", - first ? sep : "", flags & ~(mask - 1)); + first ? sep : "", flags & ~(mask - 1)); } int lprocfs_rd_import(struct seq_file *m, void *data) @@ -629,7 +629,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data) int rw = 0; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -637,26 +637,27 @@ int lprocfs_rd_import(struct seq_file *m, void *data) imp = obd->u.cli.cl_import; seq_printf(m, - "import:\n" - " name: %s\n" - " target: %s\n" - " state: %s\n" - " instance: %u\n" - " connect_flags: [", - obd->obd_name, - obd2cli_tgt(obd), - ptlrpc_import_state_name(imp->imp_state), - imp->imp_connect_data.ocd_instance); - obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, ", "); + "import:\n" + " name: %s\n" + " target: %s\n" + " state: %s\n" + " instance: %u\n" + " connect_flags: [ ", + obd->obd_name, + obd2cli_tgt(obd), + ptlrpc_import_state_name(imp->imp_state), + imp->imp_connect_data.ocd_instance); + obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, + ", "); seq_printf(m, - "]\n" - " import_flags: ["); + " ]\n" + " import_flags: [ "); obd_import_flags2str(imp, m); seq_printf(m, - "]\n" - " connection:\n" - " failover_nids: ["); + " ]\n" + " connection:\n" + " failover_nids: [ "); spin_lock(&imp->imp_lock); j = 0; list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { @@ -665,24 +666,24 @@ int lprocfs_rd_import(struct seq_file *m, void *data) seq_printf(m, "%s%s", j ? 
", " : "", nidstr); j++; } - if (imp->imp_connection != NULL) + if (imp->imp_connection) libcfs_nid2str_r(imp->imp_connection->c_peer.nid, nidstr, sizeof(nidstr)); else strncpy(nidstr, "<none>", sizeof(nidstr)); seq_printf(m, - "]\n" - " current_connection: %s\n" - " connection_attempts: %u\n" - " generation: %u\n" - " in-progress_invalidations: %u\n", - nidstr, - imp->imp_conn_cnt, - imp->imp_generation, - atomic_read(&imp->imp_inval_count)); + " ]\n" + " current_connection: %s\n" + " connection_attempts: %u\n" + " generation: %u\n" + " in-progress_invalidations: %u\n", + nidstr, + imp->imp_conn_cnt, + imp->imp_generation, + atomic_read(&imp->imp_inval_count)); spin_unlock(&imp->imp_lock); - if (obd->obd_svc_stats == NULL) + if (!obd->obd_svc_stats) goto out_climp; header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR]; @@ -696,15 +697,15 @@ int lprocfs_rd_import(struct seq_file *m, void *data) } else ret.lc_sum = 0; seq_printf(m, - " rpcs:\n" - " inflight: %u\n" - " unregistering: %u\n" - " timeouts: %u\n" - " avg_waittime: %llu %s\n", - atomic_read(&imp->imp_inflight), - atomic_read(&imp->imp_unregistering), - atomic_read(&imp->imp_timeouts), - ret.lc_sum, header->lc_units); + " rpcs:\n" + " inflight: %u\n" + " unregistering: %u\n" + " timeouts: %u\n" + " avg_waittime: %llu %s\n", + atomic_read(&imp->imp_inflight), + atomic_read(&imp->imp_unregistering), + atomic_read(&imp->imp_timeouts), + ret.lc_sum, header->lc_units); k = 0; for (j = 0; j < IMP_AT_MAX_PORTALS; j++) { @@ -714,20 +715,20 @@ int lprocfs_rd_import(struct seq_file *m, void *data) at_get(&imp->imp_at.iat_service_estimate[j])); } seq_printf(m, - " service_estimates:\n" - " services: %u sec\n" - " network: %u sec\n", - k, - at_get(&imp->imp_at.iat_net_latency)); + " service_estimates:\n" + " services: %u sec\n" + " network: %u sec\n", + k, + at_get(&imp->imp_at.iat_net_latency)); seq_printf(m, - " transactions:\n" - " last_replay: %llu\n" - " peer_committed: %llu\n" - " last_checked: %llu\n", - imp->imp_last_replay_transno, - imp->imp_peer_committed_transno, - imp->imp_last_transno_checked); + " transactions:\n" + " last_replay: %llu\n" + " peer_committed: %llu\n" + " last_checked: %llu\n", + imp->imp_last_replay_transno, + imp->imp_peer_committed_transno, + imp->imp_last_transno_checked); /* avg data rates */ for (rw = 0; rw <= 1; rw++) { @@ -741,10 +742,10 @@ int lprocfs_rd_import(struct seq_file *m, void *data) do_div(sum, ret.lc_count); ret.lc_sum = sum; seq_printf(m, - " %s_data_averages:\n" - " bytes_per_rpc: %llu\n", - rw ? "write" : "read", - ret.lc_sum); + " %s_data_averages:\n" + " bytes_per_rpc: %llu\n", + rw ? 
"write" : "read", + ret.lc_sum); } k = (int)ret.lc_sum; j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES; @@ -757,13 +758,13 @@ int lprocfs_rd_import(struct seq_file *m, void *data) do_div(sum, ret.lc_count); ret.lc_sum = sum; seq_printf(m, - " %s_per_rpc: %llu\n", - header->lc_units, ret.lc_sum); + " %s_per_rpc: %llu\n", + header->lc_units, ret.lc_sum); j = (int)ret.lc_sum; if (j > 0) seq_printf(m, - " MB_per_sec: %u.%.02u\n", - k / j, (100 * k / j) % 100); + " MB_per_sec: %u.%.02u\n", + k / j, (100 * k / j) % 100); } } @@ -779,7 +780,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) struct obd_import *imp; int j, k, rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -787,7 +788,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) imp = obd->u.cli.cl_import; seq_printf(m, "current_state: %s\n", - ptlrpc_import_state_name(imp->imp_state)); + ptlrpc_import_state_name(imp->imp_state)); seq_printf(m, "state_history:\n"); k = imp->imp_state_hist_idx; for (j = 0; j < IMP_STATE_HIST_LEN; j++) { @@ -795,7 +796,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) &imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN]; if (ish->ish_state == 0) continue; - seq_printf(m, " - [%lld, %s]\n", (s64)ish->ish_time, + seq_printf(m, " - [ %lld, %s ]\n", (s64)ish->ish_time, ptlrpc_import_state_name(ish->ish_state)); } @@ -825,7 +826,7 @@ int lprocfs_rd_timeouts(struct seq_file *m, void *data) struct dhms ts; int i, rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -942,7 +943,7 @@ int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list, return rc; } -EXPORT_SYMBOL(lprocfs_obd_setup); +EXPORT_SYMBOL_GPL(lprocfs_obd_setup); int lprocfs_obd_cleanup(struct obd_device *obd) { @@ -957,7 +958,7 @@ int lprocfs_obd_cleanup(struct obd_device *obd) return 0; } -EXPORT_SYMBOL(lprocfs_obd_cleanup); +EXPORT_SYMBOL_GPL(lprocfs_obd_cleanup); int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid) { @@ -967,12 +968,12 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid) unsigned long flags = 0; int i; - LASSERT(stats->ls_percpu[cpuid] == NULL); + LASSERT(!stats->ls_percpu[cpuid]); LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0); percpusize = lprocfs_stats_counter_size(stats); LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize); - if (stats->ls_percpu[cpuid] != NULL) { + if (stats->ls_percpu[cpuid]) { rc = 0; if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) { if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) @@ -1017,7 +1018,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num, /* alloc percpu pointers for all possible cpu slots */ LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry])); - if (stats == NULL) + if (!stats) return NULL; stats->ls_num = num; @@ -1027,14 +1028,14 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num, /* alloc num of counter headers */ LIBCFS_ALLOC(stats->ls_cnt_header, stats->ls_num * sizeof(struct lprocfs_counter_header)); - if (stats->ls_cnt_header == NULL) + if (!stats->ls_cnt_header) goto fail; if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) { /* contains only one set counters */ percpusize = lprocfs_stats_counter_size(stats); LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize); - if (stats->ls_percpu[0] == NULL) + if (!stats->ls_percpu[0]) goto fail; stats->ls_biggest_alloc_num = 1; } else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) { @@ -1059,7 +1060,7 @@ void 
lprocfs_free_stats(struct lprocfs_stats **statsh) unsigned int percpusize; unsigned int i; - if (stats == NULL || stats->ls_num == 0) + if (!stats || stats->ls_num == 0) return; *statsh = NULL; @@ -1070,9 +1071,9 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh) percpusize = lprocfs_stats_counter_size(stats); for (i = 0; i < num_entry; i++) - if (stats->ls_percpu[i] != NULL) + if (stats->ls_percpu[i]) LIBCFS_FREE(stats->ls_percpu[i], percpusize); - if (stats->ls_cnt_header != NULL) + if (stats->ls_cnt_header) LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num * sizeof(struct lprocfs_counter_header)); LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry])); @@ -1090,7 +1091,7 @@ void lprocfs_clear_stats(struct lprocfs_stats *stats) num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_entry; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; for (j = 0; j < stats->ls_num; j++) { percpu_cntr = lprocfs_stats_counter_get(stats, i, j); @@ -1196,7 +1197,7 @@ static int lprocfs_stats_seq_open(struct inode *inode, struct file *file) return 0; } -struct file_operations lprocfs_stats_seq_fops = { +static const struct file_operations lprocfs_stats_seq_fops = { .owner = THIS_MODULE, .open = lprocfs_stats_seq_open, .read = seq_read, @@ -1206,7 +1207,7 @@ struct file_operations lprocfs_stats_seq_fops = { }; int ldebugfs_register_stats(struct dentry *parent, const char *name, - struct lprocfs_stats *stats) + struct lprocfs_stats *stats) { struct dentry *entry; @@ -1219,7 +1220,7 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name, return 0; } -EXPORT_SYMBOL(ldebugfs_register_stats); +EXPORT_SYMBOL_GPL(ldebugfs_register_stats); void lprocfs_counter_init(struct lprocfs_stats *stats, int index, unsigned conf, const char *name, const char *units) @@ -1230,10 +1231,8 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index, unsigned int i; unsigned int num_cpu; - LASSERT(stats != NULL); - header = &stats->ls_cnt_header[index]; - LASSERTF(header != NULL, "Failed to allocate stats header:[%d]%s/%s\n", + LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n", index, name, units); header->lc_config = conf; @@ -1242,7 +1241,7 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index, num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_cpu; ++i) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; percpu_cntr = lprocfs_stats_counter_get(stats, i, index); percpu_cntr->lc_count = 0; @@ -1270,7 +1269,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc, { __s64 ret = 0; - if (lc == NULL || header == NULL) + if (!lc || !header) return 0; switch (field) { @@ -1319,8 +1318,8 @@ int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, } EXPORT_SYMBOL(lprocfs_write_u64_helper); -int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count, - __u64 *val, int mult) +int lprocfs_write_frac_u64_helper(const char __user *buffer, + unsigned long count, __u64 *val, int mult) { char kernbuf[22], *end, *pbuf; __u64 whole, frac = 0, units; @@ -1360,17 +1359,19 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count, } units = 1; - switch (tolower(*end)) { - case 'p': - units <<= 10; - case 't': - units <<= 10; - case 'g': - units <<= 10; - case 'm': - units <<= 10; - case 'k': - units <<= 10; + if (end) { + switch (tolower(*end)) { + case 'p': + units <<= 10; + case 't': + units <<= 10; + case 
'g': + units <<= 10; + case 'm': + units <<= 10; + case 'k': + units <<= 10; + } } /* Specified units override the multiplier */ if (units > 1) @@ -1412,7 +1413,7 @@ char *lprocfs_find_named_value(const char *buffer, const char *name, /* there is no strnstr() in rhel5 and ubuntu kernels */ val = lprocfs_strnstr(buffer, name, buflen); - if (val == NULL) + if (!val) return (char *)buffer; val += strlen(name); /* skip prefix */ @@ -1429,11 +1430,9 @@ char *lprocfs_find_named_value(const char *buffer, const char *name, } EXPORT_SYMBOL(lprocfs_find_named_value); -int ldebugfs_seq_create(struct dentry *parent, - const char *name, - umode_t mode, - const struct file_operations *seq_fops, - void *data) +int ldebugfs_seq_create(struct dentry *parent, const char *name, + umode_t mode, const struct file_operations *seq_fops, + void *data) { struct dentry *entry; @@ -1446,7 +1445,7 @@ int ldebugfs_seq_create(struct dentry *parent, return 0; } -EXPORT_SYMBOL(ldebugfs_seq_create); +EXPORT_SYMBOL_GPL(ldebugfs_seq_create); int ldebugfs_obd_seq_create(struct obd_device *dev, const char *name, @@ -1457,7 +1456,7 @@ int ldebugfs_obd_seq_create(struct obd_device *dev, return ldebugfs_seq_create(dev->obd_debugfs_entry, name, mode, seq_fops, data); } -EXPORT_SYMBOL(ldebugfs_obd_seq_create); +EXPORT_SYMBOL_GPL(ldebugfs_obd_seq_create); void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value) { diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c index ce248f4072c2..65a4746c89ca 100644 --- a/drivers/staging/lustre/lustre/obdclass/lu_object.c +++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c @@ -86,13 +86,12 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) */ fid = lu_object_fid(o); if (fid_is_zero(fid)) { - LASSERT(top->loh_hash.next == NULL - && top->loh_hash.pprev == NULL); + LASSERT(!top->loh_hash.next && !top->loh_hash.pprev); LASSERT(list_empty(&top->loh_lru)); if (!atomic_dec_and_test(&top->loh_ref)) return; list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) + if (o->lo_ops->loo_object_release) o->lo_ops->loo_object_release(env, o); } lu_object_free(env, orig); @@ -119,7 +118,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) * layers, and notify them that object is no longer busy. */ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) + if (o->lo_ops->loo_object_release) o->lo_ops->loo_object_release(env, o); } @@ -135,7 +134,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) } /* - * If object is dying (will not be cached), removed it + * If object is dying (will not be cached), then removed it * from hash table and LRU. * * This is done with hash table and LRU lists locked. As the only @@ -210,7 +209,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, * lu_object_header. 
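Aside: the lprocfs_write_frac_u64_helper() hunk above wraps the unit-suffix switch in an end-pointer check; the switch itself relies on cumulative fall-through, each case adding another factor of 1024. A minimal user-space sketch of that logic follows (the helper name is illustrative, not part of the patch):

#include <stdint.h>
#include <ctype.h>

/* 'k' -> 2^10, 'm' -> 2^20, 'g' -> 2^30, 't' -> 2^40, 'p' -> 2^50 */
static uint64_t suffix_to_units(char c)
{
	uint64_t units = 1;

	switch (tolower(c)) {
	case 'p':
		units <<= 10;	/* fall through */
	case 't':
		units <<= 10;	/* fall through */
	case 'g':
		units <<= 10;	/* fall through */
	case 'm':
		units <<= 10;	/* fall through */
	case 'k':
		units <<= 10;
	}
	return units;
}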
*/ top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); - if (top == NULL) + if (!top) return ERR_PTR(-ENOMEM); if (IS_ERR(top)) return top; @@ -245,7 +244,7 @@ next: } while (!clean); list_for_each_entry_reverse(scan, layers, lo_linkage) { - if (scan->lo_ops->loo_object_start != NULL) { + if (scan->lo_ops->loo_object_start) { result = scan->lo_ops->loo_object_start(env, scan); if (result != 0) { lu_object_free(env, top); @@ -276,7 +275,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) * First call ->loo_object_delete() method to release all resources. */ list_for_each_entry_reverse(scan, layers, lo_linkage) { - if (scan->lo_ops->loo_object_delete != NULL) + if (scan->lo_ops->loo_object_delete) scan->lo_ops->loo_object_delete(env, scan); } @@ -296,7 +295,6 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) */ o = container_of0(splice.prev, struct lu_object, lo_linkage); list_del_init(&o->lo_linkage); - LASSERT(o->lo_ops->loo_object_free != NULL); o->lo_ops->loo_object_free(env, o); } @@ -451,7 +449,6 @@ int lu_cdebug_printer(const struct lu_env *env, va_start(args, format); key = lu_context_key_get(&env->le_ctx, &lu_global_key); - LASSERT(key != NULL); used = strlen(key->lck_area); complete = format[strlen(format) - 1] == '\n'; @@ -462,7 +459,7 @@ int lu_cdebug_printer(const struct lu_env *env, ARRAY_SIZE(key->lck_area) - used, format, args); if (complete) { if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) - libcfs_debug_msg(msgdata, "%s", key->lck_area); + libcfs_debug_msg(msgdata, "%s\n", key->lck_area); key->lck_area[0] = 0; } va_end(args); @@ -508,7 +505,7 @@ void lu_object_print(const struct lu_env *env, void *cookie, (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, o->lo_dev->ld_type->ldt_name, o); - if (o->lo_ops->loo_object_print != NULL) + if (o->lo_ops->loo_object_print) (*o->lo_ops->loo_object_print)(env, cookie, printer, o); (*printer)(env, cookie, "\n"); @@ -535,9 +532,10 @@ static struct lu_object *htable_lookup(struct lu_site *s, *version = ver; bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); /* cfs_hash_bd_peek_locked is a somehow "internal" function - * of cfs_hash, it doesn't add refcount on object. */ + * of cfs_hash, it doesn't add refcount on object. + */ hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); - if (hnode == NULL) { + if (!hnode) { lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); return ERR_PTR(-ENOENT); } @@ -636,7 +634,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, * If dying object is found during index search, add @waiter to the * site wait-queue and return ERR_PTR(-EAGAIN). 
*/ - if (conf != NULL && conf->loc_flags & LOC_F_NEW) + if (conf && conf->loc_flags & LOC_F_NEW) return lu_object_new(env, dev, f, conf); s = dev->ld_site; @@ -715,7 +713,7 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env, top = lu_object_find(env, dev, f, conf); if (!IS_ERR(top)) { obj = lu_object_locate(top->lo_header, dev->ld_type); - if (obj == NULL) + if (!obj) lu_object_put(env, top); } else obj = top; @@ -966,11 +964,11 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) CFS_HASH_NO_ITEMREF | CFS_HASH_DEPTH | CFS_HASH_ASSERT_EMPTY); - if (s->ls_obj_hash != NULL) + if (s->ls_obj_hash) break; } - if (s->ls_obj_hash == NULL) { + if (!s->ls_obj_hash) { CERROR("failed to create lu_site hash with bits: %d\n", bits); return -ENOMEM; } @@ -982,7 +980,7 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) } s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0); - if (s->ls_stats == NULL) { + if (!s->ls_stats) { cfs_hash_putref(s->ls_obj_hash); s->ls_obj_hash = NULL; return -ENOMEM; @@ -1031,19 +1029,19 @@ void lu_site_fini(struct lu_site *s) list_del_init(&s->ls_linkage); mutex_unlock(&lu_sites_guard); - if (s->ls_obj_hash != NULL) { + if (s->ls_obj_hash) { cfs_hash_putref(s->ls_obj_hash); s->ls_obj_hash = NULL; } - if (s->ls_top_dev != NULL) { + if (s->ls_top_dev) { s->ls_top_dev->ld_site = NULL; lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s); lu_device_put(s->ls_top_dev); s->ls_top_dev = NULL; } - if (s->ls_stats != NULL) + if (s->ls_stats) lprocfs_free_stats(&s->ls_stats); } EXPORT_SYMBOL(lu_site_fini); @@ -1088,7 +1086,7 @@ EXPORT_SYMBOL(lu_device_put); */ int lu_device_init(struct lu_device *d, struct lu_device_type *t) { - if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL) + if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start) t->ldt_ops->ldto_start(t); memset(d, 0, sizeof(*d)); atomic_set(&d->ld_ref, 0); @@ -1107,7 +1105,7 @@ void lu_device_fini(struct lu_device *d) struct lu_device_type *t; t = d->ld_type; - if (d->ld_obd != NULL) { + if (d->ld_obd) { d->ld_obd->obd_lu_dev = NULL; d->ld_obd = NULL; } @@ -1116,7 +1114,7 @@ void lu_device_fini(struct lu_device *d) LASSERTF(atomic_read(&d->ld_ref) == 0, "Refcount is %u\n", atomic_read(&d->ld_ref)); LASSERT(t->ldt_device_nr > 0); - if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL) + if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop) t->ldt_ops->ldto_stop(t); } EXPORT_SYMBOL(lu_device_fini); @@ -1148,7 +1146,7 @@ void lu_object_fini(struct lu_object *o) LASSERT(list_empty(&o->lo_linkage)); - if (dev != NULL) { + if (dev) { lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, "lu_object", o); lu_device_put(dev); @@ -1239,7 +1237,7 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) struct lu_device *next; lu_site_purge(env, site, ~0); - for (scan = top; scan != NULL; scan = next) { + for (scan = top; scan; scan = next) { next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan); lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init); lu_device_put(scan); @@ -1248,13 +1246,13 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) /* purge again. 
*/ lu_site_purge(env, site, ~0); - for (scan = top; scan != NULL; scan = next) { + for (scan = top; scan; scan = next) { const struct lu_device_type *ldt = scan->ld_type; struct obd_type *type; next = ldt->ldt_ops->ldto_device_free(env, scan); type = ldt->ldt_obd_type; - if (type != NULL) { + if (type) { type->typ_refcnt--; class_put_type(type); } @@ -1289,14 +1287,14 @@ int lu_context_key_register(struct lu_context_key *key) int result; int i; - LASSERT(key->lct_init != NULL); - LASSERT(key->lct_fini != NULL); + LASSERT(key->lct_init); + LASSERT(key->lct_fini); LASSERT(key->lct_tags != 0); result = -ENFILE; spin_lock(&lu_keys_guard); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (lu_keys[i] == NULL) { + if (!lu_keys[i]) { key->lct_index = i; atomic_set(&key->lct_used, 1); lu_keys[i] = key; @@ -1313,12 +1311,10 @@ EXPORT_SYMBOL(lu_context_key_register); static void key_fini(struct lu_context *ctx, int index) { - if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) { + if (ctx->lc_value && ctx->lc_value[index]) { struct lu_context_key *key; key = lu_keys[index]; - LASSERT(key != NULL); - LASSERT(key->lct_fini != NULL); LASSERT(atomic_read(&key->lct_used) > 1); key->lct_fini(ctx, key, ctx->lc_value[index]); @@ -1376,7 +1372,7 @@ int lu_context_key_register_many(struct lu_context_key *k, ...) if (result) break; key = va_arg(args, struct lu_context_key *); - } while (key != NULL); + } while (key); va_end(args); if (result != 0) { @@ -1404,7 +1400,7 @@ void lu_context_key_degister_many(struct lu_context_key *k, ...) do { lu_context_key_degister(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_degister_many); @@ -1420,7 +1416,7 @@ void lu_context_key_revive_many(struct lu_context_key *k, ...) do { lu_context_key_revive(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_revive_many); @@ -1436,7 +1432,7 @@ void lu_context_key_quiesce_many(struct lu_context_key *k, ...) do { lu_context_key_quiesce(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_quiesce_many); @@ -1477,8 +1473,7 @@ void lu_context_key_quiesce(struct lu_context_key *key) * XXX memory barrier has to go here. */ spin_lock(&lu_keys_guard); - list_for_each_entry(ctx, &lu_context_remembered, - lc_remember) + list_for_each_entry(ctx, &lu_context_remembered, lc_remember) key_fini(ctx, key->lct_index); spin_unlock(&lu_keys_guard); ++key_set_version; @@ -1497,7 +1492,7 @@ static void keys_fini(struct lu_context *ctx) { int i; - if (ctx->lc_value == NULL) + if (!ctx->lc_value) return; for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) @@ -1511,12 +1506,12 @@ static int keys_fill(struct lu_context *ctx) { int i; - LINVRNT(ctx->lc_value != NULL); + LINVRNT(ctx->lc_value); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; key = lu_keys[i]; - if (ctx->lc_value[i] == NULL && key != NULL && + if (!ctx->lc_value[i] && key && (key->lct_tags & ctx->lc_tags) && /* * Don't create values for a LCT_QUIESCENT key, as this @@ -1525,7 +1520,7 @@ static int keys_fill(struct lu_context *ctx) !(key->lct_tags & LCT_QUIESCENT)) { void *value; - LINVRNT(key->lct_init != NULL); + LINVRNT(key->lct_init); LINVRNT(key->lct_index == i); value = key->lct_init(ctx, key); @@ -1542,7 +1537,7 @@ static int keys_fill(struct lu_context *ctx) * value. 
*/ ctx->lc_value[i] = value; - if (key->lct_exit != NULL) + if (key->lct_exit) ctx->lc_tags |= LCT_HAS_EXIT; } ctx->lc_version = key_set_version; @@ -1554,7 +1549,7 @@ static int keys_init(struct lu_context *ctx) { ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]), GFP_NOFS); - if (likely(ctx->lc_value != NULL)) + if (likely(ctx->lc_value)) return keys_fill(ctx); return -ENOMEM; @@ -1626,14 +1621,13 @@ void lu_context_exit(struct lu_context *ctx) LINVRNT(ctx->lc_state == LCS_ENTERED); ctx->lc_state = LCS_LEFT; - if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) { + if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) { for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (ctx->lc_value[i] != NULL) { + if (ctx->lc_value[i]) { struct lu_context_key *key; key = lu_keys[i]; - LASSERT(key != NULL); - if (key->lct_exit != NULL) + if (key->lct_exit) key->lct_exit(ctx, key, ctx->lc_value[i]); } @@ -1688,7 +1682,7 @@ int lu_env_refill(struct lu_env *env) int result; result = lu_context_refill(&env->le_ctx); - if (result == 0 && env->le_ses != NULL) + if (result == 0 && env->le_ses) result = lu_context_refill(env->le_ses); return result; } @@ -1922,11 +1916,11 @@ int lu_kmem_init(struct lu_kmem_descr *caches) int result; struct lu_kmem_descr *iter = caches; - for (result = 0; iter->ckd_cache != NULL; ++iter) { + for (result = 0; iter->ckd_cache; ++iter) { *iter->ckd_cache = kmem_cache_create(iter->ckd_name, iter->ckd_size, 0, 0, NULL); - if (*iter->ckd_cache == NULL) { + if (!*iter->ckd_cache) { result = -ENOMEM; /* free all previously allocated caches */ lu_kmem_fini(caches); @@ -1943,7 +1937,7 @@ EXPORT_SYMBOL(lu_kmem_init); */ void lu_kmem_fini(struct lu_kmem_descr *caches) { - for (; caches->ckd_cache != NULL; ++caches) { + for (; caches->ckd_cache; ++caches) { kmem_cache_destroy(*caches->ckd_cache); *caches->ckd_cache = NULL; } diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c index fb9147cc607f..403ceea06186 100644 --- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c +++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c @@ -65,7 +65,7 @@ void class_handle_hash(struct portals_handle *h, { struct handle_bucket *bucket; - LASSERT(h != NULL); + LASSERT(h); LASSERT(list_empty(&h->h_link)); /* @@ -140,10 +140,11 @@ void *class_handle2object(__u64 cookie) struct portals_handle *h; void *retval = NULL; - LASSERT(handle_hash != NULL); + LASSERT(handle_hash); /* Be careful when you want to change this code. See the - * rcu_read_lock() definition on top this file. - jxiong */ + * rcu_read_lock() definition on top this file. 
- jxiong + */ bucket = handle_hash + (cookie & HANDLE_HASH_MASK); rcu_read_lock(); @@ -170,7 +171,7 @@ void class_handle_free_cb(struct rcu_head *rcu) struct portals_handle *h = RCU2HANDLE(rcu); void *ptr = (void *)(unsigned long)h->h_cookie; - if (h->h_ops->hop_free != NULL) + if (h->h_ops->hop_free) h->h_ops->hop_free(ptr, h->h_size); else kfree(ptr); @@ -183,11 +184,11 @@ int class_handle_init(void) struct timespec64 ts; int seed[2]; - LASSERT(handle_hash == NULL); + LASSERT(!handle_hash); handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE, GFP_NOFS); - if (handle_hash == NULL) + if (!handle_hash) return -ENOMEM; spin_lock_init(&handle_base_lock); @@ -234,7 +235,7 @@ void class_handle_cleanup(void) { int count; - LASSERT(handle_hash != NULL); + LASSERT(handle_hash); count = cleanup_all_handles(); diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c index d6184f821cd0..5f812460b3ea 100644 --- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c +++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c @@ -93,7 +93,8 @@ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index) EXPORT_SYMBOL(lustre_uuid_to_peer); /* Add a nid to a niduuid. Multiple nids can be added to a single uuid; - LNET will choose the best one. */ + * LNET will choose the best one. + */ int class_add_uuid(const char *uuid, __u64 nid) { struct uuid_nid_data *data, *entry; @@ -149,9 +150,10 @@ int class_del_uuid(const char *uuid) { LIST_HEAD(deathrow); struct uuid_nid_data *data; + struct uuid_nid_data *temp; spin_lock(&g_uuid_lock); - if (uuid != NULL) { + if (uuid) { struct obd_uuid tmp; obd_str2uuid(&tmp, uuid); @@ -165,14 +167,12 @@ int class_del_uuid(const char *uuid) list_splice_init(&g_uuid_list, &deathrow); spin_unlock(&g_uuid_lock); - if (uuid != NULL && list_empty(&deathrow)) { + if (uuid && list_empty(&deathrow)) { CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid); return -EINVAL; } - while (!list_empty(&deathrow)) { - data = list_entry(deathrow.next, struct uuid_nid_data, - un_list); + list_for_each_entry_safe(data, temp, &deathrow, un_list) { list_del(&data->un_list); CDEBUG(D_INFO, "del uuid %s %s/%d\n", diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c index 49cdc647910c..5395e994deab 100644 --- a/drivers/staging/lustre/lustre/obdclass/obd_config.c +++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c @@ -71,8 +71,9 @@ int class_find_param(char *buf, char *key, char **valp) EXPORT_SYMBOL(class_find_param); /* returns 0 if this is the first key in the buffer, else 1. - valp points to first char after key. */ -static int class_match_param(char *buf, char *key, char **valp) + * valp points to first char after key. 
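Aside: the class_del_uuid() hunk above replaces an open-coded while/list_entry loop with list_for_each_entry_safe(), which caches the next node before the body runs so the current entry can be unlinked and freed. A schematic kernel-context sketch of that pattern (struct and function names are illustrative, not from the patch):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head un_link;
};

/* Drain and free a private list; safe because @tmp already points at
 * the next element when the current one is deleted.
 */
static void drain_list(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, un_link) {
		list_del(&it->un_link);
		kfree(it);
	}
}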
+ */ +static int class_match_param(char *buf, const char *key, char **valp) { if (!buf) return 1; @@ -114,9 +115,10 @@ enum { }; /* 0 is good nid, - 1 not found - < 0 error - endh is set to next separator */ + * 1 not found + * < 0 error + * endh is set to next separator + */ static int class_parse_value(char *buf, int opc, void *value, char **endh, int quiet) { @@ -210,7 +212,7 @@ static int class_attach(struct lustre_cfg *lcfg) name, typename, rc); goto out; } - LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n", + LASSERTF(obd, "Cannot get obd device %s of type %s\n", name, typename); LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08X != %08X\n", @@ -230,7 +232,8 @@ static int class_attach(struct lustre_cfg *lcfg) mutex_init(&obd->obd_dev_mutex); spin_lock_init(&obd->obd_osfs_lock); /* obd->obd_osfs_age must be set to a value in the distant - * past to guarantee a fresh statfs is fetched on mount. */ + * past to guarantee a fresh statfs is fetched on mount. + */ obd->obd_osfs_age = cfs_time_shift_64(-1000); /* XXX belongs in setup not attach */ @@ -272,9 +275,9 @@ static int class_attach(struct lustre_cfg *lcfg) obd->obd_minor, typename, atomic_read(&obd->obd_refcount)); return 0; out: - if (obd != NULL) { + if (obd) class_release_dev(obd); - } + return rc; } @@ -286,7 +289,7 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg) int err = 0; struct obd_export *exp; - LASSERT(obd != NULL); + LASSERT(obd); LASSERTF(obd == class_num2obd(obd->obd_minor), "obd %p != obd_devs[%d] %p\n", obd, obd->obd_minor, class_num2obd(obd->obd_minor)); @@ -315,7 +318,8 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg) return -EEXIST; } /* just leave this on forever. I can't use obd_set_up here because - other fns check that status, and we're not actually set up yet. */ + * other fns check that status, and we're not actually set up yet. + */ obd->obd_starting = 1; obd->obd_uuid_hash = NULL; spin_unlock(&obd->obd_dev_lock); @@ -503,7 +507,8 @@ void class_decref(struct obd_device *obd, const char *scope, const void *source) if ((refs == 1) && obd->obd_stopping) { /* All exports have been destroyed; there should - be no more in-progress ops by this point.*/ + * be no more in-progress ops by this point. + */ spin_lock(&obd->obd_self_export->exp_lock); obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd); @@ -723,7 +728,8 @@ static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg) } /* We can't call ll_process_config or lquota_process_config directly because - * it lives in a module that must be loaded after this one. */ + * it lives in a module that must be loaded after this one. + */ static int (*client_process_config)(struct lustre_cfg *lcfg); static int (*quota_process_config)(struct lustre_cfg *lcfg); @@ -812,7 +818,8 @@ int class_process_config(struct lustre_cfg *lcfg) lustre_cfg_string(lcfg, 2), lustre_cfg_string(lcfg, 3)); /* set these mount options somewhere, so ll_fill_super - * can find them. */ + * can find them. + */ err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1), lustre_cfg_string(lcfg, 1), LUSTRE_CFG_BUFLEN(lcfg, 2), @@ -988,8 +995,9 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, fakefile.private_data = &fake_seqfile; fake_seqfile.private = data; /* e.g. 
tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt - or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar - or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 */ + * or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar + * or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 + */ for (i = 1; i < lcfg->lcfg_bufcount; i++) { key = lustre_cfg_buf(lcfg, i); /* Strip off prefix */ @@ -1008,7 +1016,7 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, /* Search proc entries */ while (lvars[j].name) { var = &lvars[j]; - if (class_match_param(key, (char *)var->name, NULL) == 0 + if (!class_match_param(key, var->name, NULL) && keylen == strlen(var->name)) { matched++; rc = -EROFS; @@ -1027,9 +1035,10 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, } if (!matched) { /* If the prefix doesn't match, return error so we - can pass it down the stack */ + * can pass it down the stack + */ if (strnchr(key, keylen, '.')) - return -ENOSYS; + return -ENOSYS; CERROR("%s: unknown param %s\n", (char *)lustre_cfg_string(lcfg, 0), key); /* rc = -EINVAL; continue parsing other params */ @@ -1040,9 +1049,9 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, rc = 0; } else { CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n", - lustre_cfg_string(lcfg, 0), - (int)strlen(prefix) - 1, prefix, - (int)(sval - key - 1), key, sval); + lustre_cfg_string(lcfg, 0), + (int)strlen(prefix) - 1, prefix, + (int)(sval - key - 1), key, sval); } } @@ -1116,7 +1125,8 @@ int class_config_llog_handler(const struct lu_env *env, } } /* A config command without a start marker before it is - illegal (post 146) */ + * illegal (post 146) + */ if (!(clli->cfg_flags & CFG_F_COMPAT146) && !(clli->cfg_flags & CFG_F_MARKER) && (lcfg->lcfg_command != LCFG_MARKER)) { @@ -1182,8 +1192,9 @@ int class_config_llog_handler(const struct lu_env *env, } /* we override the llog's uuid for clients, to insure they - are unique */ - if (clli && clli->cfg_instance != NULL && + * are unique + */ + if (clli && clli->cfg_instance && lcfg->lcfg_command == LCFG_ATTACH) { lustre_cfg_bufs_set_string(&bufs, 2, clli->cfg_uuid.uuid); @@ -1211,7 +1222,8 @@ int class_config_llog_handler(const struct lu_env *env, lcfg_new->lcfg_flags = lcfg->lcfg_flags; /* XXX Hack to try to remain binary compatible with - * pre-newconfig logs */ + * pre-newconfig logs + */ if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */ (lcfg->lcfg_nid >> 32) == 0) { __u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff); @@ -1270,7 +1282,7 @@ int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, if (cfg) { cd.lpcd_first_idx = cfg->cfg_last_idx; callback = cfg->cfg_callback; - LASSERT(callback != NULL); + LASSERT(callback); } else { callback = class_config_llog_handler; } diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c index b5aa8168dbff..d3e28a389ac1 100644 --- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c +++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c @@ -72,7 +72,7 @@ static void (*kill_super_cb)(struct super_block *sb); * this log, and is added to the mgc's list of logs to follow. 
*/ int lustre_process_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg) + struct config_llog_instance *cfg) { struct lustre_cfg *lcfg; struct lustre_cfg_bufs *bufs; @@ -114,7 +114,7 @@ EXPORT_SYMBOL(lustre_process_log); /* Stop watching this config log for updates */ int lustre_end_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg) + struct config_llog_instance *cfg) { struct lustre_cfg *lcfg; struct lustre_cfg_bufs bufs; @@ -283,9 +283,10 @@ int lustre_start_mgc(struct super_block *sb) recov_bk = 0; /* Try all connections, but only once (again). - We don't want to block another target from starting - (using its local copy of the log), but we do want to connect - if at all possible. */ + * We don't want to block another target from starting + * (using its local copy of the log), but we do want to connect + * if at all possible. + */ recov_bk++; CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname, recov_bk); @@ -339,7 +340,7 @@ int lustre_start_mgc(struct super_block *sb) /* Add any failover MGS nids */ i = 1; while (ptr && ((*ptr == ':' || - class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) { + class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) { /* New failover node */ sprintf(niduuid, "%s_%x", mgcname, i); j = 0; @@ -375,7 +376,8 @@ int lustre_start_mgc(struct super_block *sb) goto out_free; /* Keep a refcount of servers/clients who started with "mount", - so we know when we can get rid of the mgc. */ + * so we know when we can get rid of the mgc. + */ atomic_set(&obd->u.cli.cl_mgc_refcount, 1); /* We connect to the MGS at setup, and don't disconnect until cleanup */ @@ -403,7 +405,8 @@ int lustre_start_mgc(struct super_block *sb) out: /* Keep the mgc info in the sb. Note that many lsi's can point - to the same mgc.*/ + * to the same mgc. + */ lsi->lsi_mgc = obd; out_free: mutex_unlock(&mgc_start_lock); @@ -432,7 +435,8 @@ static int lustre_stop_mgc(struct super_block *sb) LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0); if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) { /* This is not fatal, every client that stops - will call in here. */ + * will call in here. + */ CDEBUG(D_MOUNT, "mgc still has %d references.\n", atomic_read(&obd->u.cli.cl_mgc_refcount)); rc = -EBUSY; @@ -440,19 +444,20 @@ static int lustre_stop_mgc(struct super_block *sb) } /* The MGC has no recoverable data in any case. - * force shutdown set in umount_begin */ + * force shutdown set in umount_begin + */ obd->obd_no_recov = 1; if (obd->u.cli.cl_mgc_mgsexp) { /* An error is not fatal, if we are unable to send the - disconnect mgs ping evictor cleans up the export */ + * disconnect mgs ping evictor cleans up the export + */ rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp); if (rc) CDEBUG(D_MOUNT, "disconnect failed %d\n", rc); } - /* Save the obdname for cleaning the nid uuids, which are - obdname_XX */ + /* Save the obdname for cleaning the nid uuids, which are obdname_XX */ len = strlen(obd->obd_name) + 6; niduuid = kzalloc(len, GFP_NOFS); if (niduuid) { @@ -518,13 +523,12 @@ static int lustre_free_lsi(struct super_block *sb) { struct lustre_sb_info *lsi = s2lsi(sb); - LASSERT(lsi != NULL); CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi); /* someone didn't call server_put_mount. 
*/ LASSERT(atomic_read(&lsi->lsi_mounts) == 0); - if (lsi->lsi_lmd != NULL) { + if (lsi->lsi_lmd) { kfree(lsi->lsi_lmd->lmd_dev); kfree(lsi->lsi_lmd->lmd_profile); kfree(lsi->lsi_lmd->lmd_mgssec); @@ -538,7 +542,7 @@ static int lustre_free_lsi(struct super_block *sb) kfree(lsi->lsi_lmd); } - LASSERT(lsi->lsi_llsbi == NULL); + LASSERT(!lsi->lsi_llsbi); kfree(lsi); s2lsi_nocast(sb) = NULL; @@ -546,13 +550,12 @@ static int lustre_free_lsi(struct super_block *sb) } /* The lsi has one reference for every server that is using the disk - - e.g. MDT, MGS, and potentially MGC */ + * e.g. MDT, MGS, and potentially MGC + */ static int lustre_put_lsi(struct super_block *sb) { struct lustre_sb_info *lsi = s2lsi(sb); - LASSERT(lsi != NULL); - CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts)); if (atomic_dec_and_test(&lsi->lsi_mounts)) { lustre_free_lsi(sb); @@ -588,21 +591,22 @@ static int server_name2fsname(const char *svname, char *fsname, if (dash == svname) return -EINVAL; - if (fsname != NULL) { + if (fsname) { strncpy(fsname, svname, dash - svname); fsname[dash - svname] = '\0'; } - if (endptr != NULL) + if (endptr) *endptr = dash; return 0; } /* Get the index from the obd name. - rc = server type, or - rc < 0 on error - if endptr isn't NULL it is set to end of name */ + * rc = server type, or + * rc < 0 on error + * if endptr isn't NULL it is set to end of name + */ static int server_name2index(const char *svname, __u32 *idx, const char **endptr) { @@ -627,18 +631,18 @@ static int server_name2index(const char *svname, __u32 *idx, dash += 3; if (strncmp(dash, "all", 3) == 0) { - if (endptr != NULL) + if (endptr) *endptr = dash + 3; return rc | LDD_F_SV_ALL; } index = simple_strtoul(dash, (char **)endptr, 16); - if (idx != NULL) + if (idx) *idx = index; /* Account for -mdc after index that is possible when specifying mdt */ - if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1, - sizeof(LUSTRE_MDC_NAME)-1) == 0) + if (endptr && strncmp(LUSTRE_MDC_NAME, *endptr + 1, + sizeof(LUSTRE_MDC_NAME) - 1) == 0) *endptr += sizeof(LUSTRE_MDC_NAME); return rc; @@ -661,7 +665,8 @@ int lustre_common_put_super(struct super_block *sb) return rc; } /* BUSY just means that there's some other obd that - needs the mgc. Let him clean it up. */ + * needs the mgc. Let him clean it up. + */ CDEBUG(D_MOUNT, "MGC still in use\n"); } /* Drop a ref to the mounted disk */ @@ -731,8 +736,9 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr) int rc = 0, devmax; /* The shortest an ost name can be is 8 chars: -OST0000. - We don't actually know the fsname at this time, so in fact - a user could specify any fsname. */ + * We don't actually know the fsname at this time, so in fact + * a user could specify any fsname. 
+ */ devmax = strlen(ptr) / 8 + 1; /* temp storage until we figure out how many we have */ @@ -756,7 +762,8 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr) (uint)(s2-s1), s1, rc); s1 = s2; /* now we are pointing at ':' (next exclude) - or ',' (end of excludes) */ + * or ',' (end of excludes) + */ if (lmd->lmd_exclude_count >= devmax) break; } @@ -788,7 +795,7 @@ static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr) lmd->lmd_mgssec = NULL; tail = strchr(ptr, ','); - if (tail == NULL) + if (!tail) length = strlen(ptr); else length = tail - ptr; @@ -807,14 +814,14 @@ static int lmd_parse_string(char **handle, char *ptr) char *tail; int length; - if ((handle == NULL) || (ptr == NULL)) + if (!handle || !ptr) return -EINVAL; kfree(*handle); *handle = NULL; tail = strchr(ptr, ','); - if (tail == NULL) + if (!tail) length = strlen(ptr); else length = tail - ptr; @@ -847,14 +854,14 @@ static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr) return -EINVAL; } - if (lmd->lmd_mgs != NULL) + if (lmd->lmd_mgs) oldlen = strlen(lmd->lmd_mgs) + 1; mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS); if (!mgsnid) return -ENOMEM; - if (lmd->lmd_mgs != NULL) { + if (lmd->lmd_mgs) { /* Multiple mgsnid= are taken to mean failover locations */ memcpy(mgsnid, lmd->lmd_mgs, oldlen); mgsnid[oldlen - 1] = ':'; @@ -909,10 +916,12 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) s1++; /* Client options are parsed in ll_options: eg. flock, - user_xattr, acl */ + * user_xattr, acl + */ /* Parse non-ldiskfs options here. Rather than modifying - ldiskfs, we just zero these out here */ + * ldiskfs, we just zero these out here + */ if (strncmp(s1, "abort_recov", 11) == 0) { lmd->lmd_flags |= LMD_FLG_ABORT_RECOV; clear++; @@ -940,7 +949,8 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) sizeof(PARAM_MGSNODE) - 1) == 0) { s2 = s1 + sizeof(PARAM_MGSNODE) - 1; /* Assume the next mount opt is the first - invalid nid we get to. */ + * invalid nid we get to. + */ rc = lmd_parse_mgs(lmd, &s2); if (rc) goto invalid; @@ -981,7 +991,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) size_t length, params_length; char *tail = strchr(s1 + 6, ','); - if (tail == NULL) + if (!tail) length = strlen(s1); else length = tail - s1; @@ -1000,18 +1010,20 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) clear++; } /* Linux 2.4 doesn't pass the device, so we stuck it at the - end of the options. */ + * end of the options. + */ else if (strncmp(s1, "device=", 7) == 0) { devname = s1 + 7; /* terminate options right before device. device - must be the last one. */ + * must be the last one. + */ *s1 = '\0'; break; } /* Find next opt */ s2 = strchr(s1, ','); - if (s2 == NULL) { + if (!s2) { if (clear) *s1 = '\0'; break; @@ -1113,9 +1125,9 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent) if (lmd_is_client(lmd)) { CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile); - if (client_fill_super == NULL) + if (!client_fill_super) request_module("lustre"); - if (client_fill_super == NULL) { + if (!client_fill_super) { LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n"); lustre_put_lsi(sb); rc = -ENODEV; @@ -1136,7 +1148,8 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent) } /* If error happens in fill_super() call, @lsi will be killed there. - * This is why we do not put it here. 
*/ + * This is why we do not put it here. + */ goto out; out: if (rc) { @@ -1151,7 +1164,8 @@ out: } /* We can't call ll_fill_super by name because it lives in a module that - must be loaded after this one. */ + * must be loaded after this one. + */ void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb, struct vfsmount *mnt)) { @@ -1166,8 +1180,8 @@ void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb)) EXPORT_SYMBOL(lustre_register_kill_super_cb); /***************** FS registration ******************/ -struct dentry *lustre_mount(struct file_system_type *fs_type, int flags, - const char *devname, void *data) +static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags, + const char *devname, void *data) { struct lustre_mount_data2 lmd2 = { .lmd2_data = data, diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c index 75e1deadddd9..e6436cb4ac62 100644 --- a/drivers/staging/lustre/lustre/obdclass/obdo.c +++ b/drivers/staging/lustre/lustre/obdclass/obdo.c @@ -55,7 +55,8 @@ void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent) EXPORT_SYMBOL(obdo_set_parent_fid); /* WARNING: the file systems must take care not to tinker with - attributes they don't manage (such as blocks). */ + * attributes they don't manage (such as blocks). + */ void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid) { u32 newvalid = 0; @@ -122,7 +123,8 @@ void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj) ostid_set_seq_mdt0(&ioobj->ioo_oid); /* Since 2.4 this does not contain o_mode in the low 16 bits. - * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs */ + * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs + */ ioobj->ioo_max_brw = 0; } EXPORT_SYMBOL(obdo_to_ioobj); diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c index 7b53f7dd1797..64ffe243f870 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c +++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c @@ -60,7 +60,6 @@ struct echo_device { struct cl_site ed_site_myself; struct cl_site *ed_site; struct lu_device *ed_next; - int ed_next_islov; }; struct echo_object { @@ -147,7 +146,7 @@ static inline struct echo_thread_info *echo_env_info(const struct lu_env *env) struct echo_thread_info *info; info = lu_context_key_get(&env->le_ctx, &echo_thread_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -162,9 +161,6 @@ struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c) static struct echo_object *cl_echo_object_find(struct echo_device *d, struct lov_stripe_md **lsm); static int cl_echo_object_put(struct echo_object *eco); -static int cl_echo_enqueue(struct echo_object *eco, u64 start, - u64 end, int mode, __u64 *cookie); -static int cl_echo_cancel(struct echo_device *d, __u64 cookie); static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, struct page **pages, int npages, int async); @@ -224,7 +220,7 @@ static struct lu_kmem_descr echo_caches[] = { * @{ */ static struct page *echo_page_vmpage(const struct lu_env *env, - const struct cl_page_slice *slice) + const struct cl_page_slice *slice) { return cl2echo_page(slice)->ep_vmpage; } @@ -271,7 +267,7 @@ static void echo_page_completion(const struct lu_env *env, const struct cl_page_slice *slice, int ioret) { - LASSERT(slice->cpl_page->cp_sync_io != NULL); + LASSERT(slice->cpl_page->cp_sync_io); } static void 
echo_page_fini(const struct lu_env *env, @@ -371,7 +367,7 @@ static struct cl_lock_operations echo_lock_ops = { * @{ */ static int echo_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct echo_page *ep = cl_object_page_slice(obj, page); struct echo_object *eco = cl2echo_obj(obj); @@ -396,14 +392,14 @@ static int echo_lock_init(const struct lu_env *env, { struct echo_lock *el; - el = kmem_cache_alloc(echo_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (el != NULL) { + el = kmem_cache_zalloc(echo_lock_kmem, GFP_NOFS); + if (el) { cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops); el->el_object = cl2echo_obj(obj); INIT_LIST_HEAD(&el->el_chain); atomic_set(&el->el_refcount, 0); } - return el == NULL ? -ENOMEM : 0; + return !el ? -ENOMEM : 0; } static int echo_conf_set(const struct lu_env *env, struct cl_object *obj, @@ -443,7 +439,7 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj, under = ed->ed_next; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below == NULL) + if (!below) return -ENOMEM; lu_object_add(obj, below); } @@ -474,12 +470,12 @@ static int echo_alloc_memmd(struct echo_device *ed, int lsm_size; /* If export is lov/osc then use their obd method */ - if (ed->ed_next != NULL) + if (ed->ed_next) return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp); /* OFD has no unpackmd method, do everything here */ lsm_size = lov_stripe_md_size(1); - LASSERT(*lsmp == NULL); + LASSERT(!*lsmp); *lsmp = kzalloc(lsm_size, GFP_NOFS); if (!*lsmp) return -ENOMEM; @@ -502,12 +498,11 @@ static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp) int lsm_size; /* If export is lov/osc then use their obd method */ - if (ed->ed_next != NULL) + if (ed->ed_next) return obd_free_memmd(ed->ed_ec->ec_exp, lsmp); /* OFD has no unpackmd method, do everything here */ lsm_size = lov_stripe_md_size(1); - LASSERT(*lsmp != NULL); kfree((*lsmp)->lsm_oinfo[0]); kfree(*lsmp); *lsmp = NULL; @@ -534,7 +529,7 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj) } static int echo_object_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) + lu_printer_t p, const struct lu_object *o) { struct echo_object *obj = cl2echo_obj(lu2cl(o)); @@ -566,9 +561,9 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env, struct lu_object *obj = NULL; /* we're the top dev. 
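Aside: the echo_lock_init() change above (and the similar allocations that follow) swap kmem_cache_alloc(..., GFP_NOFS | __GFP_ZERO) for kmem_cache_zalloc(..., GFP_NOFS); the two forms are equivalent, as this kernel-context sketch illustrates (function names are illustrative):

#include <linux/slab.h>

/* Both helpers return zero-filled objects from @cachep; zalloc simply
 * folds __GFP_ZERO into the flags on behalf of the caller.
 */
static void *alloc_zeroed_old(struct kmem_cache *cachep)
{
	return kmem_cache_alloc(cachep, GFP_NOFS | __GFP_ZERO);
}

static void *alloc_zeroed_new(struct kmem_cache *cachep)
{
	return kmem_cache_zalloc(cachep, GFP_NOFS);
}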
*/ - LASSERT(hdr == NULL); - eco = kmem_cache_alloc(echo_object_kmem, GFP_NOFS | __GFP_ZERO); - if (eco != NULL) { + LASSERT(!hdr); + eco = kmem_cache_zalloc(echo_object_kmem, GFP_NOFS); + if (eco) { struct cl_object_header *hdr = &eco->eo_hdr; obj = &echo_obj2cl(eco)->co_lu; @@ -582,13 +577,13 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env, return obj; } -static struct lu_device_operations echo_device_lu_ops = { +static const struct lu_device_operations echo_device_lu_ops = { .ldo_object_alloc = echo_object_alloc, }; /** @} echo_lu_dev_ops */ -static struct cl_device_operations echo_device_cl_ops = { +static const struct cl_device_operations echo_device_cl_ops = { }; /** \defgroup echo_init Setup and teardown @@ -626,18 +621,18 @@ static void echo_site_fini(const struct lu_env *env, struct echo_device *ed) } static void *echo_thread_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { struct echo_thread_info *info; - info = kmem_cache_alloc(echo_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(echo_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } static void echo_thread_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { struct echo_thread_info *info = data; @@ -645,7 +640,7 @@ static void echo_thread_key_fini(const struct lu_context *ctx, } static void echo_thread_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { } @@ -657,18 +652,18 @@ static struct lu_context_key echo_thread_key = { }; static void *echo_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { struct echo_session_info *session; - session = kmem_cache_alloc(echo_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = kmem_cache_zalloc(echo_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } static void echo_session_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { struct echo_session_info *session = data; @@ -676,7 +671,7 @@ static void echo_session_key_fini(const struct lu_context *ctx, } static void echo_session_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { } @@ -719,13 +714,13 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, cleanup = 2; obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); - LASSERT(env != NULL); + LASSERT(obd); + LASSERT(env); tgt = class_name2obd(lustre_cfg_string(cfg, 1)); - if (tgt == NULL) { + if (!tgt) { CERROR("Can not find tgt device %s\n", - lustre_cfg_string(cfg, 1)); + lustre_cfg_string(cfg, 1)); rc = -ENODEV; goto out; } @@ -751,14 +746,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, cleanup = 4; /* if echo client is to be stacked upon ost device, the next is - * NULL since ost is not a clio device so far */ - if (next != NULL && !lu_device_is_cl(next)) + * NULL since ost is not a clio device so far + */ + if (next && !lu_device_is_cl(next)) next = NULL; tgt_type_name = tgt->obd_type->typ_name; - if (next != NULL) { - LASSERT(next != NULL); - if (next->ld_site != NULL) { + if (next) { + if (next->ld_site) { rc = -EBUSY; goto out; } @@ -770,14 +765,6 @@ static struct lu_device 
*echo_device_alloc(const struct lu_env *env, if (rc) goto out; - /* Tricky case, I have to determine the obd type since - * CLIO uses the different parameters to initialize - * objects for lov & osc. */ - if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0) - ed->ed_next_islov = 1; - else - LASSERT(strcmp(tgt_type_name, - LUSTRE_OSC_NAME) == 0); } else { LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0); } @@ -809,7 +796,7 @@ out: } static int echo_device_init(const struct lu_env *env, struct lu_device *d, - const char *name, struct lu_device *next) + const char *name, struct lu_device *next) { LBUG(); return 0; @@ -963,20 +950,11 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d, info = echo_env_info(env); conf = &info->eti_conf; if (d->ed_next) { - if (!d->ed_next_islov) { - struct lov_oinfo *oinfo = lsm->lsm_oinfo[0]; - - LASSERT(oinfo != NULL); - oinfo->loi_oi = lsm->lsm_oi; - conf->eoc_cl.u.coc_oinfo = oinfo; - } else { - struct lustre_md *md; + struct lov_oinfo *oinfo = lsm->lsm_oinfo[0]; - md = &info->eti_md; - memset(md, 0, sizeof(*md)); - md->lsm = lsm; - conf->eoc_cl.u.coc_md = md; - } + LASSERT(oinfo); + oinfo->loi_oi = lsm->lsm_oi; + conf->eoc_cl.u.coc_oinfo = oinfo; } conf->eoc_md = lsmp; @@ -988,7 +966,8 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d, } /* In the function below, .hs_keycmp resolves to - * lu_obj_hop_keycmp() */ + * lu_obj_hop_keycmp() + */ /* coverity[overrun-buffer-val] */ obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl); if (IS_ERR(obj)) { @@ -1076,36 +1055,6 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco, return rc; } -static int cl_echo_enqueue(struct echo_object *eco, u64 start, u64 end, - int mode, __u64 *cookie) -{ - struct echo_thread_info *info; - struct lu_env *env; - struct cl_io *io; - int refcheck; - int result; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - info = echo_env_info(env); - io = &info->eti_io; - - io->ci_ignore_layout = 1; - result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco)); - if (result < 0) - goto out; - LASSERT(result == 0); - - result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0); - cl_io_fini(env, io); - -out: - cl_env_put(env, &refcheck); - return result; -} - static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, __u64 cookie) { @@ -1114,7 +1063,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, struct list_head *el; int found = 0, still_used = 0; - LASSERT(ec != NULL); spin_lock(&ec->ec_lock); list_for_each(el, &ec->ec_locks) { ecl = list_entry(el, struct echo_lock, el_chain); @@ -1137,22 +1085,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, return 0; } -static int cl_echo_cancel(struct echo_device *ed, __u64 cookie) -{ - struct lu_env *env; - int refcheck; - int rc; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - rc = cl_echo_cancel0(env, ed, cookie); - - cl_env_put(env, &refcheck); - return rc; -} - static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io, enum cl_req_type unused, struct cl_2queue *queue) { @@ -1188,7 +1120,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, int i; LASSERT((offset & ~CFS_PAGE_MASK) == 0); - LASSERT(ed->ed_next != NULL); + LASSERT(ed->ed_next); env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); @@ -1234,7 +1166,8 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, 
cl_page_list_add(&queue->c2_qin, clp); /* drop the reference count for cl_page_find, so that the page - * will be freed in cl_2queue_fini. */ + * will be freed in cl_2queue_fini. + */ cl_page_put(env, clp); cl_page_clip(env, clp, 0, page_size); @@ -1268,61 +1201,8 @@ out: static u64 last_object_id; -static int -echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob) -{ - struct lov_stripe_md *ulsm = _ulsm; - struct lov_oinfo **p; - int nob, i; - - nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]); - if (nob > ulsm_nob) - return -EINVAL; - - if (copy_to_user(ulsm, lsm, sizeof(*ulsm))) - return -EFAULT; - - for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) { - struct lov_oinfo __user *up; - if (get_user(up, ulsm->lsm_oinfo + i) || - copy_to_user(up, *p, sizeof(struct lov_oinfo))) - return -EFAULT; - } - return 0; -} - -static int -echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm, - struct lov_stripe_md __user *ulsm, int ulsm_nob) -{ - struct echo_client_obd *ec = ed->ed_ec; - struct lov_oinfo **p; - int i; - - if (ulsm_nob < sizeof(*lsm)) - return -EINVAL; - - if (copy_from_user(lsm, ulsm, sizeof(*lsm))) - return -EFAULT; - - if (lsm->lsm_stripe_count > ec->ec_nstripes || - lsm->lsm_magic != LOV_MAGIC || - (lsm->lsm_stripe_size & (~CFS_PAGE_MASK)) != 0 || - ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL)) - return -EINVAL; - - for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) { - struct lov_oinfo __user *up; - if (get_user(up, ulsm->lsm_oinfo + i) || - copy_from_user(*p, up, sizeof(struct lov_oinfo))) - return -EFAULT; - } - return 0; -} - static int echo_create_object(const struct lu_env *env, struct echo_device *ed, - int on_target, struct obdo *oa, void *ulsm, - int ulsm_nob, struct obd_trans_info *oti) + struct obdo *oa, struct obd_trans_info *oti) { struct echo_object *eco; struct echo_client_obd *ec = ed->ed_ec; @@ -1330,10 +1210,10 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, int rc; int created = 0; - if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */ - (on_target || /* set_stripe */ - ec->ec_nstripes != 0)) { /* LOV */ - CERROR("No valid oid\n"); + if (!(oa->o_valid & OBD_MD_FLID) || + !(oa->o_valid & OBD_MD_FLGROUP) || + !fid_seq_is_echo(ostid_seq(&oa->o_oi))) { + CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi)); return -EINVAL; } @@ -1343,52 +1223,18 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, goto failed; } - if (ulsm != NULL) { - int i, idx; - - rc = echo_copyin_lsm(ed, lsm, ulsm, ulsm_nob); - if (rc != 0) - goto failed; - - if (lsm->lsm_stripe_count == 0) - lsm->lsm_stripe_count = ec->ec_nstripes; - - if (lsm->lsm_stripe_size == 0) - lsm->lsm_stripe_size = PAGE_CACHE_SIZE; - - idx = cfs_rand(); - - /* setup stripes: indices + default ids if required */ - for (i = 0; i < lsm->lsm_stripe_count; i++) { - if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0) - lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi; - - lsm->lsm_oinfo[i]->loi_ost_idx = - (idx + i) % ec->ec_nstripes; - } - } - - /* setup object ID here for !on_target and LOV hint */ - if (oa->o_valid & OBD_MD_FLID) { - LASSERT(oa->o_valid & OBD_MD_FLGROUP); - lsm->lsm_oi = oa->o_oi; - } + /* setup object ID here */ + lsm->lsm_oi = oa->o_oi; if (ostid_id(&lsm->lsm_oi) == 0) ostid_set_id(&lsm->lsm_oi, ++last_object_id); - rc = 0; - if (on_target) { - /* Only echo objects are allowed to be created */ - LASSERT((oa->o_valid & OBD_MD_FLGROUP) && - 
(ostid_seq(&oa->o_oi) == FID_SEQ_ECHO)); - rc = obd_create(env, ec->ec_exp, oa, &lsm, oti); - if (rc != 0) { - CERROR("Cannot create objects: rc = %d\n", rc); - goto failed; - } - created = 1; + rc = obd_create(env, ec->ec_exp, oa, &lsm, oti); + if (rc != 0) { + CERROR("Cannot create objects: rc = %d\n", rc); + goto failed; } + created = 1; /* See what object ID we were given */ oa->o_oi = lsm->lsm_oi; @@ -1447,42 +1293,16 @@ static int echo_get_object(struct echo_object **ecop, struct echo_device *ed, static void echo_put_object(struct echo_object *eco) { - if (cl_echo_object_put(eco)) - CERROR("echo client: drop an object failed"); -} - -static void -echo_get_stripe_off_id(struct lov_stripe_md *lsm, u64 *offp, u64 *idp) -{ - unsigned long stripe_count; - unsigned long stripe_size; - unsigned long width; - unsigned long woffset; - int stripe_index; - u64 offset; - - if (lsm->lsm_stripe_count <= 1) - return; - - offset = *offp; - stripe_size = lsm->lsm_stripe_size; - stripe_count = lsm->lsm_stripe_count; - - /* width = # bytes in all stripes */ - width = stripe_size * stripe_count; - - /* woffset = offset within a width; offset = whole number of widths */ - woffset = do_div(offset, width); - - stripe_index = woffset / stripe_size; + int rc; - *idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi); - *offp = offset * stripe_size + woffset % stripe_size; + rc = cl_echo_object_put(eco); + if (rc) + CERROR("%s: echo client drop an object failed: rc = %d\n", + eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc); } static void -echo_client_page_debug_setup(struct lov_stripe_md *lsm, - struct page *page, int rw, u64 id, +echo_client_page_debug_setup(struct page *page, int rw, u64 id, u64 offset, u64 count) { char *addr; @@ -1499,7 +1319,6 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm, if (rw == OBD_BRW_WRITE) { stripe_off = offset + delta; stripe_id = id; - echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id); } else { stripe_off = 0xdeadbeef00c0ffeeULL; stripe_id = 0xdeadbeef00c0ffeeULL; @@ -1511,8 +1330,7 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm, kunmap(page); } -static int echo_client_page_debug_check(struct lov_stripe_md *lsm, - struct page *page, u64 id, +static int echo_client_page_debug_check(struct page *page, u64 id, u64 offset, u64 count) { u64 stripe_off; @@ -1530,7 +1348,6 @@ static int echo_client_page_debug_check(struct lov_stripe_md *lsm, for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { stripe_off = offset + delta; stripe_id = id; - echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id); rc2 = block_debug_check("test_brw", addr + delta, OBD_ECHO_BLOCK_SIZE, @@ -1550,7 +1367,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, u64 count, int async, struct obd_trans_info *oti) { - struct lov_stripe_md *lsm = eco->eo_lsm; u32 npages; struct brw_page *pga; struct brw_page *pgp; @@ -1569,8 +1385,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? 
GFP_KERNEL : GFP_HIGHUSER; LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ); - LASSERT(lsm != NULL); - LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi)); if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) @@ -1583,11 +1397,11 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, brw_flags = OBD_BRW_ASYNC; pga = kcalloc(npages, sizeof(*pga), GFP_NOFS); - if (pga == NULL) + if (!pga) return -ENOMEM; pages = kcalloc(npages, sizeof(*pages), GFP_NOFS); - if (pages == NULL) { + if (!pages) { kfree(pga); return -ENOMEM; } @@ -1596,11 +1410,11 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, i < npages; i++, pgp++, off += PAGE_CACHE_SIZE) { - LASSERT(pgp->pg == NULL); /* for cleanup */ + LASSERT(!pgp->pg); /* for cleanup */ rc = -ENOMEM; pgp->pg = alloc_page(gfp_mask); - if (pgp->pg == NULL) + if (!pgp->pg) goto out; pages[i] = pgp->pg; @@ -1609,13 +1423,13 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, pgp->flag = brw_flags; if (verify) - echo_client_page_debug_setup(lsm, pgp->pg, rw, + echo_client_page_debug_setup(pgp->pg, rw, ostid_id(&oa->o_oi), off, pgp->count); } /* brw mode can only be used at client */ - LASSERT(ed->ed_next != NULL); + LASSERT(ed->ed_next); rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async); out: @@ -1623,13 +1437,13 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, verify = 0; for (i = 0, pgp = pga; i < npages; i++, pgp++) { - if (pgp->pg == NULL) + if (!pgp->pg) continue; if (verify) { int vrc; - vrc = echo_client_page_debug_check(lsm, pgp->pg, + vrc = echo_client_page_debug_check(pgp->pg, ostid_id(&oa->o_oi), pgp->off, pgp->count); if (vrc != 0 && rc == 0) @@ -1649,7 +1463,6 @@ static int echo_client_prep_commit(const struct lu_env *env, u64 batch, struct obd_trans_info *oti, int async) { - struct lov_stripe_md *lsm = eco->eo_lsm; struct obd_ioobj ioo; struct niobuf_local *lnb; struct niobuf_remote *rnb; @@ -1657,8 +1470,7 @@ static int echo_client_prep_commit(const struct lu_env *env, u64 npages, tot_pages; int i, ret = 0, brw_flags = 0; - if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 || - (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi))) + if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) return -EINVAL; npages = batch >> PAGE_CACHE_SHIFT; @@ -1667,7 +1479,7 @@ static int echo_client_prep_commit(const struct lu_env *env, lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS); - if (lnb == NULL || rnb == NULL) { + if (!lnb || !rnb) { ret = -ENOMEM; goto out; } @@ -1705,7 +1517,7 @@ static int echo_client_prep_commit(const struct lu_env *env, struct page *page = lnb[i].page; /* read past eof? 
*/ - if (page == NULL && lnb[i].rc == 0) + if (!page && lnb[i].rc == 0) continue; if (async) @@ -1717,12 +1529,12 @@ static int echo_client_prep_commit(const struct lu_env *env, continue; if (rw == OBD_BRW_WRITE) - echo_client_page_debug_setup(lsm, page, rw, + echo_client_page_debug_setup(page, rw, ostid_id(&oa->o_oi), rnb[i].offset, rnb[i].len); else - echo_client_page_debug_check(lsm, page, + echo_client_page_debug_check(page, ostid_id(&oa->o_oi), rnb[i].offset, rnb[i].len); @@ -1774,7 +1586,7 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw, if (test_mode == 1) async = 0; - if (ed->ed_next == NULL && test_mode != 3) { + if (!ed->ed_next && test_mode != 3) { test_mode = 3; data->ioc_plen1 = data->ioc_count; } @@ -1805,55 +1617,8 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw, } static int -echo_client_enqueue(struct obd_export *exp, struct obdo *oa, - int mode, u64 offset, u64 nob) -{ - struct echo_device *ed = obd2echo_dev(exp->exp_obd); - struct lustre_handle *ulh = &oa->o_handle; - struct echo_object *eco; - u64 end; - int rc; - - if (ed->ed_next == NULL) - return -EOPNOTSUPP; - - if (!(mode == LCK_PR || mode == LCK_PW)) - return -EINVAL; - - if ((offset & (~CFS_PAGE_MASK)) != 0 || - (nob & (~CFS_PAGE_MASK)) != 0) - return -EINVAL; - - rc = echo_get_object(&eco, ed, oa); - if (rc != 0) - return rc; - - end = (nob == 0) ? ((u64) -1) : (offset + nob - 1); - rc = cl_echo_enqueue(eco, offset, end, mode, &ulh->cookie); - if (rc == 0) { - oa->o_valid |= OBD_MD_FLHANDLE; - CDEBUG(D_INFO, "Cookie is %#llx\n", ulh->cookie); - } - echo_put_object(eco); - return rc; -} - -static int -echo_client_cancel(struct obd_export *exp, struct obdo *oa) -{ - struct echo_device *ed = obd2echo_dev(exp->exp_obd); - __u64 cookie = oa->o_handle.cookie; - - if ((oa->o_valid & OBD_MD_FLHANDLE) == 0) - return -EINVAL; - - CDEBUG(D_INFO, "Cookie is %#llx\n", cookie); - return cl_echo_cancel(ed, cookie); -} - -static int echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obd = exp->exp_obd; struct echo_device *ed = obd2echo_dev(obd); @@ -1899,8 +1664,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, goto out; } - rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1, - data->ioc_plen1, &dummy_oti); + rc = echo_create_object(env, ed, oa, &dummy_oti); goto out; case OBD_IOC_DESTROY: @@ -1911,7 +1675,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm, + rc = obd_destroy(env, ec->ec_exp, oa, NULL, &dummy_oti, NULL); if (rc == 0) eco->eo_deleted = 1; @@ -1922,10 +1686,10 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, case OBD_IOC_GETATTR: rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - struct obd_info oinfo = { }; + struct obd_info oinfo = { + .oi_oa = oa, + }; - oinfo.oi_md = eco->eo_lsm; - oinfo.oi_oa = oa; rc = obd_getattr(env, ec->ec_exp, &oinfo); echo_put_object(eco); } @@ -1939,10 +1703,9 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - struct obd_info oinfo = { }; - - oinfo.oi_oa = oa; - oinfo.oi_md = eco->eo_lsm; + struct obd_info oinfo = { + .oi_oa = oa, + }; rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL); echo_put_object(eco); @@ -1961,50 +1724,6 @@ echo_client_iocontrol(unsigned int cmd, struct 
obd_export *exp, int len, rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti); goto out; - case ECHO_IOC_GET_STRIPE: - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - rc = echo_copyout_lsm(eco->eo_lsm, data->ioc_pbuf1, - data->ioc_plen1); - echo_put_object(eco); - } - goto out; - - case ECHO_IOC_SET_STRIPE: - if (!capable(CFS_CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - if (data->ioc_pbuf1 == NULL) { /* unset */ - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - eco->eo_deleted = 1; - echo_put_object(eco); - } - } else { - rc = echo_create_object(env, ed, 0, oa, - data->ioc_pbuf1, - data->ioc_plen1, &dummy_oti); - } - goto out; - - case ECHO_IOC_ENQUEUE: - if (!capable(CFS_CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - rc = echo_client_enqueue(exp, oa, - data->ioc_conn1, /* lock mode */ - data->ioc_offset, - data->ioc_count);/*extent*/ - goto out; - - case ECHO_IOC_CANCEL: - rc = echo_client_cancel(exp, oa); - goto out; - default: CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd); rc = -ENOTTY; @@ -2051,14 +1770,10 @@ static int echo_client_setup(const struct lu_env *env, INIT_LIST_HEAD(&ec->ec_objects); INIT_LIST_HEAD(&ec->ec_locks); ec->ec_unique = 0; - ec->ec_nstripes = 0; ocd = kzalloc(sizeof(*ocd), GFP_NOFS); - if (!ocd) { - CERROR("Can't alloc ocd connecting to %s\n", - lustre_cfg_string(lcfg, 1)); + if (!ocd) return -ENOMEM; - } ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE | @@ -2120,7 +1835,7 @@ static int echo_client_disconnect(struct obd_export *exp) { int rc; - if (exp == NULL) { + if (!exp) { rc = -EINVAL; goto out; } @@ -2175,9 +1890,9 @@ static void /*__exit*/ obdecho_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Testing Echo OBD driver"); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Lustre Echo Client test driver"); MODULE_VERSION(LUSTRE_VERSION_STRING); +MODULE_LICENSE("GPL"); module_init(obdecho_init); module_exit(obdecho_exit); diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h index 69063fa65d35..f5034a253f6d 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h +++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). 
* - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c index 1091536fc90d..57c43c506ef2 100644 --- a/drivers/staging/lustre/lustre/osc/lproc_osc.c +++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c @@ -381,7 +381,7 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v) DECLARE_CKSUM_NAME; - if (obd == NULL) + if (!obd) return 0; for (i = 0; i < ARRAY_SIZE(cksum_name); i++) { @@ -397,8 +397,8 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v) } static ssize_t osc_checksum_type_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *obd = ((struct seq_file *)file->private_data)->private; int i; @@ -406,7 +406,7 @@ static ssize_t osc_checksum_type_seq_write(struct file *file, DECLARE_CKSUM_NAME; char kernbuf[10]; - if (obd == NULL) + if (!obd) return 0; if (count > sizeof(kernbuf) - 1) @@ -422,8 +422,8 @@ static ssize_t osc_checksum_type_seq_write(struct file *file, if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0) continue; if (!strcmp(kernbuf, cksum_name[i])) { - obd->u.cli.cl_cksum_type = 1 << i; - return count; + obd->u.cli.cl_cksum_type = 1 << i; + return count; } } return -EINVAL; @@ -480,9 +480,19 @@ static ssize_t contention_seconds_store(struct kobject *kobj, struct obd_device *obd = container_of(kobj, struct obd_device, obd_kobj); struct osc_device *od = obd2osc_dev(obd); + int rc; + int val; + + rc = kstrtoint(buffer, 10, &val); + if (rc) + return rc; + + if (val < 0) + return -EINVAL; + + od->od_contention_time = val; - return lprocfs_write_helper(buffer, count, &od->od_contention_time) ?: - count; + return count; } LUSTRE_RW_ATTR(contention_seconds); @@ -505,9 +515,16 @@ static ssize_t lockless_truncate_store(struct kobject *kobj, struct obd_device *obd = container_of(kobj, struct obd_device, obd_kobj); struct osc_device *od = obd2osc_dev(obd); + int rc; + unsigned int val; - return lprocfs_write_helper(buffer, count, &od->od_lockless_truncate) ?: - count; + rc = kstrtouint(buffer, 10, &val); + if (rc) + return rc; + + od->od_lockless_truncate = val; + + return count; } LUSTRE_RW_ATTR(lockless_truncate); @@ -635,10 +652,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) read_cum += r; write_cum += w; seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - 1 << i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); + 1 << i, r, pct(r, read_tot), + pct(read_cum, read_tot), w, + pct(w, write_tot), + pct(write_cum, write_tot)); if (read_cum == read_tot && write_cum == write_tot) break; } @@ -659,10 +676,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) read_cum += r; write_cum += w; seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); + i, r, pct(r, read_tot), + pct(read_cum, read_tot), w, + pct(w, write_tot), + pct(write_cum, write_tot)); if (read_cum == read_tot && write_cum == write_tot) break; } diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index 
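contention_seconds_store() and lockless_truncate_store() above drop lprocfs_write_helper() in favour of parsing the buffer with kstrtoint()/kstrtouint() and rejecting bad input explicitly. A generic sketch of that store pattern using the stock kobj_attribute prototype; the attribute and variable names are made up, and the real code goes through Lustre's LUSTRE_RW_ATTR wrappers:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/errno.h>

static int demo_contention_time;	/* stand-in for od_contention_time */

static ssize_t demo_seconds_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buffer, size_t count)
{
	int val;
	int rc;

	rc = kstrtoint(buffer, 10, &val);	/* strict decimal parse */
	if (rc)
		return rc;			/* -EINVAL or -ERANGE */

	if (val < 0)
		return -EINVAL;			/* negative timeouts rejected */

	demo_contention_time = val;
	return count;				/* whole buffer consumed */
}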
2229419b7184..63363111380c 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -140,7 +140,7 @@ static const char *oes_strings[] = { static inline struct osc_extent *rb_extent(struct rb_node *n) { - if (n == NULL) + if (!n) return NULL; return container_of(n, struct osc_extent, oe_node); @@ -148,7 +148,7 @@ static inline struct osc_extent *rb_extent(struct rb_node *n) static inline struct osc_extent *next_extent(struct osc_extent *ext) { - if (ext == NULL) + if (!ext) return NULL; LASSERT(ext->oe_intree); @@ -157,7 +157,7 @@ static inline struct osc_extent *next_extent(struct osc_extent *ext) static inline struct osc_extent *prev_extent(struct osc_extent *ext) { - if (ext == NULL) + if (!ext) return NULL; LASSERT(ext->oe_intree); @@ -240,7 +240,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext, goto out; } - if (ext->oe_osclock == NULL && ext->oe_grants > 0) { + if (!ext->oe_osclock && ext->oe_grants > 0) { rc = 90; goto out; } @@ -262,7 +262,8 @@ static int osc_extent_sanity_check0(struct osc_extent *ext, } /* Do not verify page list if extent is in RPC. This is because an - * in-RPC extent is supposed to be exclusively accessible w/o lock. */ + * in-RPC extent is supposed to be exclusively accessible w/o lock. + */ if (ext->oe_state > OES_CACHE) { rc = 0; goto out; @@ -319,7 +320,7 @@ static int osc_extent_is_overlapped(struct osc_object *obj, if (!extent_debug) return 0; - for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) { + for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) { if (tmp == ext) continue; if (tmp->oe_end >= ext->oe_start && @@ -346,8 +347,8 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj) { struct osc_extent *ext; - ext = kmem_cache_alloc(osc_extent_kmem, GFP_NOFS | __GFP_ZERO); - if (ext == NULL) + ext = kmem_cache_zalloc(osc_extent_kmem, GFP_NOFS); + if (!ext) return NULL; RB_CLEAR_NODE(&ext->oe_node); @@ -415,7 +416,7 @@ static struct osc_extent *osc_extent_search(struct osc_object *obj, struct osc_extent *tmp, *p = NULL; LASSERT(osc_object_is_locked(obj)); - while (n != NULL) { + while (n) { tmp = rb_extent(n); if (index < tmp->oe_start) { n = n->rb_left; @@ -439,7 +440,7 @@ static struct osc_extent *osc_extent_lookup(struct osc_object *obj, struct osc_extent *ext; ext = osc_extent_search(obj, index); - if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end) + if (ext && ext->oe_start <= index && index <= ext->oe_end) return osc_extent_get(ext); return NULL; } @@ -454,7 +455,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) LASSERT(ext->oe_intree == 0); LASSERT(ext->oe_obj == obj); LASSERT(osc_object_is_locked(obj)); - while (*n != NULL) { + while (*n) { tmp = rb_extent(*n); parent = *n; @@ -463,7 +464,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) else if (ext->oe_start > tmp->oe_end) n = &(*n)->rb_right; else - EASSERTF(0, tmp, EXTSTR, EXTPARA(ext)); + EASSERTF(0, tmp, EXTSTR"\n", EXTPARA(ext)); } rb_link_node(&ext->oe_node, parent, n); rb_insert_color(&ext->oe_node, &obj->oo_root); @@ -533,7 +534,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, LASSERT(cur->oe_state == OES_CACHE); LASSERT(osc_object_is_locked(obj)); - if (victim == NULL) + if (!victim) return -EINVAL; if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait) @@ -587,7 +588,8 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) if 
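osc_extent_alloc() above switches to kmem_cache_zalloc(cache, GFP_NOFS) instead of kmem_cache_alloc() with an explicit __GFP_ZERO; it is the same zeroed allocation spelled more idiomatically. A small sketch of the slab-cache life cycle, with invented names:

#include <linux/slab.h>
#include <linux/errno.h>

struct demo_extent {			/* illustrative payload */
	unsigned long start;
	unsigned long end;
};

static struct kmem_cache *demo_extent_cache;

static int demo_cache_init(void)
{
	demo_extent_cache = kmem_cache_create("demo_extent",
					      sizeof(struct demo_extent),
					      0, 0, NULL);
	return demo_extent_cache ? 0 : -ENOMEM;
}

static struct demo_extent *demo_extent_alloc(void)
{
	/* zeroed allocation; equivalent to GFP_NOFS | __GFP_ZERO */
	return kmem_cache_zalloc(demo_extent_cache, GFP_NOFS);
}

static void demo_extent_free(struct demo_extent *ext)
{
	kmem_cache_free(demo_extent_cache, ext);
}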
(ext->oe_trunc_pending) { /* a truncate process is waiting for this extent. * This may happen due to a race, check - * osc_cache_truncate_start(). */ + * osc_cache_truncate_start(). + */ osc_extent_state_set(ext, OES_TRUNC); ext->oe_trunc_pending = 0; } else { @@ -601,7 +603,7 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) if (ext->oe_urgent) list_move_tail(&ext->oe_link, - &obj->oo_urgent_exts); + &obj->oo_urgent_exts); } osc_object_unlock(obj); @@ -639,11 +641,10 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, int rc; cur = osc_extent_alloc(obj); - if (cur == NULL) + if (!cur) return ERR_PTR(-ENOMEM); lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0); - LASSERT(lock != NULL); LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT); @@ -673,14 +674,15 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, /* grants has been allocated by caller */ LASSERTF(*grants >= chunksize + cli->cl_extent_tax, "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax); - LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur)); + LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n", + EXTPARA(cur)); restart: osc_object_lock(obj); ext = osc_extent_search(obj, cur->oe_start); - if (ext == NULL) + if (!ext) ext = first_extent(obj); - while (ext != NULL) { + while (ext) { loff_t ext_chk_start = ext->oe_start >> ppc_bits; loff_t ext_chk_end = ext->oe_end >> ppc_bits; @@ -691,7 +693,7 @@ restart: /* if covering by different locks, no chance to match */ if (lock != ext->oe_osclock) { EASSERTF(!overlapped(ext, cur), ext, - EXTSTR, EXTPARA(cur)); + EXTSTR"\n", EXTPARA(cur)); ext = next_extent(ext); continue; @@ -705,18 +707,21 @@ restart: /* ok, from now on, ext and cur have these attrs: * 1. covered by the same lock - * 2. contiguous at chunk level or overlapping. */ + * 2. contiguous at chunk level or overlapping. + */ if (overlapped(ext, cur)) { /* cur is the minimum unit, so overlapping means - * full contain. */ + * full contain. + */ EASSERTF((ext->oe_start <= cur->oe_start && ext->oe_end >= cur->oe_end), - ext, EXTSTR, EXTPARA(cur)); + ext, EXTSTR"\n", EXTPARA(cur)); if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) { /* for simplicity, we wait for this extent to - * finish before going forward. */ + * finish before going forward. + */ conflict = osc_extent_get(ext); break; } @@ -729,17 +734,20 @@ restart: if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) { /* we can't do anything for a non OES_CACHE extent, or * if there is someone waiting for this extent to be - * flushed, try next one. */ + * flushed, try next one. + */ ext = next_extent(ext); continue; } /* check if they belong to the same rpc slot before trying to * merge. the extents are not overlapped and contiguous at - * chunk level to get here. */ + * chunk level to get here. + */ if (ext->oe_max_end != max_end) { /* if they don't belong to the same RPC slot or - * max_pages_per_rpc has ever changed, do not merge. */ + * max_pages_per_rpc has ever changed, do not merge. + */ ext = next_extent(ext); continue; } @@ -748,7 +756,8 @@ restart: * level so that we know the whole extent is covered by grant * (the pages in the extent are NOT required to be contiguous). * Otherwise, it will be too much difficult to know which - * chunks have grants allocated. */ + * chunks have grants allocated. 
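Many hunks in osc_cache.c only touch comment layout: the closing delimiter of a multi-line comment moves onto its own line, the form checkpatch asks for in staging code. One compilable illustration of the target layout:

/* The text may still begin on the opening line, but the closing
 * delimiter now sits alone on the last line.
 */
static inline int demo_in_rpc(int state)
{
	/* An in-RPC extent is treated as exclusively owned, so its page
	 * list is not re-verified here.
	 */
	return state > 0;
}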
+ */ /* try to do front merge - extend ext's start */ if (chunk + 1 == ext_chk_start) { @@ -768,28 +777,29 @@ restart: *grants -= chunksize; /* try to merge with the next one because we just fill - * in a gap */ + * in a gap + */ if (osc_extent_merge(env, ext, next_extent(ext)) == 0) /* we can save extent tax from next extent */ *grants += cli->cl_extent_tax; found = osc_extent_hold(ext); } - if (found != NULL) + if (found) break; ext = next_extent(ext); } osc_extent_tree_dump(D_CACHE, obj); - if (found != NULL) { - LASSERT(conflict == NULL); + if (found) { + LASSERT(!conflict); if (!IS_ERR(found)) { LASSERT(found->oe_osclock == cur->oe_osclock); OSC_EXTENT_DUMP(D_CACHE, found, "found caching ext for %lu.\n", index); } - } else if (conflict == NULL) { + } else if (!conflict) { /* create a new extent */ EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur); cur->oe_grants = chunksize + cli->cl_extent_tax; @@ -804,11 +814,12 @@ restart: } osc_object_unlock(obj); - if (conflict != NULL) { - LASSERT(found == NULL); + if (conflict) { + LASSERT(!found); /* waiting for IO to finish. Please notice that it's impossible - * to be an OES_TRUNC extent. */ + * to be an OES_TRUNC extent. + */ rc = osc_extent_wait(env, conflict, OES_INV); osc_extent_put(env, conflict); conflict = NULL; @@ -845,8 +856,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, ext->oe_rc = rc ?: ext->oe_nr_pages; EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext); - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, - oap_pending_item) { + list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { list_del_init(&oap->oap_rpc_item); list_del_init(&oap->oap_pending_item); if (last_off <= oap->oap_obj_off) { @@ -865,7 +875,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, last_count != PAGE_CACHE_SIZE) { /* For short writes we shouldn't count parts of pages that * span a whole chunk on the OST side, or our accounting goes - * wrong. Should match the code in filter_grant_check. */ + * wrong. Should match the code in filter_grant_check. + */ int offset = oap->oap_page_off & ~CFS_PAGE_MASK; int count = oap->oap_count + (offset & (blocksize - 1)); int end = (offset + oap->oap_count) & (blocksize - 1); @@ -909,7 +920,8 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext, osc_object_lock(obj); LASSERT(sanity_check_nolock(ext) == 0); /* `Kick' this extent only if the caller is waiting for it to be - * written out. */ + * written out. + */ if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp && !ext->oe_trunc_pending) { if (ext->oe_state == OES_ACTIVE) { @@ -967,7 +979,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, /* Request new lu_env. * We can't use that env from osc_cache_truncate_start() because - * it's from lov_io_sub and not fully initialized. */ + * it's from lov_io_sub and not fully initialized. 
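osc_extent_finish() above keeps the "rc ?: ext->oe_nr_pages" expression; that is GCC's two-operand conditional, which evaluates the first operand once and reuses it when it is non-zero. A one-function sketch:

/* GNU extension: "a ?: b" yields a when a is non-zero, otherwise b. */
static int demo_finish(int rc, int nr_pages)
{
	return rc ?: nr_pages;	/* propagate the error, else report pages */
}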
+ */ env = cl_env_nested_get(&nest); io = &osc_env_info(env)->oti_io; io->ci_obj = cl_object_top(osc2cl(obj)); @@ -976,15 +989,15 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, goto out; /* discard all pages with index greater then trunc_index */ - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, - oap_pending_item) { + list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { struct cl_page *sub = oap2cl_page(oap); struct cl_page *page = cl_page_top(sub); LASSERT(list_empty(&oap->oap_rpc_item)); /* only discard the pages with their index greater than - * trunc_index, and ... */ + * trunc_index, and ... + */ if (sub->cp_index < trunc_index || (sub->cp_index == trunc_index && partial)) { /* accounting how many pages remaining in the chunk @@ -1028,11 +1041,13 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, pgoff_t last_index; /* if there is no pages in this chunk, we can also free grants - * for the last chunk */ + * for the last chunk + */ if (pages_in_chunk == 0) { /* if this is the 1st chunk and no pages in this chunk, * ext->oe_nr_pages must be zero, so we should be in - * the other if-clause. */ + * the other if-clause. + */ LASSERT(trunc_chunk > 0); --trunc_chunk; ++chunks; @@ -1074,13 +1089,13 @@ static int osc_extent_make_ready(const struct lu_env *env, LASSERT(sanity_check(ext) == 0); /* in locking state, any process should not touch this extent. */ EASSERT(ext->oe_state == OES_LOCKING, ext); - EASSERT(ext->oe_owner != NULL, ext); + EASSERT(ext->oe_owner, ext); OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n"); list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { ++page_count; - if (last == NULL || last->oap_obj_off < oap->oap_obj_off) + if (!last || last->oap_obj_off < oap->oap_obj_off) last = oap; /* checking ASYNC_READY is race safe */ @@ -1103,9 +1118,10 @@ static int osc_extent_make_ready(const struct lu_env *env, } LASSERT(page_count == ext->oe_nr_pages); - LASSERT(last != NULL); + LASSERT(last); /* the last page is the only one we need to refresh its count by - * the size of file. */ + * the size of file. 
+ */ if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); LASSERT(last->oap_count > 0); @@ -1114,7 +1130,8 @@ static int osc_extent_make_ready(const struct lu_env *env, } /* for the rest of pages, we don't need to call osf_refresh_count() - * because it's known they are not the last page */ + * because it's known they are not the last page + */ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; @@ -1167,9 +1184,10 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants) end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1); next = next_extent(ext); - if (next != NULL && next->oe_start <= end_index) { + if (next && next->oe_start <= end_index) { /* complex mode - overlapped with the next extent, - * this case will be handled by osc_extent_find() */ + * this case will be handled by osc_extent_find() + */ rc = -EAGAIN; goto out; } @@ -1197,7 +1215,7 @@ static void osc_extent_tree_dump0(int level, struct osc_object *obj, /* osc_object_lock(obj); */ cnt = 1; - for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext)) + for (ext = first_extent(obj); ext; ext = next_extent(ext)) OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++); cnt = 1; @@ -1262,7 +1280,6 @@ static int osc_refresh_count(const struct lu_env *env, /* readpage queues with _COUNT_STABLE, shouldn't get here. */ LASSERT(!(cmd & OBD_BRW_READ)); - LASSERT(opg != NULL); obj = opg->ops_cl.cpl_obj; cl_object_attr_lock(obj); @@ -1299,16 +1316,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, * page->cp_req can be NULL if io submission failed before * cl_req was allocated. */ - if (page->cp_req != NULL) + if (page->cp_req) cl_req_page_done(env, page); - LASSERT(page->cp_req == NULL); + LASSERT(!page->cp_req); crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE; /* Clear opg->ops_transfer_pinned before VM lock is released. */ opg->ops_transfer_pinned = 0; spin_lock(&obj->oo_seatbelt); - LASSERT(opg->ops_submitter != NULL); + LASSERT(opg->ops_submitter); LASSERT(!list_empty(&opg->ops_inflight)); list_del_init(&opg->ops_inflight); opg->ops_submitter = NULL; @@ -1367,7 +1384,8 @@ static void osc_consume_write_grant(struct client_obd *cli, } /* the companion to osc_consume_write_grant, called when a brw has completed. - * must be called with the loi lock held. */ + * must be called with the loi lock held. + */ static void osc_release_write_grant(struct client_obd *cli, struct brw_page *pga) { @@ -1410,7 +1428,8 @@ static void __osc_unreserve_grant(struct client_obd *cli, /* it's quite normal for us to get more grant than reserved. * Thinking about a case that two extents merged by adding a new * chunk, we can save one extent tax. 
If extent tax is greater than - * one chunk, we can save more grant by adding a new chunk */ + * one chunk, we can save more grant by adding a new chunk + */ cli->cl_reserved_grant -= reserved; if (unused > reserved) { cli->cl_avail_grant += reserved; @@ -1454,7 +1473,8 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, cli->cl_lost_grant += lost_grant; if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { /* borrow some grant from truncate to avoid the case that - * truncate uses up all avail grant */ + * truncate uses up all avail grant + */ cli->cl_lost_grant -= grant; cli->cl_avail_grant += grant; } @@ -1539,7 +1559,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, client_obd_list_lock(&cli->cl_loi_list_lock); /* force the caller to try sync io. this can jump the list - * of queued writes and create a discontiguous rpc stream */ + * of queued writes and create a discontiguous rpc stream + */ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || cli->cl_dirty_max < PAGE_CACHE_SIZE || cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) { @@ -1558,7 +1579,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, * Adding a cache waiter will trigger urgent write-out no matter what * RPC size will be. * The exiting condition is no avail grants and no dirty pages caching, - * that really means there is no space on the OST. */ + * that really means there is no space on the OST. + */ init_waitqueue_head(&ocw.ocw_waitq); ocw.ocw_oap = oap; ocw.ocw_grant = bytes; @@ -1640,7 +1662,8 @@ static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc) /* This maintains the lists of pending pages to read/write for a given object * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint() - * to quickly find objects that are ready to send an RPC. */ + * to quickly find objects that are ready to send an RPC. + */ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, int cmd) { @@ -1649,8 +1672,9 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, /* if we have an invalid import we want to drain the queued pages * by forcing them through rpcs that immediately fail and complete * the pages. recovery relies on this to empty the queued pages - * before canceling the locks and evicting down the llite pages */ - if ((cli->cl_import == NULL || cli->cl_import->imp_invalid)) + * before canceling the locks and evicting down the llite pages + */ + if (!cli->cl_import || cli->cl_import->imp_invalid) invalid_import = 1; if (cmd & OBD_BRW_WRITE) { @@ -1670,7 +1694,8 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, } /* trigger a write rpc stream as long as there are dirtiers * waiting for space. as they're waiting, they're not going to - * create more pages to coalesce with what's waiting.. */ + * create more pages to coalesce with what's waiting.. 
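osc_enter_cache() above queues a cache waiter with its own wait queue head and lets the grant-releasing path wake it. A loose sketch of that waiter shape; the structure, field names and wake-up contract are invented here, only the wait-queue API calls are the kernel's:

#include <linux/wait.h>
#include <linux/errno.h>

struct demo_cache_waiter {
	wait_queue_head_t dcw_waitq;
	int dcw_grant;
	int dcw_rc;
};

/* Block until the granting side fills in dcw_rc and wakes dcw_waitq. */
static int demo_wait_for_grant(struct demo_cache_waiter *dcw, int bytes)
{
	int rc;

	init_waitqueue_head(&dcw->dcw_waitq);
	dcw->dcw_grant = bytes;
	dcw->dcw_rc = -EAGAIN;

	rc = wait_event_interruptible(dcw->dcw_waitq, dcw->dcw_rc != -EAGAIN);
	if (rc)
		return rc;		/* interrupted by a signal */
	return dcw->dcw_rc;
}

/* The granting side reports its result and wakes the waiter. */
static void demo_grant_available(struct demo_cache_waiter *dcw, int rc)
{
	dcw->dcw_rc = rc;
	wake_up(&dcw->dcw_waitq);
}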
+ */ if (!list_empty(&cli->cl_cache_waiters)) { CDEBUG(D_CACHE, "cache waiters forcing RPC\n"); return 1; @@ -1723,7 +1748,8 @@ static void on_list(struct list_head *item, struct list_head *list, int should_b } /* maintain the osc's cli list membership invariants so that osc_send_oap_rpc - * can find pages to build into rpcs quickly */ + * can find pages to build into rpcs quickly + */ static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc) { if (osc_makes_hprpc(osc)) { @@ -1761,7 +1787,8 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc) * application. As an async write fails we record the error code for later if * the app does an fsync. As long as errors persist we force future rpcs to be * sync so that the app can get a sync error and break the cycle of queueing - * pages for which writeback will fail. */ + * pages for which writeback will fail. + */ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, int rc) { @@ -1780,7 +1807,8 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, } /* this must be called holding the loi list lock to give coverage to exit_cache, - * async_flag maintenance, and oap_request */ + * async_flag maintenance, and oap_request + */ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, struct osc_async_page *oap, int sent, int rc) { @@ -1788,7 +1816,7 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, struct lov_oinfo *loi = osc->oo_oinfo; __u64 xid = 0; - if (oap->oap_request != NULL) { + if (oap->oap_request) { xid = ptlrpc_req_xid(oap->oap_request); ptlrpc_req_finished(oap->oap_request); oap->oap_request = NULL; @@ -1877,13 +1905,12 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) { struct client_obd *cli = osc_cli(obj); struct osc_extent *ext; + struct osc_extent *temp; int page_count = 0; unsigned int max_pages = cli->cl_max_pages_per_rpc; LASSERT(osc_object_is_locked(obj)); - while (!list_empty(&obj->oo_hp_exts)) { - ext = list_entry(obj->oo_hp_exts.next, struct osc_extent, - oe_link); + list_for_each_entry_safe(ext, temp, &obj->oo_hp_exts, oe_link) { LASSERT(ext->oe_state == OES_CACHE); if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, &max_pages)) @@ -1895,7 +1922,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) while (!list_empty(&obj->oo_urgent_exts)) { ext = list_entry(obj->oo_urgent_exts.next, - struct osc_extent, oe_link); + struct osc_extent, oe_link); if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, &max_pages)) return page_count; @@ -1906,7 +1933,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) while ((ext = next_extent(ext)) != NULL) { if ((ext->oe_state != OES_CACHE) || (!list_empty(&ext->oe_link) && - ext->oe_owner != NULL)) + ext->oe_owner)) continue; if (!try_to_add_extent_for_io(cli, ext, rpclist, @@ -1918,10 +1945,10 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) return page_count; ext = first_extent(obj); - while (ext != NULL) { + while (ext) { if ((ext->oe_state != OES_CACHE) || /* this extent may be already in current rpclist */ - (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) { + (!list_empty(&ext->oe_link) && ext->oe_owner)) { ext = next_extent(ext); continue; } @@ -1938,6 +1965,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) static int osc_send_write_rpc(const struct lu_env *env, struct client_obd 
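get_write_extents() above (and the truncate path further down) replace open-coded "while (!list_empty()) { list_entry(...); }" loops with list_for_each_entry_safe(), which keeps a lookahead cursor so the current entry may be moved or deleted inside the body. A self-contained sketch with invented types:

#include <linux/list.h>

struct demo_ext {
	struct list_head de_link;
	int de_pages;
};

/* Move entries onto @rpclist until the page budget is spent; the safe
 * iterator tolerates list_move_tail() on the node being visited.
 */
static int demo_collect(struct list_head *src, struct list_head *rpclist,
			int budget)
{
	struct demo_ext *ext, *tmp;
	int count = 0;

	list_for_each_entry_safe(ext, tmp, src, de_link) {
		if (count + ext->de_pages > budget)
			break;
		list_move_tail(&ext->de_link, rpclist);
		count += ext->de_pages;
	}
	return count;
}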
*cli, struct osc_object *osc) + __must_hold(osc) { LIST_HEAD(rpclist); struct osc_extent *ext; @@ -1967,7 +1995,8 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, } /* we're going to grab page lock, so release object lock because - * lock order is page lock -> object lock. */ + * lock order is page lock -> object lock. + */ osc_object_unlock(osc); list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) { @@ -1979,7 +2008,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, continue; } } - if (first == NULL) { + if (!first) { first = ext; srvlock = ext->oe_srvlock; } else { @@ -2010,6 +2039,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, static int osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, struct osc_object *osc) + __must_hold(osc) { struct osc_extent *ext; struct osc_extent *next; @@ -2019,8 +2049,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, int rc = 0; LASSERT(osc_object_is_locked(osc)); - list_for_each_entry_safe(ext, next, - &osc->oo_reading_exts, oe_link) { + list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) { EASSERT(ext->oe_state == OES_LOCK_DONE, ext); if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count, &max_pages)) @@ -2051,12 +2080,14 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, }) /* This is called by osc_check_rpcs() to find which objects have pages that - * we could be sending. These lists are maintained by osc_makes_rpc(). */ + * we could be sending. These lists are maintained by osc_makes_rpc(). + */ static struct osc_object *osc_next_obj(struct client_obd *cli) { /* First return objects that have blocked locks so that they * will be flushed quickly and other clients can get the lock, - * then objects which have pages ready to be stuffed into RPCs */ + * then objects which have pages ready to be stuffed into RPCs + */ if (!list_empty(&cli->cl_loi_hp_ready_list)) return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item); if (!list_empty(&cli->cl_loi_ready_list)) @@ -2065,14 +2096,16 @@ static struct osc_object *osc_next_obj(struct client_obd *cli) /* then if we have cache waiters, return all objects with queued * writes. This is especially important when many small files * have filled up the cache and not been fired into rpcs because - * they don't pass the nr_pending/object threshold */ + * they don't pass the nr_pending/object threshold + */ if (!list_empty(&cli->cl_cache_waiters) && !list_empty(&cli->cl_loi_write_list)) return list_to_obj(&cli->cl_loi_write_list, write_item); /* then return all queued objects when we have an invalid import - * so that they get flushed */ - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) { + * so that they get flushed + */ + if (!cli->cl_import || cli->cl_import->imp_invalid) { if (!list_empty(&cli->cl_loi_write_list)) return list_to_obj(&cli->cl_loi_write_list, write_item); if (!list_empty(&cli->cl_loi_read_list)) @@ -2083,6 +2116,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli) /* called with the loi list lock held */ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) + __must_hold(&cli->cl_loi_list_lock) { struct osc_object *osc; int rc = 0; @@ -2108,7 +2142,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) * would be redundant if we were getting read/write work items * instead of objects. 
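osc_send_write_rpc(), osc_send_read_rpc() and osc_check_rpcs() above gain __must_hold() annotations, which tell sparse that the function must be entered and left with the named lock held; in a normal build the annotation expands to nothing. A minimal sketch:

#include <linux/spinlock.h>
#include <linux/compiler.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

/* sparse verifies callers hold demo_lock across this function */
static void demo_bump_locked(void)
	__must_hold(&demo_lock)
{
	demo_counter++;
}

static void demo_bump(void)
{
	spin_lock(&demo_lock);
	demo_bump_locked();
	spin_unlock(&demo_lock);
}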
we don't want send_oap_rpc to drain a * partial read pending queue when we're given this object to - * do io on writes while there are cache waiters */ + * do io on writes while there are cache waiters + */ osc_object_lock(osc); if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) { rc = osc_send_write_rpc(env, cli, osc); @@ -2130,7 +2165,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) * because it might be blocked at grabbing * the page lock as we mentioned. * - * Anyway, continue to drain pages. */ + * Anyway, continue to drain pages. + */ /* break; */ } } @@ -2155,12 +2191,13 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, { int rc = 0; - if (osc != NULL && osc_list_maint(cli, osc) == 0) + if (osc && osc_list_maint(cli, osc) == 0) return 0; if (!async) { /* disable osc_lru_shrink() temporarily to avoid - * potential stack overrun problem. LU-2859 */ + * potential stack overrun problem. LU-2859 + */ atomic_inc(&cli->cl_lru_shrinkers); client_obd_list_lock(&cli->cl_loi_list_lock); osc_check_rpcs(env, cli); @@ -2168,7 +2205,7 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, atomic_dec(&cli->cl_lru_shrinkers); } else { CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); - LASSERT(cli->cl_writeback_work != NULL); + LASSERT(cli->cl_writeback_work); rc = ptlrpcd_queue_work(cli->cl_writeback_work); } return rc; @@ -2233,7 +2270,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, if (oap->oap_magic != OAP_MAGIC) return -EINVAL; - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) + if (!cli->cl_import || cli->cl_import->imp_invalid) return -EIO; if (!list_empty(&oap->oap_pending_item) || @@ -2284,12 +2321,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, * 1. if there exists an active extent for this IO, mostly this page * can be added to the active extent and sometimes we need to * expand extent to accommodate this page; - * 2. otherwise, a new extent will be allocated. */ + * 2. otherwise, a new extent will be allocated. + */ ext = oio->oi_active; - if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) { + if (ext && ext->oe_start <= index && ext->oe_max_end >= index) { /* one chunk plus extent overhead must be enough to write this - * page */ + * page + */ grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; if (ext->oe_end >= index) grants = 0; @@ -2316,7 +2355,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, } } rc = 0; - } else if (ext != NULL) { + } else if (ext) { /* index is located outside of active extent */ need_release = 1; } @@ -2326,13 +2365,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, ext = NULL; } - if (ext == NULL) { + if (!ext) { int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; /* try to find new extent to cover this page */ - LASSERT(oio->oi_active == NULL); + LASSERT(!oio->oi_active); /* we may have allocated grant for this page if we failed - * to expand the previous active extent. */ + * to expand the previous active extent. 
+ */ LASSERT(ergo(grants > 0, grants >= tmp)); rc = 0; @@ -2359,8 +2399,8 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, osc_unreserve_grant(cli, grants, tmp); } - LASSERT(ergo(rc == 0, ext != NULL)); - if (ext != NULL) { + LASSERT(ergo(rc == 0, ext)); + if (ext) { EASSERTF(ext->oe_end >= index && ext->oe_start <= index, ext, "index = %lu.\n", index); LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0); @@ -2397,15 +2437,16 @@ int osc_teardown_async_page(const struct lu_env *env, ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index); /* only truncated pages are allowed to be taken out. * See osc_extent_truncate() and osc_cache_truncate_start() - * for details. */ - if (ext != NULL && ext->oe_state != OES_TRUNC) { + * for details. + */ + if (ext && ext->oe_state != OES_TRUNC) { OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n", oap2cl_page(oap)->cp_index); rc = -EBUSY; } } osc_object_unlock(obj); - if (ext != NULL) + if (ext) osc_extent_put(env, ext); return rc; } @@ -2430,7 +2471,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, osc_object_lock(obj); ext = osc_extent_lookup(obj, index); - if (ext == NULL) { + if (!ext) { osc_extent_tree_dump(D_ERROR, obj); LASSERTF(0, "page index %lu is NOT covered.\n", index); } @@ -2448,7 +2489,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, * exists a deadlock problem because other process can wait for * page writeback bit holding page lock; and meanwhile in * vvp_page_make_ready(), we need to grab page lock before - * really sending the RPC. */ + * really sending the RPC. + */ case OES_TRUNC: /* race with truncate, page will be redirtied */ case OES_ACTIVE: @@ -2456,7 +2498,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, * re-dirty the page. If we continued on here, and we were the * one making the extent active, we could deadlock waiting for * the page writeback to clear but it won't because the extent - * is active and won't be written out. */ + * is active and won't be written out. + */ rc = -EAGAIN; goto out; default: @@ -2527,12 +2570,13 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) if (ext->oe_start <= index && ext->oe_end >= index) { LASSERT(ext->oe_state == OES_LOCK_DONE); /* For OES_LOCK_DONE state extent, it has already held - * a refcount for RPC. */ + * a refcount for RPC. + */ found = osc_extent_get(ext); break; } } - if (found != NULL) { + if (found) { list_del_init(&found->oe_link); osc_update_pending(obj, cmd, -found->oe_nr_pages); osc_object_unlock(obj); @@ -2543,8 +2587,9 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) } else { osc_object_unlock(obj); /* ok, it's been put in an rpc. 
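The assertions above drop the explicit "!= NULL" inside ergo(); as far as I recall, libcfs defines ergo(a, b) as "!(a) || (b)" (logical implication) and equi(a, b) as both-or-neither, so the shorter form is equivalent. A hedged restatement with local macro names:

#include <linux/bug.h>

#define demo_ergo(a, b)	(!(a) || (b))		/* "a implies b" */
#define demo_equi(a, b)	(!!(a) == !!(b))	/* "a iff b" */

static void demo_check(int rc, void *ext)
{
	/* "if the call succeeded, an extent must have been produced" */
	WARN_ON(!demo_ergo(rc == 0, ext));
}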
only one oap gets a request - * reference */ - if (oap->oap_request != NULL) { + * reference + */ + if (oap->oap_request) { ptlrpc_mark_interrupted(oap->oap_request); ptlrpcd_wake(oap->oap_request); ptlrpc_req_finished(oap->oap_request); @@ -2579,7 +2624,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, } ext = osc_extent_alloc(obj); - if (ext == NULL) { + if (!ext) { list_for_each_entry_safe(oap, tmp, list, oap_pending_item) { list_del_init(&oap->oap_pending_item); osc_ap_completion(env, cli, oap, 0, -ENOMEM); @@ -2621,6 +2666,7 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio, { struct client_obd *cli = osc_cli(obj); struct osc_extent *ext; + struct osc_extent *temp; struct osc_extent *waiting = NULL; pgoff_t index; LIST_HEAD(list); @@ -2634,18 +2680,19 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio, again: osc_object_lock(obj); ext = osc_extent_search(obj, index); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < index) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { EASSERT(ext->oe_state != OES_TRUNC, ext); if (ext->oe_state > OES_CACHE || ext->oe_urgent) { /* if ext is in urgent state, it means there must exist * a page already having been flushed by write_page(). * We have to wait for this extent because we can't - * truncate that page. */ + * truncate that page. + */ LASSERT(!ext->oe_hp); OSC_EXTENT_DUMP(D_CACHE, ext, "waiting for busy extent\n"); @@ -2660,7 +2707,8 @@ again: /* though we grab inode mutex for write path, but we * release it before releasing extent(in osc_io_end()), * so there is a race window that an extent is still - * in OES_ACTIVE when truncate starts. */ + * in OES_ACTIVE when truncate starts. + */ LASSERT(!ext->oe_trunc_pending); ext->oe_trunc_pending = 1; } else { @@ -2678,14 +2726,14 @@ again: osc_list_maint(cli, obj); - while (!list_empty(&list)) { + list_for_each_entry_safe(ext, temp, &list, oe_link) { int rc; - ext = list_entry(list.next, struct osc_extent, oe_link); list_del_init(&ext->oe_link); /* extent may be in OES_ACTIVE state because inode mutex - * is released before osc_io_end() in file write case */ + * is released before osc_io_end() in file write case + */ if (ext->oe_state != OES_TRUNC) osc_extent_wait(env, ext, OES_TRUNC); @@ -2710,19 +2758,21 @@ again: /* we need to hold this extent in OES_TRUNC state so * that no writeback will happen. This is to avoid - * BUG 17397. */ - LASSERT(oio->oi_trunc == NULL); + * BUG 17397. + */ + LASSERT(!oio->oi_trunc); oio->oi_trunc = osc_extent_get(ext); OSC_EXTENT_DUMP(D_CACHE, ext, "trunc at %llu\n", size); } osc_extent_put(env, ext); } - if (waiting != NULL) { + if (waiting) { int rc; /* ignore the result of osc_extent_wait the write initiator - * should take care of it. */ + * should take care of it. 
+ */ rc = osc_extent_wait(env, waiting, OES_INV); if (rc < 0) OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc); @@ -2743,7 +2793,7 @@ void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio, struct osc_extent *ext = oio->oi_trunc; oio->oi_trunc = NULL; - if (ext != NULL) { + if (ext) { bool unplug = false; EASSERT(ext->oe_nr_pages > 0, ext); @@ -2786,11 +2836,11 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj, again: osc_object_lock(obj); ext = osc_extent_search(obj, index); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < index) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { int rc; if (ext->oe_start > end) @@ -2841,11 +2891,11 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, osc_object_lock(obj); ext = osc_extent_search(obj, start); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < start) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { if (ext->oe_start > end) break; @@ -2864,18 +2914,18 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, ext->oe_urgent = 1; list = &obj->oo_urgent_exts; } - if (list != NULL) + if (list) list_move_tail(&ext->oe_link, list); unplug = true; } else { /* the only discarder is lock cancelling, so - * [start, end] must contain this extent */ + * [start, end] must contain this extent + */ EASSERT(ext->oe_start >= start && ext->oe_max_end <= end, ext); osc_extent_state_set(ext, OES_LOCKING); ext->oe_owner = current; - list_move_tail(&ext->oe_link, - &discard_list); + list_move_tail(&ext->oe_link, &discard_list); osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages); } @@ -2884,14 +2934,16 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, /* It's pretty bad to wait for ACTIVE extents, because * we don't know how long we will wait for it to be * flushed since it may be blocked at awaiting more - * grants. We do this for the correctness of fsync. */ + * grants. We do this for the correctness of fsync. + */ LASSERT(hp == 0 && discard == 0); ext->oe_urgent = 1; break; case OES_TRUNC: /* this extent is being truncated, can't do anything * for it now. it will be set to urgent after truncate - * is finished in osc_cache_truncate_end(). */ + * is finished in osc_cache_truncate_end(). + */ default: break; } @@ -2910,7 +2962,8 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, EASSERT(ext->oe_state == OES_LOCKING, ext); /* Discard caching pages. We don't actually write this - * extent out but we complete it as if we did. */ + * extent out but we complete it as if we did. + */ rc = osc_extent_make_ready(env, ext); if (unlikely(rc < 0)) { OSC_EXTENT_DUMP(D_ERROR, ext, diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h index 415c27e4ab66..d55d04d0428b 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h @@ -69,10 +69,12 @@ struct osc_io { /** true if this io is lockless. */ int oi_lockless; /** active extents, we know how many bytes is going to be written, - * so having an active extent will prevent it from being fragmented */ + * so having an active extent will prevent it from being fragmented + */ struct osc_extent *oi_active; /** partially truncated extent, we need to hold this extent to prevent - * page writeback from happening. */ + * page writeback from happening. 
+ */ struct osc_extent *oi_trunc; struct obd_info oi_info; @@ -154,7 +156,8 @@ struct osc_object { atomic_t oo_nr_writes; /** Protect extent tree. Will be used to protect - * oo_{read|write}_pages soon. */ + * oo_{read|write}_pages soon. + */ spinlock_t oo_lock; }; @@ -472,7 +475,7 @@ static inline struct osc_thread_info *osc_env_info(const struct lu_env *env) struct osc_thread_info *info; info = lu_context_key_get(&env->le_ctx, &osc_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -481,7 +484,7 @@ static inline struct osc_session *osc_env_session(const struct lu_env *env) struct osc_session *ses; ses = lu_context_key_get(env->le_ses, &osc_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -522,7 +525,7 @@ static inline struct cl_object *osc2cl(const struct osc_object *obj) return (struct cl_object *)&obj->oo_cl; } -static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode) +static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode) { LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP); if (mode == CLM_READ) @@ -533,7 +536,7 @@ static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode) return LCK_GROUP; } -static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode) +static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode) { LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP); if (mode == LCK_PR) @@ -627,22 +630,26 @@ struct osc_extent { oe_srvlock:1, oe_memalloc:1, /** an ACTIVE extent is going to be truncated, so when this extent - * is released, it will turn into TRUNC state instead of CACHE. */ + * is released, it will turn into TRUNC state instead of CACHE. + */ oe_trunc_pending:1, /** this extent should be written asap and someone may wait for the * write to finish. This bit is usually set along with urgent if * the extent was CACHE state. * fsync_wait extent can't be merged because new extent region may - * exceed fsync range. */ + * exceed fsync range. + */ oe_fsync_wait:1, /** covering lock is being canceled */ oe_hp:1, /** this extent should be written back asap. set if one of pages is - * called by page WB daemon, or sync write or reading requests. */ + * called by page WB daemon, or sync write or reading requests. + */ oe_urgent:1; /** how many grants allocated for this extent. * Grant allocated for this extent. There is no grant allocated - * for reading extents and sync write extents. */ + * for reading extents and sync write extents. + */ unsigned int oe_grants; /** # of dirty pages in this extent */ unsigned int oe_nr_pages; @@ -655,21 +662,25 @@ struct osc_extent { struct osc_page *oe_next_page; /** start and end index of this extent, include start and end * themselves. Page offset here is the page index of osc_pages. - * oe_start is used as keyword for red-black tree. */ + * oe_start is used as keyword for red-black tree. + */ pgoff_t oe_start; pgoff_t oe_end; /** maximum ending index of this extent, this is limited by - * max_pages_per_rpc, lock extent and chunk size. */ + * max_pages_per_rpc, lock extent and chunk size. + */ pgoff_t oe_max_end; /** waitqueue - for those who want to be notified if this extent's - * state has changed. */ + * state has changed. + */ wait_queue_head_t oe_waitq; /** lock covering this extent */ struct cl_lock *oe_osclock; /** terminator of this extent. Must be true if this extent is in IO. */ struct task_struct *oe_owner; /** return value of writeback. 
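The osc_cl_internal.h hunks above spell the lock mode as "enum ldlm_mode" instead of the old ldlm_mode_t typedef, in line with the kernel's general dislike of typedef'd enums and structs. A small sketch of the same shape with invented names:

/* No typedef: the enum tag is written out at every use site. */
enum demo_lock_mode {
	DEMO_LCK_READ,
	DEMO_LCK_WRITE,
	DEMO_LCK_GROUP,
};

static inline enum demo_lock_mode demo_cl2ldlm(int write_requested)
{
	return write_requested ? DEMO_LCK_WRITE : DEMO_LCK_READ;
}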
If somebody is waiting for this extent, - * this value can be known by outside world. */ + * this value can be known by outside world. + */ int oe_rc; /** max pages per rpc when this extent was created */ unsigned int oe_mppr; diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c index 7078cc57d8b9..d4fe507f165f 100644 --- a/drivers/staging/lustre/lustre/osc/osc_dev.c +++ b/drivers/staging/lustre/lustre/osc/osc_dev.c @@ -122,8 +122,8 @@ static void *osc_key_init(const struct lu_context *ctx, { struct osc_thread_info *info; - info = kmem_cache_alloc(osc_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(osc_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -147,8 +147,8 @@ static void *osc_session_init(const struct lu_context *ctx, { struct osc_session *info; - info = kmem_cache_alloc(osc_session_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(osc_session_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -228,7 +228,7 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env, /* Setup OSC OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); + LASSERT(obd); rc = osc_setup(obd, cfg); if (rc) { osc_device_free(env, d); diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h index a4c61463b1c7..ea695c2099ee 100644 --- a/drivers/staging/lustre/lustre/osc/osc_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_internal.h @@ -47,11 +47,13 @@ struct lu_env; enum async_flags { ASYNC_READY = 0x1, /* ap_make_ready will not be called before this - page is added to an rpc */ + * page is added to an rpc + */ ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */ ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called - to give the caller a chance to update - or cancel the size of the io */ + * to give the caller a chance to update + * or cancel the size of the io + */ ASYNC_HP = 0x10, }; diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c index abd0beb483fe..6bd0a45d8b06 100644 --- a/drivers/staging/lustre/lustre/osc/osc_io.c +++ b/drivers/staging/lustre/lustre/osc/osc_io.c @@ -73,7 +73,7 @@ static struct osc_page *osc_cl_page_osc(struct cl_page *page) const struct cl_page_slice *slice; slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); + LASSERT(slice); return cl2osc_page(slice); } @@ -135,7 +135,7 @@ static int osc_io_submit(const struct lu_env *env, /* Top level IO. */ io = page->cp_owner; - LASSERT(io != NULL); + LASSERT(io); opg = osc_cl_page_osc(page); oap = &opg->ops_oap; @@ -266,13 +266,14 @@ static int osc_io_prepare_write(const struct lu_env *env, * This implements OBD_BRW_CHECK logic from old client. */ - if (imp == NULL || imp->imp_invalid) + if (!imp || imp->imp_invalid) result = -EIO; if (result == 0 && oio->oi_lockless) /* this page contains `invalid' data, but who cares? * nobody can access the invalid data. * in osc_io_commit_write(), we're going to write exact - * [from, to) bytes of this page to OST. -jay */ + * [from, to) bytes of this page to OST. 
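osc_key_init() and osc_session_init() above report allocation failure through the returned pointer itself, turning a NULL from kmem_cache_zalloc() into ERR_PTR(-ENOMEM). A self-contained sketch of the ERR_PTR/IS_ERR/PTR_ERR convention with made-up names:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_info {
	int value;
};

/* Failure is encoded in the pointer; callers test it with IS_ERR(). */
static struct demo_info *demo_info_create(void)
{
	struct demo_info *info;

	info = kzalloc(sizeof(*info), GFP_NOFS);
	if (!info)
		info = ERR_PTR(-ENOMEM);
	return info;
}

static int demo_info_use(void)
{
	struct demo_info *info = demo_info_create();

	if (IS_ERR(info))
		return PTR_ERR(info);	/* recover the errno */

	kfree(info);
	return 0;
}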
-jay + */ cl_page_export(env, slice->cpl_page, 1); return result; @@ -349,14 +350,14 @@ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, __u64 start = *(__u64 *)cbdata; slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); + LASSERT(slice); ops = cl2osc_page(slice); oap = &ops->ops_oap; if (oap->oap_cmd & OBD_BRW_WRITE && !list_empty(&oap->oap_pending_item)) CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n", - start, current->comm); + start, current->comm); { struct page *vmpage = cl_page_vmpage(env, page); @@ -500,7 +501,7 @@ static void osc_io_setattr_end(const struct lu_env *env, __u64 size = io->u.ci_setattr.sa_attr.lvb_size; osc_trunc_check(env, io, oio, size); - if (oio->oi_trunc != NULL) { + if (oio->oi_trunc) { osc_cache_truncate_end(env, oio, cl2osc(obj)); oio->oi_trunc = NULL; } @@ -596,7 +597,8 @@ static int osc_io_fsync_start(const struct lu_env *env, * send OST_SYNC RPC. This is bad because it causes extents * to be written osc by osc. However, we usually start * writeback before CL_FSYNC_ALL so this won't have any real - * problem. */ + * problem. + */ rc = osc_cache_wait_range(env, osc, start, end); if (result == 0) result = rc; @@ -754,13 +756,12 @@ static void osc_req_attr_set(const struct lu_env *env, opg = osc_cl_page_osc(apage); apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */ lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1); - if (lock == NULL) { + if (!lock) { struct cl_object_header *head; struct cl_lock *scan; head = cl_object_header(apage->cp_obj); - list_for_each_entry(scan, &head->coh_locks, - cll_linkage) + list_for_each_entry(scan, &head->coh_locks, cll_linkage) CL_LOCK_DEBUG(D_ERROR, env, scan, "no cover page!\n"); CL_PAGE_DEBUG(D_ERROR, env, apage, @@ -770,10 +771,9 @@ static void osc_req_attr_set(const struct lu_env *env, } olck = osc_lock_at(lock); - LASSERT(olck != NULL); - LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL)); + LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock)); /* check for lockless io. 
*/ - if (olck->ols_lock != NULL) { + if (olck->ols_lock) { oa->o_handle = olck->ols_lock->l_remote_handle; oa->o_valid |= OBD_MD_FLHANDLE; } @@ -803,8 +803,8 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev, struct osc_req *or; int result; - or = kmem_cache_alloc(osc_req_kmem, GFP_NOFS | __GFP_ZERO); - if (or != NULL) { + or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS); + if (or) { cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); result = 0; } else diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c index 71f2810d18b9..013df9787f3e 100644 --- a/drivers/staging/lustre/lustre/osc/osc_lock.c +++ b/drivers/staging/lustre/lustre/osc/osc_lock.c @@ -79,7 +79,7 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle) struct ldlm_lock *lock; lock = ldlm_handle2lock(handle); - if (lock != NULL) + if (lock) LDLM_LOCK_PUT(lock); return lock; } @@ -94,42 +94,40 @@ static int osc_lock_invariant(struct osc_lock *ols) int handle_used = lustre_handle_is_used(&ols->ols_handle); if (ergo(osc_lock_is_lockless(ols), - ols->ols_locklessable && ols->ols_lock == NULL)) + ols->ols_locklessable && !ols->ols_lock)) return 1; /* * If all the following "ergo"s are true, return 1, otherwise 0 */ - if (!ergo(olock != NULL, handle_used)) + if (!ergo(olock, handle_used)) return 0; - if (!ergo(olock != NULL, - olock->l_handle.h_cookie == ols->ols_handle.cookie)) + if (!ergo(olock, olock->l_handle.h_cookie == ols->ols_handle.cookie)) return 0; if (!ergo(handle_used, - ergo(lock != NULL && olock != NULL, lock == olock) && - ergo(lock == NULL, olock == NULL))) + ergo(lock && olock, lock == olock) && + ergo(!lock, !olock))) return 0; /* * Check that ->ols_handle and ->ols_lock are consistent, but * take into account that they are set at the different time. */ if (!ergo(ols->ols_state == OLS_CANCELLED, - olock == NULL && !handle_used)) + !olock && !handle_used)) return 0; /* * DLM lock is destroyed only after we have seen cancellation * ast. */ - if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, - ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) + if (!ergo(olock && ols->ols_state < OLS_CANCELLED, + ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) return 0; if (!ergo(ols->ols_state == OLS_GRANTED, - olock != NULL && - olock->l_req_mode == olock->l_granted_mode && - ols->ols_hold)) + olock && olock->l_req_mode == olock->l_granted_mode && + ols->ols_hold)) return 0; return 1; } @@ -149,14 +147,15 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) spin_lock(&osc_ast_guard); dlmlock = olck->ols_lock; - if (dlmlock == NULL) { + if (!dlmlock) { spin_unlock(&osc_ast_guard); return; } olck->ols_lock = NULL; /* wb(); --- for all who checks (ols->ols_lock != NULL) before - * call to osc_lock_detach() */ + * call to osc_lock_detach() + */ dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; spin_unlock(&osc_ast_guard); @@ -171,7 +170,8 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) /* Must get the value under the lock to avoid possible races. */ old_kms = cl2osc(obj)->oo_oinfo->loi_kms; /* Update the kms. Need to loop all granted locks. 
- * Not a problem for the client */ + * Not a problem for the client + */ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); cl_object_attr_set(env, obj, attr, CAT_KMS); @@ -223,8 +223,7 @@ static int osc_lock_unuse(const struct lu_env *env, /* * Move lock into OLS_RELEASED state before calling * osc_cancel_base() so that possible synchronous cancellation - * (that always happens e.g., for liblustre) sees that lock is - * released. + * sees that lock is released. */ ols->ols_state = OLS_RELEASED; return osc_lock_unhold(ols); @@ -247,7 +246,7 @@ static void osc_lock_fini(const struct lu_env *env, * lock is destroyed immediately after upcall. */ osc_lock_unhold(ols); - LASSERT(ols->ols_lock == NULL); + LASSERT(!ols->ols_lock); LASSERT(atomic_read(&ols->ols_pageref) == 0 || atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC); @@ -292,7 +291,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock) lock_res_and_lock(dlm_lock); spin_lock(&osc_ast_guard); olck = dlm_lock->l_ast_data; - if (olck != NULL) { + if (olck) { struct cl_lock *lock = olck->ols_cl.cls_lock; /* * If osc_lock holds a reference on ldlm lock, return it even @@ -359,13 +358,13 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, __u64 size; dlmlock = olck->ols_lock; - LASSERT(dlmlock != NULL); /* re-grab LVB from a dlm lock under DLM spin-locks. */ *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; size = lvb->lvb_size; /* Extend KMS up to the end of this lock and no further - * A lock on [x,y] means a KMS of up to y + 1 bytes! */ + * A lock on [x,y] means a KMS of up to y + 1 bytes! + */ if (size > dlmlock->l_policy_data.l_extent.end) size = dlmlock->l_policy_data.l_extent.end + 1; if (size >= oinfo->loi_kms) { @@ -429,7 +428,8 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck, * to take a semaphore on a parent lock. This is safe, because * spin-locks are needed to protect consistency of * dlmlock->l_*_mode and LVB, and we have finished processing - * them. */ + * them. + */ unlock_res_and_lock(dlmlock); cl_lock_modify(env, lock, descr); cl_lock_signal(env, lock); @@ -444,12 +444,12 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) struct ldlm_lock *dlmlock; dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0); - LASSERT(dlmlock != NULL); + LASSERT(dlmlock); lock_res_and_lock(dlmlock); spin_lock(&osc_ast_guard); LASSERT(dlmlock->l_ast_data == olck); - LASSERT(olck->ols_lock == NULL); + LASSERT(!olck->ols_lock); olck->ols_lock = dlmlock; spin_unlock(&osc_ast_guard); @@ -470,7 +470,8 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) olck->ols_hold = 1; /* lock reference taken by ldlm_handle2lock_long() is owned by - * osc_lock and released in osc_lock_detach() */ + * osc_lock and released in osc_lock_detach() + */ lu_ref_add(&dlmlock->l_reference, "osc_lock", olck); olck->ols_has_ref = 1; } @@ -508,10 +509,10 @@ static int osc_lock_upcall(void *cookie, int errcode) struct ldlm_lock *dlmlock; dlmlock = ldlm_handle2lock(&olck->ols_handle); - if (dlmlock != NULL) { + if (dlmlock) { lock_res_and_lock(dlmlock); spin_lock(&osc_ast_guard); - LASSERT(olck->ols_lock == NULL); + LASSERT(!olck->ols_lock); dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; spin_unlock(&osc_ast_guard); @@ -548,7 +549,8 @@ static int osc_lock_upcall(void *cookie, int errcode) /* For AGL case, the RPC sponsor may exits the cl_lock * processing without wait() called before related OSC * lock upcall(). 
So update the lock status according - * to the enqueue result inside AGL upcall(). */ + * to the enqueue result inside AGL upcall(). + */ if (olck->ols_agl) { lock->cll_flags |= CLF_FROM_UPCALL; cl_wait_try(env, lock); @@ -571,7 +573,8 @@ static int osc_lock_upcall(void *cookie, int errcode) lu_ref_del(&lock->cll_reference, "upcall", lock); /* This maybe the last reference, so must be called after - * cl_lock_mutex_put(). */ + * cl_lock_mutex_put(). + */ cl_lock_put(env, lock); cl_env_nested_put(&nest, env); @@ -634,7 +637,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env, cancel = 0; olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; cl_lock_mutex_get(env, lock); LINVRNT(osc_lock_invariant(olck)); @@ -786,17 +789,17 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, env = cl_env_nested_get(&nest); if (!IS_ERR(env)) { olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; cl_lock_mutex_get(env, lock); /* * ldlm_handle_cp_callback() copied LVB from request * to lock->l_lvb_data, store it in osc_lock. */ - LASSERT(dlmlock->l_lvb_data != NULL); + LASSERT(dlmlock->l_lvb_data); lock_res_and_lock(dlmlock); olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; - if (olck->ols_lock == NULL) { + if (!olck->ols_lock) { /* * upcall (osc_lock_upcall()) hasn't yet been * called. Do nothing now, upcall will bind @@ -850,14 +853,15 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) * environment. */ olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; /* Do not grab the mutex of cl_lock for glimpse. * See LU-1274 for details. * BTW, it's okay for cl_lock to be cancelled during * this period because server can handle this race. * See ldlm_server_glimpse_ast() for details. - * cl_lock_mutex_get(env, lock); */ + * cl_lock_mutex_get(env, lock); + */ cap = &req->rq_pill; req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, @@ -1017,7 +1021,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); /* make it enqueue anyway for glimpse lock, because we actually - * don't need to cancel any conflicting locks. */ + * don't need to cancel any conflicting locks. + */ if (olck->ols_glimpse) return 0; @@ -1051,7 +1056,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, * imagine that client has PR lock on [0, 1000], and thread T0 * is doing lockless IO in [500, 1500] region. Concurrent * thread T1 can see lockless data in [500, 1000], which is - * wrong, because these data are possibly stale. */ + * wrong, because these data are possibly stale. 
+ */ if (!lockless && osc_lock_compatible(olck, scan_ols)) continue; @@ -1074,7 +1080,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, } else { CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n", lock, conflict); - LASSERT(lock->cll_conflict == NULL); + LASSERT(!lock->cll_conflict); lu_ref_add(&conflict->cll_reference, "cancel-wait", lock); lock->cll_conflict = conflict; @@ -1111,7 +1117,7 @@ static int osc_lock_enqueue(const struct lu_env *env, "Impossible state: %d\n", ols->ols_state); LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ), - "lock = %p, ols = %p\n", lock, ols); + "lock = %p, ols = %p\n", lock, ols); result = osc_lock_enqueue_wait(env, ols); if (result == 0) { @@ -1123,7 +1129,8 @@ static int osc_lock_enqueue(const struct lu_env *env, struct ldlm_enqueue_info *einfo = &ols->ols_einfo; /* lock will be passed as upcall cookie, - * hold ref to prevent to be released. */ + * hold ref to prevent to be released. + */ cl_lock_hold_add(env, lock, "upcall", lock); /* a user for lock also */ cl_lock_user_add(env, lock); @@ -1137,12 +1144,12 @@ static int osc_lock_enqueue(const struct lu_env *env, ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname); osc_lock_build_policy(env, lock, policy); result = osc_enqueue_base(osc_export(obj), resname, - &ols->ols_flags, policy, - &ols->ols_lvb, - obj->oo_oinfo->loi_kms_valid, - osc_lock_upcall, - ols, einfo, &ols->ols_handle, - PTLRPCD_SET, 1, ols->ols_agl); + &ols->ols_flags, policy, + &ols->ols_lvb, + obj->oo_oinfo->loi_kms_valid, + osc_lock_upcall, + ols, einfo, &ols->ols_handle, + PTLRPCD_SET, 1, ols->ols_agl); if (result != 0) { cl_lock_user_del(env, lock); cl_lock_unhold(env, lock, "upcall", lock); @@ -1174,7 +1181,8 @@ static int osc_lock_wait(const struct lu_env *env, } else if (olck->ols_agl) { if (lock->cll_flags & CLF_FROM_UPCALL) /* It is from enqueue RPC reply upcall for - * updating state. Do not re-enqueue. */ + * updating state. Do not re-enqueue. + */ return -ENAVAIL; olck->ols_state = OLS_NEW; } else { @@ -1197,7 +1205,7 @@ static int osc_lock_wait(const struct lu_env *env, } LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED && - lock->cll_error == 0, olck->ols_lock != NULL)); + lock->cll_error == 0, olck->ols_lock)); return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT; } @@ -1235,7 +1243,8 @@ static int osc_lock_use(const struct lu_env *env, LASSERT(lock->cll_state == CLS_INTRANSIT); LASSERT(lock->cll_users > 0); /* set a flag for osc_dlm_blocking_ast0() to signal the - * lock.*/ + * lock. + */ olck->ols_ast_wait = 1; rc = CLO_WAIT; } @@ -1257,11 +1266,12 @@ static int osc_lock_flush(struct osc_lock *ols, int discard) if (descr->cld_mode >= CLM_WRITE) { result = osc_cache_writeback_range(env, obj, - descr->cld_start, descr->cld_end, - 1, discard); + descr->cld_start, + descr->cld_end, + 1, discard); LDLM_DEBUG(ols->ols_lock, - "lock %p: %d pages were %s.\n", lock, result, - discard ? "discarded" : "written"); + "lock %p: %d pages were %s.\n", lock, result, + discard ? 
"discarded" : "written"); if (result > 0) result = 0; } @@ -1306,7 +1316,7 @@ static void osc_lock_cancel(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); LINVRNT(osc_lock_invariant(olck)); - if (dlmlock != NULL) { + if (dlmlock) { int do_cancel; discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA); @@ -1318,7 +1328,8 @@ static void osc_lock_cancel(const struct lu_env *env, /* Now that we're the only user of dlm read/write reference, * mostly the ->l_readers + ->l_writers should be zero. * However, there is a corner case. - * See bug 18829 for details.*/ + * See bug 18829 for details. + */ do_cancel = (dlmlock->l_readers == 0 && dlmlock->l_writers == 0); dlmlock->l_flags |= LDLM_FL_CBPENDING; @@ -1382,7 +1393,7 @@ static void osc_lock_state(const struct lu_env *env, if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) { struct osc_io *oio = osc_env_io(env); - LASSERT(lock->ols_owner == NULL); + LASSERT(!lock->ols_owner); lock->ols_owner = oio; } else if (state != CLS_HELD) lock->ols_owner = NULL; @@ -1517,7 +1528,8 @@ static void osc_lock_lockless_state(const struct lu_env *env, lock->ols_owner = oio; /* set the io to be lockless if this lock is for io's - * host object */ + * host object + */ if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj)) oio->oi_lockless = 1; } @@ -1555,8 +1567,8 @@ int osc_lock_init(const struct lu_env *env, struct osc_lock *clk; int result; - clk = kmem_cache_alloc(osc_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (clk != NULL) { + clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS); + if (clk) { __u32 enqflags = lock->cll_descr.cld_enq_flags; osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo); @@ -1578,8 +1590,8 @@ int osc_lock_init(const struct lu_env *env, if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA)) clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; - LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n", - lock, clk, clk->ols_flags); + LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx", + lock, clk, clk->ols_flags); result = 0; } else @@ -1599,9 +1611,9 @@ int osc_dlm_lock_pageref(struct ldlm_lock *dlm) * doesn't matter because in the worst case we don't cancel a lock * which we actually can, that's no harm. 
*/ - if (olock != NULL && + if (olock && atomic_add_return(_PAGEREF_MAGIC, - &olock->ols_pageref) != _PAGEREF_MAGIC) { + &olock->ols_pageref) != _PAGEREF_MAGIC) { atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref); rc = 1; } diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c index fdd6219aacf6..9d474fcdd9a7 100644 --- a/drivers/staging/lustre/lustre/osc/osc_object.c +++ b/drivers/staging/lustre/lustre/osc/osc_object.c @@ -113,7 +113,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj) LASSERT(list_empty(&osc->oo_write_item)); LASSERT(list_empty(&osc->oo_read_item)); - LASSERT(osc->oo_root.rb_node == NULL); + LASSERT(!osc->oo_root.rb_node); LASSERT(list_empty(&osc->oo_hp_exts)); LASSERT(list_empty(&osc->oo_urgent_exts)); LASSERT(list_empty(&osc->oo_rpc_exts)); @@ -255,8 +255,8 @@ struct lu_object *osc_object_alloc(const struct lu_env *env, struct osc_object *osc; struct lu_object *obj; - osc = kmem_cache_alloc(osc_object_kmem, GFP_NOFS | __GFP_ZERO); - if (osc != NULL) { + osc = kmem_cache_zalloc(osc_object_kmem, GFP_NOFS); + if (osc) { obj = osc2lu(osc); lu_object_init(obj, NULL, dev); osc->oo_cl.co_ops = &osc_ops; diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c index 2439d804fe75..d720b1a1c18c 100644 --- a/drivers/staging/lustre/lustre/osc/osc_page.c +++ b/drivers/staging/lustre/lustre/osc/osc_page.c @@ -51,111 +51,12 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, * @{ */ -/* - * Comment out osc_page_protected because it may sleep inside the - * the client_obd_list_lock. - * client_obd_list_lock -> osc_ap_completion -> osc_completion -> - * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base - * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep. - */ -#if 0 -static int osc_page_is_dlocked(const struct lu_env *env, - const struct osc_page *opg, - enum cl_lock_mode mode, int pending, int unref) -{ - struct cl_page *page; - struct osc_object *obj; - struct osc_thread_info *info; - struct ldlm_res_id *resname; - struct lustre_handle *lockh; - ldlm_policy_data_t *policy; - ldlm_mode_t dlmmode; - __u64 flags; - - might_sleep(); - - info = osc_env_info(env); - resname = &info->oti_resname; - policy = &info->oti_policy; - lockh = &info->oti_handle; - page = opg->ops_cl.cpl_page; - obj = cl2osc(opg->ops_cl.cpl_obj); - - flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED; - if (pending) - flags |= LDLM_FL_CBPENDING; - - dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW; - osc_lock_build_res(env, obj, resname); - osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index); - return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy, - dlmmode, &flags, NULL, lockh, unref); -} - -/** - * Checks an invariant that a page in the cache is covered by a lock, as - * needed. - */ -static int osc_page_protected(const struct lu_env *env, - const struct osc_page *opg, - enum cl_lock_mode mode, int unref) -{ - struct cl_object_header *hdr; - struct cl_lock *scan; - struct cl_page *page; - struct cl_lock_descr *descr; - int result; - - LINVRNT(!opg->ops_temp); - - page = opg->ops_cl.cpl_page; - if (page->cp_owner != NULL && - cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER) - /* - * If IO is done without locks (liblustre, or lloop), lock is - * not required. 
- */ - result = 1; - else - /* otherwise check for a DLM lock */ - result = osc_page_is_dlocked(env, opg, mode, 1, unref); - if (result == 0) { - /* maybe this page is a part of a lockless io? */ - hdr = cl_object_header(opg->ops_cl.cpl_obj); - descr = &osc_env_info(env)->oti_descr; - descr->cld_mode = mode; - descr->cld_start = page->cp_index; - descr->cld_end = page->cp_index; - spin_lock(&hdr->coh_lock_guard); - list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) { - /* - * Lock-less sub-lock has to be either in HELD state - * (when io is actively going on), or in CACHED state, - * when top-lock is being unlocked: - * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse(). - */ - if ((scan->cll_state == CLS_HELD || - scan->cll_state == CLS_CACHED) && - cl_lock_ext_match(&scan->cll_descr, descr)) { - struct osc_lock *olck; - - olck = osc_lock_at(scan); - result = osc_lock_is_lockless(olck); - break; - } - } - spin_unlock(&hdr->coh_lock_guard); - } - return result; -} -#else static int osc_page_protected(const struct lu_env *env, const struct osc_page *opg, enum cl_lock_mode mode, int unref) { return 1; } -#endif /***************************************************************************** * @@ -168,7 +69,7 @@ static void osc_page_fini(const struct lu_env *env, struct osc_page *opg = cl2osc_page(slice); CDEBUG(D_TRACE, "%p\n", opg); - LASSERT(opg->ops_lock == NULL); + LASSERT(!opg->ops_lock); } static void osc_page_transfer_get(struct osc_page *opg, const char *label) @@ -204,7 +105,8 @@ static void osc_page_transfer_add(const struct lu_env *env, struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); /* ops_lru and ops_inflight share the same field, so take it from LRU - * first and then use it as inflight. */ + * first and then use it as inflight. + */ osc_lru_del(osc_cli(obj), opg, false); spin_lock(&obj->oo_seatbelt); @@ -232,9 +134,10 @@ static int osc_page_cache_add(const struct lu_env *env, /* for sync write, kernel will wait for this page to be flushed before * osc_io_end() is called, so release it earlier. - * for mkwrite(), it's known there is no further pages. */ + * for mkwrite(), it's known there is no further pages. 
+ */ if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) { - if (oio->oi_active != NULL) { + if (oio->oi_active) { osc_extent_release(env, oio->oi_active); oio->oi_active = NULL; } @@ -258,7 +161,7 @@ static int osc_page_addref_lock(const struct lu_env *env, struct osc_lock *olock; int rc; - LASSERT(opg->ops_lock == NULL); + LASSERT(!opg->ops_lock); olock = osc_lock_at(lock); if (atomic_inc_return(&olock->ols_pageref) <= 0) { @@ -278,7 +181,7 @@ static void osc_page_putref_lock(const struct lu_env *env, struct cl_lock *lock = opg->ops_lock; struct osc_lock *olock; - LASSERT(lock != NULL); + LASSERT(lock); olock = osc_lock_at(lock); atomic_dec(&olock->ols_pageref); @@ -296,7 +199,7 @@ static int osc_page_is_under_lock(const struct lu_env *env, lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page, NULL, 1, 0); - if (lock != NULL) { + if (lock) { if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0) result = -EBUSY; cl_lock_put(env, lock); @@ -424,7 +327,7 @@ static void osc_page_delete(const struct lu_env *env, } spin_lock(&obj->oo_seatbelt); - if (opg->ops_submitter != NULL) { + if (opg->ops_submitter) { LASSERT(!list_empty(&opg->ops_inflight)); list_del_init(&opg->ops_inflight); opg->ops_submitter = NULL; @@ -434,8 +337,8 @@ static void osc_page_delete(const struct lu_env *env, osc_lru_del(osc_cli(obj), opg, true); } -void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice, - int from, int to) +static void osc_page_clip(const struct lu_env *env, + const struct cl_page_slice *slice, int from, int to) { struct osc_page *opg = cl2osc_page(slice); struct osc_async_page *oap = &opg->ops_oap; @@ -458,7 +361,8 @@ static int osc_page_cancel(const struct lu_env *env, LINVRNT(osc_page_protected(env, opg, CLM_READ, 0)); /* Check if the transferring against this page - * is completed, or not even queued. */ + * is completed, or not even queued. + */ if (opg->ops_transfer_pinned) /* FIXME: may not be interrupted.. */ rc = osc_cancel_async_page(env, opg); @@ -499,7 +403,7 @@ static const struct cl_page_operations osc_page_ops = { }; int osc_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct osc_object *osc = cl2osc(obj); struct osc_page *opg = cl_object_page_slice(obj, page); @@ -509,20 +413,20 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj, opg->ops_to = PAGE_CACHE_SIZE; result = osc_prep_async_page(osc, opg, vmpage, - cl_offset(obj, page->cp_index)); + cl_offset(obj, page->cp_index)); if (result == 0) { struct osc_io *oio = osc_env_io(env); opg->ops_srvlock = osc_io_srvlock(oio); - cl_page_slice_add(page, &opg->ops_cl, obj, - &osc_page_ops); + cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops); } /* * Cannot assert osc_page_protected() here as read-ahead * creates temporary pages outside of a lock. 
*/ /* ops_inflight and ops_lru are the same field, but it doesn't - * hurt to initialize it twice :-) */ + * hurt to initialize it twice :-) + */ INIT_LIST_HEAD(&opg->ops_inflight); INIT_LIST_HEAD(&opg->ops_lru); @@ -557,7 +461,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC; if (!client_is_remote(osc_export(obj)) && - capable(CFS_CAP_SYS_RESOURCE)) { + capable(CFS_CAP_SYS_RESOURCE)) { oap->oap_brw_flags |= OBD_BRW_NOQUOTA; oap->oap_cmd |= OBD_BRW_NOQUOTA; } @@ -581,7 +485,8 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq); static atomic_t osc_lru_waiters = ATOMIC_INIT(0); /* LRU pages are freed in batch mode. OSC should at least free this - * number of pages to avoid running out of LRU budget, and.. */ + * number of pages to avoid running out of LRU budget, and.. + */ static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ /* free this number at most otherwise it will take too long time to finish. */ static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ @@ -590,7 +495,8 @@ static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ * we should free slots aggressively. In this way, slots are freed in a steady * step to maintain fairness among OSCs. * - * Return how many LRU pages should be freed. */ + * Return how many LRU pages should be freed. + */ static int osc_cache_too_much(struct client_obd *cli) { struct cl_client_cache *cache = cli->cl_cache; @@ -602,7 +508,8 @@ static int osc_cache_too_much(struct client_obd *cli) return min(pages, lru_shrink_max); /* if it's going to run out LRU slots, we should free some, but not - * too much to maintain fairness among OSCs. */ + * too much to maintain fairness among OSCs. + */ if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) { unsigned long tmp; @@ -630,7 +537,8 @@ static int discard_pagevec(const struct lu_env *env, struct cl_io *io, /* free LRU page only if nobody is using it. * This check is necessary to avoid freeing the pages * having already been removed from LRU and pinned - * for IO. */ + * for IO. + */ if (!cl_page_in_use(page)) { cl_page_unmap(env, io, page); cl_page_discard(env, io, page); @@ -655,6 +563,7 @@ int osc_lru_shrink(struct client_obd *cli, int target) struct cl_object *clobj = NULL; struct cl_page **pvec; struct osc_page *opg; + struct osc_page *temp; int maxscan = 0; int count = 0; int index = 0; @@ -674,28 +583,26 @@ int osc_lru_shrink(struct client_obd *cli, int target) client_obd_list_lock(&cli->cl_lru_list_lock); atomic_inc(&cli->cl_lru_shrinkers); maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list)); - while (!list_empty(&cli->cl_lru_list)) { + list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) { struct cl_page *page; if (--maxscan < 0) break; - opg = list_entry(cli->cl_lru_list.next, struct osc_page, - ops_lru); page = cl_page_top(opg->ops_cl.cpl_page); if (cl_page_in_use_noref(page)) { list_move_tail(&opg->ops_lru, &cli->cl_lru_list); continue; } - LASSERT(page->cp_obj != NULL); + LASSERT(page->cp_obj); if (clobj != page->cp_obj) { struct cl_object *tmp = page->cp_obj; cl_object_get(tmp); client_obd_list_unlock(&cli->cl_lru_list_lock); - if (clobj != NULL) { + if (clobj) { count -= discard_pagevec(env, io, pvec, index); index = 0; @@ -720,11 +627,13 @@ int osc_lru_shrink(struct client_obd *cli, int target) /* move this page to the end of list as it will be discarded * soon. 
The page will be finally removed from LRU list in - * osc_page_delete(). */ + * osc_page_delete(). + */ list_move_tail(&opg->ops_lru, &cli->cl_lru_list); /* it's okay to grab a refcount here w/o holding lock because - * it has to grab cl_lru_list_lock to delete the page. */ + * it has to grab cl_lru_list_lock to delete the page. + */ cl_page_get(page); pvec[index++] = page; if (++count >= target) @@ -740,7 +649,7 @@ int osc_lru_shrink(struct client_obd *cli, int target) } client_obd_list_unlock(&cli->cl_lru_list_lock); - if (clobj != NULL) { + if (clobj) { count -= discard_pagevec(env, io, pvec, index); cl_io_fini(env, io); @@ -775,7 +684,8 @@ static void osc_lru_add(struct client_obd *cli, struct osc_page *opg) } /* delete page from LRUlist. The page can be deleted from LRUlist for two - * reasons: redirtied or deleted from page cache. */ + * reasons: redirtied or deleted from page cache. + */ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) { if (opg->ops_in_lru) { @@ -797,7 +707,8 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) * this osc occupies too many LRU pages and kernel is * stealing one of them. * cl_lru_shrinkers is to avoid recursive call in case - * we're already in the context of osc_lru_shrink(). */ + * we're already in the context of osc_lru_shrink(). + */ if (atomic_read(&cli->cl_lru_shrinkers) == 0 && !memory_pressure_get()) osc_lru_shrink(cli, osc_cache_too_much(cli)); @@ -819,22 +730,23 @@ static int osc_lru_reclaim(struct client_obd *cli) int max_scans; int rc; - LASSERT(cache != NULL); + LASSERT(cache); rc = osc_lru_shrink(cli, lru_shrink_min); if (rc != 0) { CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n", - cli->cl_import->imp_obd->obd_name, rc, cli); + cli->cl_import->imp_obd->obd_name, rc, cli); return rc; } CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_read(&cli->cl_lru_in_list), - atomic_read(&cli->cl_lru_busy)); + cli->cl_import->imp_obd->obd_name, cli, + atomic_read(&cli->cl_lru_in_list), + atomic_read(&cli->cl_lru_busy)); /* Reclaim LRU slots from other client_obd as it can't free enough - * from its own. This should rarely happen. */ + * from its own. This should rarely happen. 
+ */ spin_lock(&cache->ccc_lru_lock); LASSERT(!list_empty(&cache->ccc_lru)); @@ -844,12 +756,12 @@ static int osc_lru_reclaim(struct client_obd *cli) max_scans = atomic_read(&cache->ccc_users); while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) { cli = list_entry(cache->ccc_lru.next, struct client_obd, - cl_lru_osc); + cl_lru_osc); CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_read(&cli->cl_lru_in_list), - atomic_read(&cli->cl_lru_busy)); + cli->cl_import->imp_obd->obd_name, cli, + atomic_read(&cli->cl_lru_in_list), + atomic_read(&cli->cl_lru_busy)); list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru); if (atomic_read(&cli->cl_lru_in_list) > 0) { @@ -864,7 +776,7 @@ static int osc_lru_reclaim(struct client_obd *cli) spin_unlock(&cache->ccc_lru_lock); CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n", - cli->cl_import->imp_obd->obd_name, cli, rc); + cli->cl_import->imp_obd->obd_name, cli, rc); return rc; } @@ -875,7 +787,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, struct client_obd *cli = osc_cli(obj); int rc = 0; - if (cli->cl_cache == NULL) /* shall not be in LRU */ + if (!cli->cl_cache) /* shall not be in LRU */ return 0; LASSERT(atomic_read(cli->cl_lru_left) >= 0); @@ -892,15 +804,16 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, cond_resched(); /* slowest case, all of caching pages are busy, notifying - * other OSCs that we're lack of LRU slots. */ + * other OSCs that we're lack of LRU slots. + */ atomic_inc(&osc_lru_waiters); gen = atomic_read(&cli->cl_lru_in_list); rc = l_wait_event(osc_lru_waitq, - atomic_read(cli->cl_lru_left) > 0 || - (atomic_read(&cli->cl_lru_in_list) > 0 && - gen != atomic_read(&cli->cl_lru_in_list)), - &lwi); + atomic_read(cli->cl_lru_left) > 0 || + (atomic_read(&cli->cl_lru_in_list) > 0 && + gen != atomic_read(&cli->cl_lru_in_list)), + &lwi); atomic_dec(&osc_lru_waiters); if (rc < 0) diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c index e70e7961d763..194d8ede40a2 100644 --- a/drivers/staging/lustre/lustre/osc/osc_quota.c +++ b/drivers/staging/lustre/lustre/osc/osc_quota.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -35,8 +30,8 @@ static inline struct osc_quota_info *osc_oqi_alloc(u32 id) { struct osc_quota_info *oqi; - oqi = kmem_cache_alloc(osc_quota_kmem, GFP_NOFS | __GFP_ZERO); - if (oqi != NULL) + oqi = kmem_cache_zalloc(osc_quota_kmem, GFP_NOFS); + if (oqi) oqi->oqi_id = id; return oqi; @@ -52,10 +47,12 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]) oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); if (oqi) { /* do not try to access oqi here, it could have been - * freed by osc_quota_setdq() */ + * freed by osc_quota_setdq() + */ /* the slot is busy, the user is about to run out of - * quota space on this OST */ + * quota space on this OST + */ CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n", type == USRQUOTA ? 
"user" : "grout", qid[type]); return NO_QUOTA; @@ -89,12 +86,13 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); if ((flags & FL_QUOTA_FLAG(type)) != 0) { /* This ID is getting close to its quota limit, let's - * switch to sync I/O */ - if (oqi != NULL) + * switch to sync I/O + */ + if (oqi) continue; oqi = osc_oqi_alloc(qid[type]); - if (oqi == NULL) { + if (!oqi) { rc = -ENOMEM; break; } @@ -113,8 +111,9 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], qid[type], rc); } else { /* This ID is now off the hook, let's remove it from - * the hash table */ - if (oqi == NULL) + * the hash table + */ + if (!oqi) continue; oqi = cfs_hash_del_key(cli->cl_quota_hash[type], @@ -147,7 +146,7 @@ oqi_keycmp(const void *key, struct hlist_node *hnode) struct osc_quota_info *oqi; u32 uid; - LASSERT(key != NULL); + LASSERT(key); uid = *((u32 *)key); oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash); @@ -218,7 +217,7 @@ int osc_quota_setup(struct obd_device *obd) CFS_HASH_MAX_THETA, "a_hash_ops, CFS_HASH_DEFAULT); - if (cli->cl_quota_hash[type] == NULL) + if (!cli->cl_quota_hash[type]) break; } @@ -252,7 +251,7 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION, OST_QUOTACTL); - if (req == NULL) + if (!req) return -ENOMEM; oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -294,7 +293,7 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION, OST_QUOTACHECK); - if (req == NULL) + if (!req) return -ENOMEM; body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -302,8 +301,8 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, ptlrpc_request_set_replen(req); - /* the next poll will find -ENODATA, that means quotacheck is - * going on */ + /* the next poll will find -ENODATA, that means quotacheck is going on + */ cli->cl_qchk_stat = -ENODATA; rc = ptlrpc_queue_wait(req); if (rc) diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c index 7034f0a942c5..74805f1ae888 100644 --- a/drivers/staging/lustre/lustre/osc/osc_request.c +++ b/drivers/staging/lustre/lustre/osc/osc_request.c @@ -104,7 +104,6 @@ struct osc_enqueue_args { static void osc_release_ppga(struct brw_page **ppga, u32 count); static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc); -static int osc_cleanup(struct obd_device *obd); /* Pack OSC object metadata for disk storage (LE byte order). 
*/ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, @@ -113,18 +112,18 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, int lmm_size; lmm_size = sizeof(**lmmp); - if (lmmp == NULL) + if (!lmmp) return lmm_size; - if (*lmmp != NULL && lsm == NULL) { + if (*lmmp && !lsm) { kfree(*lmmp); *lmmp = NULL; return 0; - } else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) { + } else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) { return -EBADF; } - if (*lmmp == NULL) { + if (!*lmmp) { *lmmp = kzalloc(lmm_size, GFP_NOFS); if (!*lmmp) return -ENOMEM; @@ -143,7 +142,7 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, int lsm_size; struct obd_import *imp = class_exp2cliimp(exp); - if (lmm != NULL) { + if (lmm) { if (lmm_bytes < sizeof(*lmm)) { CERROR("%s: lov_mds_md too small: %d, need %d\n", exp->exp_obd->obd_name, lmm_bytes, @@ -160,23 +159,23 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, } lsm_size = lov_stripe_md_size(1); - if (lsmp == NULL) + if (!lsmp) return lsm_size; - if (*lsmp != NULL && lmm == NULL) { + if (*lsmp && !lmm) { kfree((*lsmp)->lsm_oinfo[0]); kfree(*lsmp); *lsmp = NULL; return 0; } - if (*lsmp == NULL) { + if (!*lsmp) { *lsmp = kzalloc(lsm_size, GFP_NOFS); - if (unlikely(*lsmp == NULL)) + if (unlikely(!*lsmp)) return -ENOMEM; (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), GFP_NOFS); - if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) { + if (unlikely(!(*lsmp)->lsm_oinfo[0])) { kfree(*lsmp); return -ENOMEM; } @@ -185,11 +184,11 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, return -EBADF; } - if (lmm != NULL) + if (lmm) /* XXX zero *lsmp? */ ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi); - if (imp != NULL && + if (imp && (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES)) (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes; else @@ -246,7 +245,7 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); @@ -276,7 +275,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); @@ -294,7 +293,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -321,7 +320,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); @@ -339,7 +338,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -362,7 +361,7 @@ static int osc_setattr_interpret(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -384,7 +383,7 @@ int 
osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); @@ -451,7 +450,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, } req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -482,7 +481,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, goto out_req; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out_req; } @@ -500,7 +499,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, lsm->lsm_oi = oa->o_oi; *ea = lsm; - if (oti != NULL) { + if (oti) { oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); if (oa->o_valid & OBD_MD_FLCOOKIE) { @@ -530,7 +529,7 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH); @@ -573,7 +572,7 @@ static int osc_sync_interpret(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { CERROR("can't unpack ost_body\n"); rc = -EPROTO; goto out; @@ -595,7 +594,7 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC); @@ -629,10 +628,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, /* Find and cancel locally locks matched by @mode in the resource found by * @objid. Found locks are added into @cancel list. Returns the amount of - * locks added to @cancels list. */ + * locks added to @cancels list. + */ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, struct list_head *cancels, - ldlm_mode_t mode, __u64 lock_flags) + enum ldlm_mode mode, __u64 lock_flags) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; struct ldlm_res_id res_id; @@ -644,13 +644,14 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, * * This distinguishes from a case when ELC is not supported originally, * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. */ + * locally, without sending any RPC. + */ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) return 0; ostid_build_res_name(&oa->o_oi, &res_id); res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); @@ -723,7 +724,8 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp, * If the client dies, or the OST is down when the object should be destroyed, * the records are not cancelled, and when the OST reconnects to the MDS next, * it will retrieve the llog unlink logs and then sends the log cancellation - * cookies to the MDS after committing destroy transactions. */ + * cookies to the MDS after committing destroy transactions. 
+ */ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, struct obdo *oa, struct lov_stripe_md *ea, struct obd_trans_info *oti, struct obd_export *md_export) @@ -743,7 +745,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, LDLM_FL_DISCARD_DATA); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -758,7 +760,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ ptlrpc_at_set_req_timeout(req); - if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && oa->o_valid & OBD_MD_FLCOOKIE) oa->o_lcookie = *oti->oti_logcookies; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); @@ -769,7 +771,8 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, /* If osc_destroy is for destroying the unlink orphan, * sent from MDT to OST, which should not be blocked here, * because the process might be triggered by ptlrpcd, and - * it is not good to block ptlrpcd thread (b=16006)*/ + * it is not good to block ptlrpcd thread (b=16006 + **/ if (!(oa->o_flags & OBD_FL_DELORPHAN)) { req->rq_interpret_reply = osc_destroy_interpret; if (!osc_can_send_destroy(cli)) { @@ -810,7 +813,8 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, (long)(obd_max_dirty_pages + 1))) { /* The atomic_read() allowing the atomic_inc() are * not covered by a lock thus they may safely race and trip - * this CERROR() unless we add in a small fudge factor (+1). */ + * this CERROR() unless we add in a small fudge factor (+1). + */ CERROR("dirty %d - %d > system dirty_max %d\n", atomic_read(&obd_dirty_pages), atomic_read(&obd_dirty_transit_pages), @@ -839,7 +843,7 @@ void osc_update_next_shrink(struct client_obd *cli) { cli->cl_next_shrink_grant = cfs_time_shift(cli->cl_grant_shrink_interval); - CDEBUG(D_CACHE, "next time %ld to shrink grant \n", + CDEBUG(D_CACHE, "next time %ld to shrink grant\n", cli->cl_next_shrink_grant); } @@ -900,7 +904,8 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) /* Shrink the current grant, either from some large amount to enough for a * full set of in-flight RPCs, or if we have already shrunk to that limit * then to enough for a single RPC. This avoids keeping more grant than - * needed, and avoids shrinking the grant piecemeal. */ + * needed, and avoids shrinking the grant piecemeal. + */ static int osc_shrink_grant(struct client_obd *cli) { __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * @@ -922,7 +927,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) client_obd_list_lock(&cli->cl_loi_list_lock); /* Don't shrink if we are already above or below the desired limit * We don't want to shrink below a single RPC, as that will negatively - * impact block allocation and long-term performance. */ + * impact block allocation and long-term performance. + */ if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; @@ -970,7 +976,8 @@ static int osc_should_shrink_grant(struct client_obd *client) if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) { /* Get the current RPC size directly, instead of going via: * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) - * Keep comment here so that it can be found by searching. 
*/ + * Keep comment here so that it can be found by searching. + */ int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; if (client->cl_import->imp_state == LUSTRE_IMP_FULL && @@ -986,8 +993,7 @@ static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data) { struct client_obd *client; - list_for_each_entry(client, &item->ti_obd_list, - cl_grant_shrink_list) { + list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) { if (osc_should_shrink_grant(client)) osc_shrink_grant(client); } @@ -1004,10 +1010,10 @@ static int osc_add_shrink_grant(struct client_obd *client) &client->cl_grant_shrink_list); if (rc) { CERROR("add grant client %s error %d\n", - client->cl_import->imp_obd->obd_name, rc); + client->cl_import->imp_obd->obd_name, rc); return rc; } - CDEBUG(D_CACHE, "add grant client %s \n", + CDEBUG(D_CACHE, "add grant client %s\n", client->cl_import->imp_obd->obd_name); osc_update_next_shrink(client); return 0; @@ -1040,7 +1046,8 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant, ocd->ocd_grant, cli->cl_dirty); /* workaround for servers which do not have the patch from - * LU-2679 */ + * LU-2679 + */ cli->cl_avail_grant = ocd->ocd_grant; } @@ -1060,7 +1067,8 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) /* We assume that the reason this OSC got a short read is because it read * beyond the end of a stripe file; i.e. lustre is reading a sparse file * via the LOV, and it _knows_ it's reading inside the file, it's just that - * this stripe never got written at or beyond this stripe offset yet. */ + * this stripe never got written at or beyond this stripe offset yet. + */ static void handle_short_read(int nob_read, u32 page_count, struct brw_page **pga) { @@ -1106,7 +1114,7 @@ static int check_write_rcs(struct ptlrpc_request *req, remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS, sizeof(*remote_rcs) * niocount); - if (remote_rcs == NULL) { + if (!remote_rcs) { CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n"); return -EPROTO; } @@ -1118,7 +1126,7 @@ static int check_write_rcs(struct ptlrpc_request *req, if (remote_rcs[i] != 0) { CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n", - i, remote_rcs[i], req); + i, remote_rcs[i], req); return -EPROTO; } } @@ -1139,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA); /* warn if we try to combine flags that we don't know to be - * safe to combine */ + * safe to combine + */ if (unlikely((p1->flag & mask) != (p2->flag & mask))) { CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n", p1->flag, p2->flag); @@ -1152,7 +1161,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) static u32 osc_checksum_bulk(int nob, u32 pg_count, struct brw_page **pga, int opc, - cksum_type_t cksum_type) + enum cksum_type cksum_type) { __u32 cksum; int i = 0; @@ -1174,7 +1183,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, int count = pga[i]->count > nob ? 
nob : pga[i]->count; /* corrupt the data before we compute the checksum, to - * simulate an OST->client data error */ + * simulate an OST->client data error + */ if (i == 0 && opc == OST_READ && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) { unsigned char *ptr = kmap(pga[i]->pg); @@ -1184,7 +1194,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, kunmap(pga[i]->pg); } cfs_crypto_hash_update_page(hdesc, pga[i]->pg, - pga[i]->off & ~CFS_PAGE_MASK, + pga[i]->off & ~CFS_PAGE_MASK, count); CDEBUG(D_PAGE, "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n", @@ -1205,7 +1215,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, cfs_crypto_hash_final(hdesc, NULL, NULL); /* For sending we only compute the wrong checksum instead - * of corrupting the data so it is still correct on a redo */ + * of corrupting the data so it is still correct on a redo + */ if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND)) cksum++; @@ -1244,7 +1255,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, opc = OST_READ; req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ); } - if (req == NULL) + if (!req) return -ENOMEM; for (niocount = i = 1; i < page_count; i++) { @@ -1266,7 +1277,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ ptlrpc_at_set_req_timeout(req); /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own - * retry logic */ + * retry logic + */ req->rq_no_retry_einprogress = 1; desc = ptlrpc_prep_bulk_imp(req, page_count, @@ -1274,7 +1286,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK, OST_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { rc = -ENOMEM; goto out; } @@ -1283,7 +1295,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, body = req_capsule_client_get(pill, &RMF_OST_BODY); ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ); niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE); - LASSERT(body != NULL && ioobj != NULL && niobuf != NULL); + LASSERT(body && ioobj && niobuf); lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); @@ -1293,7 +1305,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, * that might be send for this request. The actual number is decided * when the RPC is finally sent in ptlrpc_register_bulk(). It sends * "max - 1" for old client compatibility sending "0", and also so the - * the actual maximum is a power-of-two number, not one less. LU-1431 */ + * the actual maximum is a power-of-two number, not one less. LU-1431 + */ ioobj_max_brw_set(ioobj, desc->bd_md_max_brw); LASSERT(page_count > 0); pg_prev = pga[0]; @@ -1355,8 +1368,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, if (cli->cl_checksum && !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { /* store cl_cksum_type in a local variable since - * it can be changed via lprocfs */ - cksum_type_t cksum_type = cli->cl_cksum_type; + * it can be changed via lprocfs + */ + enum cksum_type cksum_type = cli->cl_cksum_type; if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) { oa->o_flags &= OBD_FL_LOCAL_MASK; @@ -1375,7 +1389,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, oa->o_flags |= cksum_type_pack(cksum_type); } else { /* clear out the checksum flag, in case this is a - * resend but cl_checksum is no longer set. b=11238 */ + * resend but cl_checksum is no longer set. 
b=11238 + */ oa->o_valid &= ~OBD_MD_FLCKSUM; } oa->o_cksum = body->oa.o_cksum; @@ -1415,11 +1430,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer, __u32 client_cksum, __u32 server_cksum, int nob, u32 page_count, struct brw_page **pga, - cksum_type_t client_cksum_type) + enum cksum_type client_cksum_type) { __u32 new_cksum; char *msg; - cksum_type_t cksum_type; + enum cksum_type cksum_type; if (server_cksum == client_cksum) { CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum); @@ -1472,9 +1487,9 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) return rc; } - LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc); + LASSERTF(req->rq_repmsg, "rc = %d\n", rc); body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { DEBUG_REQ(D_INFO, req, "Can't unpack body\n"); return -EPROTO; } @@ -1538,7 +1553,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) if (rc != req->rq_bulk->bd_nob_transferred) { CERROR("Unexpected rc %d (%d transferred)\n", - rc, req->rq_bulk->bd_nob_transferred); + rc, req->rq_bulk->bd_nob_transferred); return -EPROTO; } @@ -1550,7 +1565,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) __u32 server_cksum = body->oa.o_cksum; char *via = ""; char *router = ""; - cksum_type_t cksum_type; + enum cksum_type cksum_type; cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ? body->oa.o_flags : 0); @@ -1627,7 +1642,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, return rc; list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { - if (oap->oap_request != NULL) { + if (oap->oap_request) { LASSERTF(request == oap->oap_request, "request %p != oap_request %p\n", request, oap->oap_request); @@ -1638,12 +1653,14 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, } } /* New request takes over pga and oaps from old request. - * Note that copying a list_head doesn't work, need to move it... */ + * Note that copying a list_head doesn't work, need to move it... + */ aa->aa_resends++; new_req->rq_interpret_reply = request->rq_interpret_reply; new_req->rq_async_args = request->rq_async_args; /* cap resend delay to the current request timeout, this is similar to - * what ptlrpc does (see after_reply()) */ + * what ptlrpc does (see after_reply()) + */ if (aa->aa_resends > new_req->rq_timeout) new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout; else @@ -1669,7 +1686,8 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, /* XXX: This code will run into problem if we're going to support * to add a series of BRW RPCs into a self-defined ptlrpc_request_set * and wait for all of them to be finished. We should inherit request - * set from old request. */ + * set from old request. + */ ptlrpcd_add_req(new_req); DEBUG_REQ(D_INFO, new_req, "new request"); @@ -1709,7 +1727,7 @@ static void sort_brw_pages(struct brw_page **array, int num) static void osc_release_ppga(struct brw_page **ppga, u32 count) { - LASSERT(ppga != NULL); + LASSERT(ppga); kfree(ppga); } @@ -1725,7 +1743,8 @@ static int brw_interpret(const struct lu_env *env, rc = osc_brw_fini_request(req, rc); CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc); /* When server return -EINPROGRESS, client should always retry - * regardless of the number of times the bulk was resent already. */ + * regardless of the number of times the bulk was resent already. 
+ */ if (osc_recoverable_error(rc)) { if (req->rq_import_generation != req->rq_import->imp_generation) { @@ -1748,7 +1767,7 @@ static int brw_interpret(const struct lu_env *env, } list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) { - if (obj == NULL && rc == 0) { + if (!obj && rc == 0) { obj = osc2cl(ext->oe_obj); cl_object_get(obj); } @@ -1759,7 +1778,7 @@ static int brw_interpret(const struct lu_env *env, LASSERT(list_empty(&aa->aa_exts)); LASSERT(list_empty(&aa->aa_oaps)); - if (obj != NULL) { + if (obj) { struct obdo *oa = aa->aa_oa; struct cl_attr *attr = &osc_env_info(env)->oti_attr; unsigned long valid = 0; @@ -1798,7 +1817,8 @@ static int brw_interpret(const struct lu_env *env, client_obd_list_lock(&cli->cl_loi_list_lock); /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters * is called so we know whether to go to sync BRWs or wait for more - * RPCs to complete */ + * RPCs to complete + */ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) cli->cl_w_in_flight--; else @@ -1871,13 +1891,13 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS); - if (pga == NULL) { + if (!pga) { rc = -ENOMEM; goto out; } - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) { + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) { rc = -ENOMEM; goto out; } @@ -1886,7 +1906,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, list_for_each_entry(oap, &rpc_list, oap_rpc_item) { struct cl_page *page = oap2cl_page(oap); - if (clerq == NULL) { + if (!clerq) { clerq = cl_req_alloc(env, page, crt, 1 /* only 1-object rpcs for now */); if (IS_ERR(clerq)) { @@ -1907,7 +1927,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } /* always get the data for the obdo for the rpc */ - LASSERT(clerq != NULL); + LASSERT(clerq); crattr->cra_oa = oa; cl_req_attr_set(env, clerq, crattr, ~0ULL); if (lock) { @@ -1923,7 +1943,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, sort_brw_pages(pga, page_count); rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count, - pga, &req, 1, 0); + pga, &req, 1, 0); if (rc != 0) { CERROR("prep_req failed: %d\n", rc); goto out; @@ -1938,7 +1958,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, * we race with setattr (locally or in queue at OST). If OST gets * later setattr before earlier BRW (as determined by the request xid), * the OST will not use BRW timestamps. Sadly, there is no obvious - * way to do this in a single call. bug 10150 */ + * way to do this in a single call. 
bug 10150 + */ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); crattr->cra_oa = &body->oa; cl_req_attr_set(env, clerq, crattr, @@ -1955,19 +1976,20 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, aa->aa_clerq = clerq; /* queued sync pages can be torn down while the pages - * were between the pending list and the rpc */ + * were between the pending list and the rpc + */ tmp = NULL; list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { /* only one oap gets a request reference */ - if (tmp == NULL) + if (!tmp) tmp = oap; if (oap->oap_interrupted && !req->rq_intr) { CDEBUG(D_INODE, "oap %p in req %p interrupted\n", - oap, req); + oap, req); ptlrpc_mark_interrupted(req); } } - if (tmp != NULL) + if (tmp) tmp->oap_request = ptlrpc_request_addref(req); client_obd_list_lock(&cli->cl_loi_list_lock); @@ -2001,16 +2023,17 @@ out: kfree(crattr); if (rc != 0) { - LASSERT(req == NULL); + LASSERT(!req); if (oa) kmem_cache_free(obdo_cachep, oa); kfree(pga); /* this should happen rarely and is pretty bad, it makes the - * pending list not follow the dirty order */ + * pending list not follow the dirty order + */ while (!list_empty(ext_list)) { ext = list_entry(ext_list->next, struct osc_extent, - oe_link); + oe_link); list_del_init(&ext->oe_link); osc_extent_finish(env, ext, 0, rc); } @@ -2026,7 +2049,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, void *data = einfo->ei_cbdata; int set = 0; - LASSERT(lock != NULL); LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl); LASSERT(lock->l_resource->lr_type == einfo->ei_type); LASSERT(lock->l_completion_ast == einfo->ei_cb_cp); @@ -2035,7 +2057,7 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, lock_res_and_lock(lock); spin_lock(&osc_ast_guard); - if (lock->l_ast_data == NULL) + if (!lock->l_ast_data) lock->l_ast_data = data; if (lock->l_ast_data == data) set = 1; @@ -2052,7 +2074,7 @@ static int osc_set_data_with_check(struct lustre_handle *lockh, struct ldlm_lock *lock = ldlm_handle2lock(lockh); int set = 0; - if (lock != NULL) { + if (lock) { set = osc_set_lock_data_with_check(lock, einfo); LDLM_LOCK_PUT(lock); } else @@ -2064,7 +2086,8 @@ static int osc_set_data_with_check(struct lustre_handle *lockh, /* find any ldlm lock of the inode in osc * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, ldlm_iterator_t replace, void *data) { @@ -2095,7 +2118,6 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb, rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(rep != NULL); rep->lock_policy_res1 = ptlrpc_status_ntoh(rep->lock_policy_res1); if (rep->lock_policy_res1) @@ -2127,18 +2149,21 @@ static int osc_enqueue_interpret(const struct lu_env *env, __u64 *flags = aa->oa_flags; /* Make a local copy of a lock handle and a mode, because aa->oa_* - * might be freed anytime after lock upcall has been called. */ + * might be freed anytime after lock upcall has been called. + */ lustre_handle_copy(&handle, aa->oa_lockh); mode = aa->oa_ei->ei_mode; /* ldlm_cli_enqueue is holding a reference on the lock, so it must - * be valid. */ + * be valid. + */ lock = ldlm_handle2lock(&handle); /* Take an additional reference so that a blocking AST that * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed * to arrive after an upcall has been executed by - * osc_enqueue_fini(). */ + * osc_enqueue_fini(). 
+ */ ldlm_lock_addref(&handle, mode); /* Let CP AST to grant the lock first. */ @@ -2170,7 +2195,7 @@ static int osc_enqueue_interpret(const struct lu_env *env, */ ldlm_lock_decref(&handle, mode); - LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n", + LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n", aa->oa_lockh, req, aa); ldlm_lock_decref(&handle, mode); LDLM_LOCK_PUT(lock); @@ -2185,7 +2210,8 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1; * others may take a considerable amount of time in a case of ost failure; and * when other sync requests do not get released lock from a client, the client * is excluded from the cluster -- such scenarious make the life difficult, so - * release locks just after they are obtained. */ + * release locks just after they are obtained. + */ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, __u64 *flags, ldlm_policy_data_t *policy, struct ost_lvb *lvb, int kms_valid, @@ -2198,11 +2224,12 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, struct ptlrpc_request *req = NULL; int intent = *flags & LDLM_FL_HAS_INTENT; __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY); - ldlm_mode_t mode; + enum ldlm_mode mode; int rc; /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother. */ + * dealing with the page cache is a little smoother. + */ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; policy->l_extent.end |= ~CFS_PAGE_MASK; @@ -2226,7 +2253,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, * * At some point we should cancel the read lock instead of making them * send us a blocking callback, but there are problems with canceling - * locks out from other users right now, too. */ + * locks out from other users right now, too. + */ mode = einfo->ei_mode; if (einfo->ei_mode == LCK_PR) mode |= LCK_PW; @@ -2238,7 +2266,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) { /* For AGL, if enqueue RPC is sent but the lock is not * granted, then skip to process this strpe. - * Return -ECANCELED to tell the caller. */ + * Return -ECANCELED to tell the caller. + */ ldlm_lock_decref(lockh, mode); LDLM_LOCK_PUT(matched); return -ECANCELED; @@ -2247,19 +2276,22 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, if (osc_set_lock_data_with_check(matched, einfo)) { *flags |= LDLM_FL_LVB_READY; /* addref the lock only if not async requests and PW - * lock is matched whereas we asked for PR. */ + * lock is matched whereas we asked for PR. + */ if (!rqset && einfo->ei_mode != mode) ldlm_lock_addref(lockh, LCK_PR); if (intent) { /* I would like to be able to ASSERT here that * rss <= kms, but I can't, for reasons which - * are explained in lov_enqueue() */ + * are explained in lov_enqueue() + */ } /* We already have a lock, and it's referenced. * * At this point, the cl_lock::cll_state is CLS_QUEUING, - * AGL upcall may change it to CLS_HELD directly. */ + * AGL upcall may change it to CLS_HELD directly. 
+ */ (*upcall)(cookie, ELDLM_OK); if (einfo->ei_mode != mode) @@ -2281,7 +2313,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE_LVB); - if (req == NULL) + if (!req) return -ENOMEM; rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0); @@ -2341,27 +2373,29 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, { struct obd_device *obd = exp->exp_obd; __u64 lflags = *flags; - ldlm_mode_t rc; + enum ldlm_mode rc; if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH)) return -EIO; /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother */ + * dealing with the page cache is a little smoother + */ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; policy->l_extent.end |= ~CFS_PAGE_MASK; /* Next, search for already existing extent locks that will cover us */ /* If we're trying to read, we also search for an existing PW lock. The * VFS and page cache already protect us locally, so lots of readers/ - * writers can share a single PW lock. */ + * writers can share a single PW lock. + */ rc = mode; if (mode == LCK_PR) rc |= LCK_PW; rc = ldlm_lock_match(obd->obd_namespace, lflags, res_id, type, policy, rc, lockh, unref); if (rc) { - if (data != NULL) { + if (data) { if (!osc_set_data_with_check(lockh, data)) { if (!(lflags & LDLM_FL_TEST_LOCK)) ldlm_lock_decref(lockh, rc); @@ -2398,8 +2432,9 @@ static int osc_statfs_interpret(const struct lu_env *env, * due to issues at a higher level (LOV). * Exit immediately since the caller is * aware of the problem and takes care - * of the clean up */ - return rc; + * of the clean up + */ + return rc; if ((rc == -ENOTCONN || rc == -EAGAIN) && (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) { @@ -2411,7 +2446,7 @@ static int osc_statfs_interpret(const struct lu_env *env, goto out; msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -2436,9 +2471,10 @@ static int osc_statfs_async(struct obd_export *exp, * extra calls into the filesystem if that isn't necessary (e.g. * during mount that would help a bit). Having relative timestamps * is not so great if request processing is slow, while absolute - * timestamps are not ideal because they need time synchronization. */ + * timestamps are not ideal because they need time synchronization. + */ req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); @@ -2474,8 +2510,9 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_import *imp = NULL; int rc; - /*Since the request might also come from lprocfs, so we need - *sync this with client_disconnect_export Bug15684*/ + /* Since the request might also come from lprocfs, so we need + * sync this with client_disconnect_export Bug15684 + */ down_read(&obd->u.cli.cl_sem); if (obd->u.cli.cl_import) imp = class_import_get(obd->u.cli.cl_import); @@ -2488,12 +2525,13 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, * extra calls into the filesystem if that isn't necessary (e.g. * during mount that would help a bit). Having relative timestamps * is not so great if request processing is slow, while absolute - * timestamps are not ideal because they need time synchronization. */ + * timestamps are not ideal because they need time synchronization. 
+ */ req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS); class_import_put(imp); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); @@ -2516,7 +2554,7 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, goto out; msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -2534,7 +2572,8 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, * the maximum number of OST indices which will fit in the user buffer. * lmm_magic must be LOV_MAGIC (we only use 1 slot here). */ -static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) +static int osc_getstripe(struct lov_stripe_md *lsm, + struct lov_user_md __user *lump) { /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */ struct lov_user_md_v3 lum, *lumk; @@ -2545,7 +2584,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) return -ENODATA; /* we only need the header part from user space to get lmm_magic and - * lmm_stripe_count, (the header part is common to v1 and v3) */ + * lmm_stripe_count, (the header part is common to v1 and v3) + */ lum_size = sizeof(struct lov_user_md_v1); if (copy_from_user(&lum, lump, lum_size)) return -EFAULT; @@ -2560,7 +2600,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0])); /* we can use lov_mds_md_size() to compute lum_size - * because lov_user_md_vX and lov_mds_md_vX have the same size */ + * because lov_user_md_vX and lov_mds_md_vX have the same size + */ if (lum.lmm_stripe_count > 0) { lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic); lumk = kzalloc(lum_size, GFP_NOFS); @@ -2591,14 +2632,15 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) } static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obd = exp->exp_obd; struct obd_ioctl_data *data = karg; int err = 0; if (!try_module_get(THIS_MODULE)) { - CERROR("Can't get module. 
Is it alive?"); + CERROR("%s: cannot get module '%s'\n", obd->obd_name, + module_name(THIS_MODULE)); return -EINVAL; } switch (cmd) { @@ -2700,7 +2742,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GET_INFO_LAST_ID); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -2721,7 +2763,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp, goto out; reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto out; } @@ -2735,7 +2777,7 @@ out: struct ldlm_res_id res_id; ldlm_policy_data_t policy; struct lustre_handle lockh; - ldlm_mode_t mode = 0; + enum ldlm_mode mode = 0; struct ptlrpc_request *req; struct ll_user_fiemap *reply; char *tmp; @@ -2774,7 +2816,7 @@ out: skip_locking: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GET_INFO_FIEMAP); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto drop_lock; } @@ -2803,7 +2845,7 @@ skip_locking: goto fini_req; reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto fini_req; } @@ -2852,7 +2894,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, if (KEY_IS(KEY_CACHE_SET)) { struct client_obd *cli = &obd->u.cli; - LASSERT(cli->cl_cache == NULL); /* only once */ + LASSERT(!cli->cl_cache); /* only once */ cli->cl_cache = val; atomic_inc(&cli->cl_cache->ccc_users); cli->cl_lru_left = &cli->cl_cache->ccc_lru_left; @@ -2880,16 +2922,17 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, return -EINVAL; /* We pass all other commands directly to OST. Since nobody calls osc - methods directly and everybody is supposed to go through LOV, we - assume lov checked invalid values for us. - The only recognised values so far are evict_by_nid and mds_conn. - Even if something bad goes through, we'd get a -EINVAL from OST - anyway. */ + * methods directly and everybody is supposed to go through LOV, we + * assume lov checked invalid values for us. + * The only recognised values so far are evict_by_nid and mds_conn. + * Even if something bad goes through, we'd get a -EINVAL from OST + * anyway. + */ req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ? 
&RQF_OST_SET_GRANT_INFO : &RQF_OBD_SET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -2916,7 +2959,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); aa = ptlrpc_req_async_args(req); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oa) { ptlrpc_req_finished(req); return -ENOMEM; @@ -2928,7 +2971,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, ptlrpc_request_set_replen(req); if (!KEY_IS(KEY_GRANT_SHRINK)) { - LASSERT(set != NULL); + LASSERT(set); ptlrpc_set_add_req(set, req); ptlrpc_check_set(NULL, set); } else { @@ -2946,7 +2989,7 @@ static int osc_reconnect(const struct lu_env *env, { struct client_obd *cli = &obd->u.cli; - if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { + if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { long lost_grant; client_obd_list_lock(&cli->cl_loi_list_lock); @@ -2987,7 +3030,7 @@ static int osc_disconnect(struct obd_export *exp) * So the osc should be disconnected from the shrink list, after we * are sure the import has been destroyed. BUG18662 */ - if (obd->u.cli.cl_import == NULL) + if (!obd->u.cli.cl_import) osc_del_shrink_grant(&obd->u.cli); return rc; } @@ -3024,7 +3067,8 @@ static int osc_import_event(struct obd_device *obd, /* Reset grants */ cli = &obd->u.cli; /* all pages go to failing rpcs due to the invalid - * import */ + * import + */ osc_io_unplug(env, cli, NULL); ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); @@ -3206,13 +3250,13 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) return 0; } -int osc_cleanup(struct obd_device *obd) +static int osc_cleanup(struct obd_device *obd) { struct client_obd *cli = &obd->u.cli; int rc; /* lru cleanup */ - if (cli->cl_cache != NULL) { + if (cli->cl_cache) { LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0); spin_lock(&cli->cl_cache->ccc_lru_lock); list_del_init(&cli->cl_lru_osc); @@ -3255,7 +3299,7 @@ static int osc_process_config(struct obd_device *obd, u32 len, void *buf) return osc_process_config_base(obd, buf); } -struct obd_ops osc_obd_ops = { +static struct obd_ops osc_obd_ops = { .owner = THIS_MODULE, .setup = osc_setup, .precleanup = osc_precleanup, @@ -3298,7 +3342,8 @@ static int __init osc_init(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. 
+ */ CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches); rc = lu_kmem_init(osc_caches); diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c index efdda09507bf..1b7673eec4d7 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/client.c @@ -145,7 +145,7 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE); desc = ptlrpc_new_bulk(npages, max_brw, type, portal); - if (desc == NULL) + if (!desc) return NULL; desc->bd_import_generation = req->rq_import_generation; @@ -171,7 +171,7 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, int pageoffset, int len, int pin) { LASSERT(desc->bd_iov_count < desc->bd_max_iov); - LASSERT(page != NULL); + LASSERT(page); LASSERT(pageoffset >= 0); LASSERT(len > 0); LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); @@ -193,7 +193,6 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) { int i; - LASSERT(desc != NULL); LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ LASSERT(desc->bd_md_count == 0); /* network hands off */ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); @@ -353,6 +352,7 @@ static int unpack_reply(struct ptlrpc_request *req) * If anything goes wrong just ignore it - same as if it never happened */ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) + __must_hold(&req->rq_lock) { struct ptlrpc_request *early_req; time64_t olddl; @@ -411,7 +411,7 @@ int ptlrpc_request_cache_init(void) request_cache = kmem_cache_create("ptlrpc_cache", sizeof(struct ptlrpc_request), 0, SLAB_HWCACHE_ALIGN, NULL); - return request_cache == NULL ? -ENOMEM : 0; + return !request_cache ? 
-ENOMEM : 0; } void ptlrpc_request_cache_fini(void) @@ -423,7 +423,7 @@ struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags) { struct ptlrpc_request *req; - req = kmem_cache_alloc(request_cache, flags | __GFP_ZERO); + req = kmem_cache_zalloc(request_cache, flags); return req; } @@ -441,8 +441,6 @@ void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) struct list_head *l, *tmp; struct ptlrpc_request *req; - LASSERT(pool != NULL); - spin_lock(&pool->prp_lock); list_for_each_safe(l, tmp, &pool->prp_req_list) { req = list_entry(l, struct ptlrpc_request, rq_list); @@ -559,7 +557,7 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool) } request = list_entry(pool->prp_req_list.next, struct ptlrpc_request, - rq_list); + rq_list); list_del_init(&request->rq_list); spin_unlock(&pool->prp_lock); @@ -724,10 +722,10 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp, request = ptlrpc_prep_req_from_pool(pool); if (request) { - LASSERTF((unsigned long)imp > 0x1000, "%p", imp); + LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp); LASSERT(imp != LP_POISON); - LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p", - imp->imp_client); + LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n", + imp->imp_client); LASSERT(imp->imp_client != LP_POISON); request->rq_import = class_import_get(imp); @@ -752,7 +750,7 @@ ptlrpc_request_alloc_internal(struct obd_import *imp, struct ptlrpc_request *request; request = __ptlrpc_request_alloc(imp, pool); - if (request == NULL) + if (!request) return NULL; req_capsule_init(&request->rq_pill, request, RCL_CLIENT); @@ -898,8 +896,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) RQ_PHASE_COMPLETE : RQ_PHASE_NEW; list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); LASSERT(req->rq_phase == expected_phase); n++; @@ -911,8 +908,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) list_for_each_safe(tmp, next, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); list_del_init(&req->rq_set_chain); LASSERT(req->rq_phase == expected_phase); @@ -951,10 +947,10 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set, atomic_inc(&set->set_remaining); req->rq_queued_time = cfs_time_current(); - if (req->rq_reqmsg != NULL) + if (req->rq_reqmsg) lustre_msg_set_jobid(req->rq_reqmsg, NULL); - if (set->set_producer != NULL) + if (set->set_producer) /* * If the request set has a producer callback, the RPC must be * sent straight away @@ -974,7 +970,7 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, struct ptlrpc_request_set *set = pc->pc_set; int count, i; - LASSERT(req->rq_set == NULL); + LASSERT(!req->rq_set); LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0); spin_lock(&set->set_new_req_lock); @@ -1015,7 +1011,6 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, { int delay = 0; - LASSERT(status != NULL); *status = 0; if (req->rq_ctx_init || req->rq_ctx_fini) { @@ -1078,7 +1073,7 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req) __u32 opc; int err; - LASSERT(req->rq_reqmsg != NULL); + LASSERT(req->rq_reqmsg); opc = lustre_msg_get_opc(req->rq_reqmsg); /* @@ -1167,7 +1162,7 @@ static int after_reply(struct ptlrpc_request *req) struct timespec64 work_start; long timediff; - LASSERT(obd != NULL); + LASSERT(obd); /* repbuf must be unlinked */ 
LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink); @@ -1247,7 +1242,7 @@ static int after_reply(struct ptlrpc_request *req) ktime_get_real_ts64(&work_start); timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC + (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC; - if (obd->obd_svc_stats != NULL) { + if (obd->obd_svc_stats) { lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, timediff); ptlrpc_lprocfs_rpc_sent(req, timediff); @@ -1310,7 +1305,7 @@ static int after_reply(struct ptlrpc_request *req) /* version recovery */ ptlrpc_save_versions(req); ptlrpc_retain_replayable_request(req, imp); - } else if (req->rq_commit_cb != NULL && + } else if (req->rq_commit_cb && list_empty(&req->rq_replay_list)) { /* * NB: don't call rq_commit_cb if it's already on @@ -1334,8 +1329,8 @@ static int after_reply(struct ptlrpc_request *req) struct ptlrpc_request *last; last = list_entry(imp->imp_replay_list.prev, - struct ptlrpc_request, - rq_replay_list); + struct ptlrpc_request, + rq_replay_list); /* * Requests with rq_replay stay on the list even if no * commit is expected. @@ -1437,7 +1432,7 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set) { int remaining, rc; - LASSERT(set->set_producer != NULL); + LASSERT(set->set_producer); remaining = atomic_read(&set->set_remaining); @@ -1478,8 +1473,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) INIT_LIST_HEAD(&comp_reqs); list_for_each_safe(tmp, next, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); struct obd_import *imp = req->rq_import; int unregistered = 0; int rc = 0; @@ -1621,8 +1615,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) */ list_del_init(&req->rq_list); list_add_tail(&req->rq_list, - &imp-> - imp_delayed_list); + &imp->imp_delayed_list); spin_unlock(&imp->imp_lock); continue; } @@ -1630,7 +1623,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (status != 0) { req->rq_status = status; ptlrpc_rqphase_move(req, - RQ_PHASE_INTERPRET); + RQ_PHASE_INTERPRET); spin_unlock(&imp->imp_lock); goto interpret; } @@ -1645,7 +1638,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) list_del_init(&req->rq_list); list_add_tail(&req->rq_list, - &imp->imp_sending_list); + &imp->imp_sending_list); spin_unlock(&imp->imp_lock); @@ -1750,7 +1743,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) * process the reply. Similarly if the RPC returned * an error, and therefore the bulk will never arrive. */ - if (req->rq_bulk == NULL || req->rq_status < 0) { + if (!req->rq_bulk || req->rq_status < 0) { ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); goto interpret; } @@ -1802,7 +1795,7 @@ interpret: } ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); - CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0, + CDEBUG(req->rq_reqmsg ? 
D_RPCTRACE : 0, "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n", current_comm(), imp->imp_obd->obd_uuid.uuid, lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, @@ -1882,8 +1875,8 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) "timed out for sent delay" : "timed out for slow reply"), (s64)req->rq_sent, (s64)req->rq_real_sent); - if (imp != NULL && obd_debug_peer_on_timeout) - LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer); + if (imp && obd_debug_peer_on_timeout) + LNetDebugPeer(imp->imp_connection->c_peer); ptlrpc_unregister_reply(req, async_unlink); ptlrpc_unregister_bulk(req, async_unlink); @@ -1891,7 +1884,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) if (obd_dump_on_timeout) libcfs_debug_dumplog(); - if (imp == NULL) { + if (!imp) { DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?"); return 1; } @@ -1944,13 +1937,10 @@ int ptlrpc_expired_set(void *data) struct list_head *tmp; time64_t now = ktime_get_real_seconds(); - LASSERT(set != NULL); - /* A timeout expired. See which reqs it applies to... */ list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); /* don't expire request waiting for context */ if (req->rq_wait_ctx) @@ -2002,13 +1992,11 @@ void ptlrpc_interrupted_set(void *data) struct ptlrpc_request_set *set = data; struct list_head *tmp; - LASSERT(set != NULL); CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); if (req->rq_phase != RQ_PHASE_RPC && req->rq_phase != RQ_PHASE_UNREGISTERING) @@ -2081,7 +2069,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) else list_for_each(tmp, &set->set_requests) { req = list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); if (req->rq_phase == RQ_PHASE_NEW) (void)ptlrpc_send_new_req(req); } @@ -2155,7 +2143,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) if (rc == 0 && atomic_read(&set->set_remaining) == 0) { list_for_each(tmp, &set->set_requests) { req = list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); spin_lock(&req->rq_lock); req->rq_invalid_rqset = 1; spin_unlock(&req->rq_lock); @@ -2174,7 +2162,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) rc = req->rq_status; } - if (set->set_interpret != NULL) { + if (set->set_interpret) { int (*interpreter)(struct ptlrpc_request_set *set, void *, int) = set->set_interpret; rc = interpreter(set, set->set_arg, rc); @@ -2206,10 +2194,10 @@ EXPORT_SYMBOL(ptlrpc_set_wait); */ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) { - if (request == NULL) + if (!request) return; LASSERTF(!request->rq_receiving_reply, "req %p\n", request); - LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */ + LASSERTF(!request->rq_rqbd, "req %p\n", request);/* client-side */ LASSERTF(list_empty(&request->rq_list), "req %p\n", request); LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request); @@ -2221,7 +2209,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) * We must take it off the imp_replay_list first. Otherwise, we'll set * request->rq_reqmsg to NULL while osc_close is dereferencing it. 
*/ - if (request->rq_import != NULL) { + if (request->rq_import) { if (!locked) spin_lock(&request->rq_import->imp_lock); list_del_init(&request->rq_replay_list); @@ -2236,20 +2224,20 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) LBUG(); } - if (request->rq_repbuf != NULL) + if (request->rq_repbuf) sptlrpc_cli_free_repbuf(request); - if (request->rq_export != NULL) { + if (request->rq_export) { class_export_put(request->rq_export); request->rq_export = NULL; } - if (request->rq_import != NULL) { + if (request->rq_import) { class_import_put(request->rq_import); request->rq_import = NULL; } - if (request->rq_bulk != NULL) + if (request->rq_bulk) ptlrpc_free_bulk_pin(request->rq_bulk); - if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL) + if (request->rq_reqbuf || request->rq_clrbuf) sptlrpc_cli_free_reqbuf(request); if (request->rq_cli_ctx) @@ -2269,7 +2257,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) */ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked) { - if (request == NULL) + if (!request) return 1; if (request == LP_POISON || @@ -2351,7 +2339,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) * a chance to run reply_in_callback(), and to make sure we've * unlinked before returning a req to the pool. */ - if (request->rq_set != NULL) + if (request->rq_set) wq = &request->rq_set->set_waitq; else wq = &request->rq_reply_waitq; @@ -2386,7 +2374,7 @@ static void ptlrpc_free_request(struct ptlrpc_request *req) req->rq_replay = 0; spin_unlock(&req->rq_lock); - if (req->rq_commit_cb != NULL) + if (req->rq_commit_cb) req->rq_commit_cb(req); list_del_init(&req->rq_replay_list); @@ -2427,7 +2415,6 @@ void ptlrpc_free_committed(struct obd_import *imp) struct ptlrpc_request *last_req = NULL; /* temporary fire escape */ bool skip_committed_list = true; - LASSERT(imp != NULL); assert_spin_locked(&imp->imp_lock); if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked && @@ -2575,8 +2562,7 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, ptlrpc_request_addref(req); list_for_each_prev(tmp, &imp->imp_replay_list) { struct ptlrpc_request *iter = - list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + list_entry(tmp, struct ptlrpc_request, rq_replay_list); /* * We may have duplicate transnos if we create and then @@ -2611,12 +2597,12 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req) struct ptlrpc_request_set *set; int rc; - LASSERT(req->rq_set == NULL); + LASSERT(!req->rq_set); LASSERT(!req->rq_receiving_reply); set = ptlrpc_prep_set(); - if (set == NULL) { - CERROR("Unable to allocate ptlrpc set."); + if (!set) { + CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); return -ENOMEM; } @@ -2847,12 +2833,9 @@ void ptlrpc_abort_set(struct ptlrpc_request_set *set) { struct list_head *tmp, *pos; - LASSERT(set != NULL); - list_for_each_safe(pos, tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(pos, struct ptlrpc_request, - rq_set_chain); + list_entry(pos, struct ptlrpc_request, rq_set_chain); spin_lock(&req->rq_lock); if (req->rq_phase != RQ_PHASE_RPC) { @@ -2994,7 +2977,6 @@ static int work_interpreter(const struct lu_env *env, struct ptlrpc_work_async_args *arg = data; LASSERT(ptlrpcd_check_work(req)); - LASSERT(arg->cb != NULL); rc = arg->cb(env, arg->cbdata); @@ -3026,12 +3008,12 @@ void *ptlrpcd_alloc_work(struct obd_import *imp, might_sleep(); - if (cb == NULL) + if (!cb) return ERR_PTR(-EINVAL); /* copy some 
code from deprecated fakereq. */ req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (req == NULL) { + if (!req) { CERROR("ptlrpc: run out of memory!\n"); return ERR_PTR(-ENOMEM); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c index da1f0b1ac3e3..a14daff3fca0 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/connection.c +++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c @@ -72,7 +72,8 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self, * returned and may be compared against out object. */ /* In the function below, .hs_keycmp resolves to - * conn_keycmp() */ + * conn_keycmp() + */ /* coverity[overrun-buffer-val] */ conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash); if (conn != conn2) { @@ -172,7 +173,7 @@ conn_keycmp(const void *key, struct hlist_node *hnode) struct ptlrpc_connection *conn; const lnet_process_id_t *conn_key; - LASSERT(key != NULL); + LASSERT(key); conn_key = key; conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash); diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c index 990156986986..47be21ac9f10 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/events.c +++ b/drivers/staging/lustre/lustre/ptlrpc/events.c @@ -71,7 +71,8 @@ void request_out_callback(lnet_event_t *ev) if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) { /* Failed send: make it seem like the reply timed out, just - * like failing sends in client.c does currently... */ + * like failing sends in client.c does currently... + */ req->rq_net_err = 1; ptlrpc_client_wake_req(req); @@ -95,7 +96,8 @@ void reply_in_callback(lnet_event_t *ev) LASSERT(ev->md.start == req->rq_repbuf); LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len); /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests - for adaptive timeouts' early reply. */ + * for adaptive timeouts' early reply. 
+ */ LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0); spin_lock(&req->rq_lock); @@ -151,7 +153,8 @@ void reply_in_callback(lnet_event_t *ev) req->rq_reply_off = ev->offset; req->rq_nob_received = ev->mlength; /* LNetMDUnlink can't be called under the LNET_LOCK, - so we must unlink in ptlrpc_unregister_reply */ + * so we must unlink in ptlrpc_unregister_reply + */ DEBUG_REQ(D_INFO, req, "reply in flags=%x mlen=%u offset=%d replen=%d", lustre_msg_get_flags(req->rq_reqmsg), @@ -162,7 +165,8 @@ void reply_in_callback(lnet_event_t *ev) out_wake: /* NB don't unlock till after wakeup; req can disappear under us - * since we don't have our own ref */ + * since we don't have our own ref + */ ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } @@ -213,7 +217,8 @@ void client_bulk_callback(lnet_event_t *ev) desc->bd_failure = 1; /* NB don't unlock till after wakeup; desc can disappear under us - * otherwise */ + * otherwise + */ if (desc->bd_md_count == 0) ptlrpc_client_wake_req(desc->bd_req); @@ -250,7 +255,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, __u64 new_seq; /* set sequence ID for request and add it to history list, - * it must be called with hold svcpt::scp_lock */ + * it must be called with hold svcpt::scp_lock + */ new_seq = (sec << REQS_SEC_SHIFT) | (usec << REQS_USEC_SHIFT) | @@ -258,7 +264,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, if (new_seq > svcpt->scp_hist_seq) { /* This handles the initial case of scp_hist_seq == 0 or - * we just jumped into a new time window */ + * we just jumped into a new time window + */ svcpt->scp_hist_seq = new_seq; } else { LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT); @@ -266,7 +273,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, * however, it's possible that we used up all bits for * sequence and jumped into the next usec bucket (future time), * then we hope there will be less RPCs per bucket at some - * point, and sequence will catch up again */ + * point, and sequence will catch up again + */ svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt)); new_seq = svcpt->scp_hist_seq; } @@ -302,7 +310,8 @@ void request_in_callback(lnet_event_t *ev) * request buffer we can use the request object embedded in * rqbd. Note that if we failed to allocate a request, * we'd have to re-post the rqbd, which we can't do in this - * context. */ + * context. + */ req = &rqbd->rqbd_req; memset(req, 0, sizeof(*req)); } else { @@ -312,7 +321,7 @@ void request_in_callback(lnet_event_t *ev) return; } req = ptlrpc_request_cache_alloc(GFP_ATOMIC); - if (req == NULL) { + if (!req) { CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n", service->srv_name, libcfs_id2str(ev->initiator)); @@ -322,7 +331,8 @@ void request_in_callback(lnet_event_t *ev) /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL, * flags are reset and scalars are zero. We only set the message - * size to non-zero if this was a successful receive. */ + * size to non-zero if this was a successful receive. 
+ */ req->rq_xid = ev->match_bits; req->rq_reqbuf = ev->md.start + ev->offset; if (ev->type == LNET_EVENT_PUT && ev->status == 0) @@ -352,7 +362,8 @@ void request_in_callback(lnet_event_t *ev) svcpt->scp_nrqbds_posted); /* Normally, don't complain about 0 buffers posted; LNET won't - * drop incoming reqs since we set the portal lazy */ + * drop incoming reqs since we set the portal lazy + */ if (test_req_buffer_pressure && ev->type != LNET_EVENT_UNLINK && svcpt->scp_nrqbds_posted == 0) @@ -369,7 +380,8 @@ void request_in_callback(lnet_event_t *ev) svcpt->scp_nreqs_incoming++; /* NB everything can disappear under us once the request - * has been queued and we unlock, so do the wake now... */ + * has been queued and we unlock, so do the wake now... + */ wake_up(&svcpt->scp_waitq); spin_unlock(&svcpt->scp_lock); @@ -390,7 +402,8 @@ void reply_out_callback(lnet_event_t *ev) if (!rs->rs_difficult) { /* 'Easy' replies have no further processing so I drop the - * net's ref on 'rs' */ + * net's ref on 'rs' + */ LASSERT(ev->unlinked); ptlrpc_rs_decref(rs); return; @@ -400,7 +413,8 @@ void reply_out_callback(lnet_event_t *ev) if (ev->unlinked) { /* Last network callback. The net's ref on 'rs' stays put - * until ptlrpc_handle_rs() is done with it */ + * until ptlrpc_handle_rs() is done with it + */ spin_lock(&svcpt->scp_rep_lock); spin_lock(&rs->rs_lock); @@ -438,15 +452,12 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, __u32 best_order = 0; int count = 0; int rc = -ENOENT; - int portals_compatibility; int dist; __u32 order; lnet_nid_t dst_nid; lnet_nid_t src_nid; - portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL); - - peer->pid = LUSTRE_SRV_LNET_PID; + peer->pid = LNET_PID_LUSTRE; /* Choose the matching UUID that's closest */ while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) { @@ -466,14 +477,6 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, best_dist = dist; best_order = order; - if (portals_compatibility > 1) { - /* Strong portals compatibility: Zero the nid's - * NET, so if I'm reading new config logs, or - * getting configured by (new) lconf I can - * still talk to old servers. */ - dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid)); - src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid)); - } peer->nid = dst_nid; *self = src_nid; rc = 0; @@ -494,7 +497,8 @@ static void ptlrpc_ni_fini(void) /* Wait for the event queue to become idle since there may still be * messages in flight with pending events (i.e. the fire-and-forget * messages == client requests and "non-difficult" server - * replies */ + * replies + */ for (retries = 0;; retries++) { rc = LNetEQFree(ptlrpc_eq_h); @@ -524,7 +528,7 @@ static lnet_pid_t ptl_get_pid(void) { lnet_pid_t pid; - pid = LUSTRE_SRV_LNET_PID; + pid = LNET_PID_LUSTRE; return pid; } @@ -544,11 +548,13 @@ static int ptlrpc_ni_init(void) } /* CAVEAT EMPTOR: how we process portals events is _radically_ - * different depending on... */ + * different depending on... + */ /* kernel LNet calls our master callback when there are new event, * because we are guaranteed to get every event via callback, * so we just set EQ size to 0 to avoid overhead of serializing - * enqueue/dequeue operations in LNet. */ + * enqueue/dequeue operations in LNet. 
+ */ rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h); if (rc == 0) return 0; diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c index f752c789bda0..b4eddf291269 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/import.c +++ b/drivers/staging/lustre/lustre/ptlrpc/import.c @@ -112,7 +112,8 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp); * CLOSED. I would rather refcount the import and free it after * disconnection like we do with exports. To do that, the client_obd * will need to save the peer info somewhere other than in the import, - * though. */ + * though. + */ int ptlrpc_init_import(struct obd_import *imp) { spin_lock(&imp->imp_lock); @@ -139,7 +140,7 @@ static void deuuidify(char *uuid, const char *prefix, char **uuid_start, return; if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR), - UUID_STR, strlen(UUID_STR))) + UUID_STR, strlen(UUID_STR))) *uuid_len -= strlen(UUID_STR); } @@ -282,11 +283,13 @@ void ptlrpc_invalidate_import(struct obd_import *imp) /* Wait forever until inflight == 0. We really can't do it another * way because in some cases we need to wait for very long reply * unlink. We can't do anything before that because there is really - * no guarantee that some rdma transfer is not in progress right now. */ + * no guarantee that some rdma transfer is not in progress right now. + */ do { /* Calculate max timeout for waiting on rpcs to error * out. Use obd_timeout if calculated value is smaller - * than it. */ + * than it. + */ if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { timeout = ptlrpc_inflight_timeout(imp); timeout += timeout / 3; @@ -304,7 +307,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp) /* Wait for all requests to error out and call completion * callbacks. Cap it at obd_timeout -- these should all - * have been locally cancelled by ptlrpc_abort_inflight. */ + * have been locally cancelled by ptlrpc_abort_inflight. + */ lwi = LWI_TIMEOUT_INTERVAL( cfs_timeout_cap(cfs_time_seconds(timeout)), (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2, @@ -328,28 +332,30 @@ void ptlrpc_invalidate_import(struct obd_import *imp) * maybe waiting for long reply unlink in * sluggish nets). Let's check this. If there * is no inflight and unregistering != 0, this - * is bug. */ + * is bug. + */ LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n", count); /* Let's save one loop as soon as inflight have * dropped to zero. No new inflights possible at - * this point. */ + * this point. 
+ */ rc = 0; } else { list_for_each_safe(tmp, n, - &imp->imp_sending_list) { + &imp->imp_sending_list) { req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_ERROR, req, "still on sending list"); } list_for_each_safe(tmp, n, - &imp->imp_delayed_list) { + &imp->imp_delayed_list) { req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_ERROR, req, "still on delayed list"); } @@ -427,7 +433,6 @@ EXPORT_SYMBOL(ptlrpc_fail_import); int ptlrpc_reconnect_import(struct obd_import *imp) { -#ifdef ENABLE_PINGER struct l_wait_info lwi; int secs = cfs_time_seconds(obd_timeout); int rc; @@ -443,33 +448,6 @@ int ptlrpc_reconnect_import(struct obd_import *imp) CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd), ptlrpc_import_state_name(imp->imp_state)); return rc; -#else - ptlrpc_set_import_discon(imp, 0); - /* Force a new connect attempt */ - ptlrpc_invalidate_import(imp); - /* Do a fresh connect next time by zeroing the handle */ - ptlrpc_disconnect_import(imp, 1); - /* Wait for all invalidate calls to finish */ - if (atomic_read(&imp->imp_inval_count) > 0) { - int rc; - struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); - - rc = l_wait_event(imp->imp_recovery_waitq, - (atomic_read(&imp->imp_inval_count) == 0), - &lwi); - if (rc) - CERROR("Interrupted, inval=%d\n", - atomic_read(&imp->imp_inval_count)); - } - - /* Allow reconnect attempts */ - imp->imp_obd->obd_no_recov = 0; - /* Remove 'invalid' flag */ - ptlrpc_activate_import(imp); - /* Attempt a new connect */ - ptlrpc_recover_import(imp, NULL, 0); - return 0; -#endif } EXPORT_SYMBOL(ptlrpc_reconnect_import); @@ -501,18 +479,20 @@ static int import_select_connection(struct obd_import *imp) conn->oic_last_attempt); /* If we have not tried this connection since - the last successful attempt, go with this one */ + * the last successful attempt, go with this one + */ if ((conn->oic_last_attempt == 0) || cfs_time_beforeq_64(conn->oic_last_attempt, - imp->imp_last_success_conn)) { + imp->imp_last_success_conn)) { imp_conn = conn; tried_all = 0; break; } /* If all of the connections have already been tried - since the last successful connection; just choose the - least recently used */ + * since the last successful connection; just choose the + * least recently used + */ if (!imp_conn) imp_conn = conn; else if (cfs_time_before_64(conn->oic_last_attempt, @@ -529,10 +509,11 @@ static int import_select_connection(struct obd_import *imp) LASSERT(imp_conn->oic_conn); /* If we've tried everything, and we're back to the beginning of the - list, increase our timeout and try again. It will be reset when - we do finally connect. (FIXME: really we should wait for all network - state associated with the last connection attempt to drain before - trying to reconnect on it.) */ + * list, increase our timeout and try again. It will be reset when + * we do finally connect. (FIXME: really we should wait for all network + * state associated with the last connection attempt to drain before + * trying to reconnect on it.) 
+ */ if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) { struct adaptive_timeout *at = &imp->imp_at.iat_net_latency; @@ -553,7 +534,6 @@ static int import_select_connection(struct obd_import *imp) imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn); dlmexp = class_conn2export(&imp->imp_dlm_handle); - LASSERT(dlmexp != NULL); ptlrpc_connection_put(dlmexp->exp_connection); dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn); class_export_put(dlmexp); @@ -590,7 +570,8 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) struct list_head *tmp; /* The requests in committed_list always have smaller transnos than - * the requests in replay_list */ + * the requests in replay_list + */ if (!list_empty(&imp->imp_committed_list)) { tmp = imp->imp_committed_list.next; req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); @@ -674,7 +655,8 @@ int ptlrpc_connect_import(struct obd_import *imp) goto out; /* Reset connect flags to the originally requested flags, in case - * the server is updated on-the-fly we will get the new features. */ + * the server is updated on-the-fly we will get the new features. + */ imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig; /* Reset ocd_version each time so the server knows the exact versions */ imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE; @@ -687,7 +669,7 @@ int ptlrpc_connect_import(struct obd_import *imp) goto out; request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT); - if (request == NULL) { + if (!request) { rc = -ENOMEM; goto out; } @@ -700,7 +682,8 @@ int ptlrpc_connect_import(struct obd_import *imp) } /* Report the rpc service time to the server so that it knows how long - * to wait for clients to join recovery */ + * to wait for clients to join recovery + */ lustre_msg_set_service_time(request->rq_reqmsg, at_timeout2est(request->rq_timeout)); @@ -708,7 +691,8 @@ int ptlrpc_connect_import(struct obd_import *imp) * import_select_connection will increase the net latency on * repeated reconnect attempts to cover slow networks. 
* We override/ignore the server rpc completion estimate here, - * which may be large if this is a reconnect attempt */ + * which may be large if this is a reconnect attempt + */ request->rq_timeout = INITIAL_CONNECT_TIMEOUT; lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout); @@ -799,7 +783,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, if (rc) { /* if this reconnect to busy export - not need select new target - * for connecting*/ + * for connecting + */ imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc); spin_unlock(&imp->imp_lock); ptlrpc_maybe_ping_import_soon(imp); @@ -817,7 +802,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, ocd = req_capsule_server_sized_get(&request->rq_pill, &RMF_CONNECT_DATA, ret); - if (ocd == NULL) { + if (!ocd) { CERROR("%s: no connect data from server\n", imp->imp_obd->obd_name); rc = -EPROTO; @@ -851,7 +836,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, if (!exp) { /* This could happen if export is cleaned during the - connect attempt */ + * connect attempt + */ CERROR("%s: missing export after connect\n", imp->imp_obd->obd_name); rc = -ENODEV; @@ -877,14 +863,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, } /* if applies, adjust the imp->imp_msg_magic here - * according to reply flags */ + * according to reply flags + */ imp->imp_remote_handle = *lustre_msg_get_handle(request->rq_repmsg); /* Initial connects are allowed for clients with non-random * uuids when servers are in recovery. Simply signal the - * servers replay is complete and wait in REPLAY_WAIT. */ + * servers replay is complete and wait in REPLAY_WAIT. + */ if (msg_flags & MSG_CONNECT_RECOVERING) { CDEBUG(D_HA, "connect to %s during recovery\n", obd2cli_tgt(imp->imp_obd)); @@ -923,7 +911,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, * already erased all of our state because of previous * eviction. If it is in recovery - we are safe to * participate since we can reestablish all of our state - * with server again */ + * with server again + */ if ((msg_flags & MSG_CONNECT_RECOVERING)) { CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n", obd2cli_tgt(imp->imp_obd), @@ -1015,8 +1004,7 @@ finish: spin_lock(&imp->imp_lock); list_del(&imp->imp_conn_current->oic_item); - list_add(&imp->imp_conn_current->oic_item, - &imp->imp_conn_list); + list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list); imp->imp_last_success_conn = imp->imp_conn_current->oic_last_attempt; @@ -1039,7 +1027,8 @@ finish: ocd->ocd_version < LUSTRE_VERSION_CODE - LUSTRE_VERSION_OFFSET_WARN)) { /* Sigh, some compilers do not like #ifdef in the middle - of macro arguments */ + * of macro arguments + */ const char *older = "older. Consider upgrading server or downgrading client" ; const char *newer = "newer than client version. Consider upgrading client" @@ -1061,7 +1050,8 @@ finish: * fixup is version-limited, because we don't want to carry the * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we * need interop with unpatched 2.2 servers. For newer servers, - * the client will do MNE swabbing only as needed. LU-1644 */ + * the client will do MNE swabbing only as needed. 
LU-1644 + */ if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) && OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 && @@ -1079,7 +1069,8 @@ finish: if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) { /* We sent to the server ocd_cksum_types with bits set * for algorithms we understand. The server masked off - * the checksum types it doesn't support */ + * the checksum types it doesn't support + */ if ((ocd->ocd_cksum_types & cksum_types_supported_client()) == 0) { LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n", @@ -1093,7 +1084,8 @@ finish: } } else { /* The server does not support OBD_CONNECT_CKSUM. - * Enforce ADLER for backward compatibility*/ + * Enforce ADLER for backward compatibility + */ cli->cl_supp_cksum_types = OBD_CKSUM_ADLER; } cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types); @@ -1109,7 +1101,8 @@ finish: /* Reset ns_connect_flags only for initial connect. It might be * changed in while using FS and if we reset it in reconnect * this leads to losing user settings done before such as - * disable lru_resize, etc. */ + * disable lru_resize, etc. + */ if (old_connect_flags != exp_connect_flags(exp) || aa->pcaa_initial_connect) { CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n", @@ -1123,13 +1116,14 @@ finish: if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) && (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2)) /* We need a per-message support flag, because - a. we don't know if the incoming connect reply - supports AT or not (in reply_in_callback) - until we unpack it. - b. failovered server means export and flags are gone - (in ptlrpc_send_reply). - Can only be set when we know AT is supported at - both ends */ + * a. we don't know if the incoming connect reply + * supports AT or not (in reply_in_callback) + * until we unpack it. + * b. failovered server means export and flags are gone + * (in ptlrpc_send_reply). + * Can only be set when we know AT is supported at + * both ends + */ imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT; else imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; @@ -1162,7 +1156,7 @@ out: struct obd_connect_data *ocd; /* reply message might not be ready */ - if (request->rq_repmsg == NULL) + if (!request->rq_repmsg) return -EPROTO; ocd = req_capsule_server_get(&request->rq_pill, @@ -1243,7 +1237,7 @@ static int signal_completed_replay(struct obd_import *imp) req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION, OBD_PING); - if (req == NULL) { + if (!req) { atomic_dec(&imp->imp_replay_inflight); return -ENOMEM; } @@ -1337,12 +1331,13 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) { struct task_struct *task; /* bug 17802: XXX client_disconnect_export vs connect request - * race. if client will evicted at this time, we start + * race. if client is evicted at this time, we start * invalidate thread without reference to import and import can - * be freed at same time. */ + * be freed at same time. + */ class_import_get(imp); task = kthread_run(ptlrpc_invalidate_import_thread, imp, - "ll_imp_inval"); + "ll_imp_inval"); if (IS_ERR(task)) { class_import_put(imp); CERROR("error starting invalidate thread: %d\n", rc); @@ -1471,11 +1466,13 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose) if (req) { /* We are disconnecting, do not retry a failed DISCONNECT rpc if * it fails. 
We can get through the above with a down server - * if the client doesn't know the server is gone yet. */ + * if the client doesn't know the server is gone yet. + */ req->rq_no_resend = 1; /* We want client umounts to happen quickly, no matter the - server state... */ + * server state... + */ req->rq_timeout = min_t(int, req->rq_timeout, INITIAL_CONNECT_TIMEOUT); @@ -1507,9 +1504,10 @@ EXPORT_SYMBOL(ptlrpc_disconnect_import); extern unsigned int at_min, at_max, at_history; /* Bin into timeslices using AT_BINS bins. - This gives us a max of the last binlimit*AT_BINS secs without the storage, - but still smoothing out a return to normalcy from a slow response. - (E.g. remember the maximum latency in each minute of the last 4 minutes.) */ + * This gives us a max of the last binlimit*AT_BINS secs without the storage, + * but still smoothing out a return to normalcy from a slow response. + * (E.g. remember the maximum latency in each minute of the last 4 minutes.) + */ int at_measured(struct adaptive_timeout *at, unsigned int val) { unsigned int old = at->at_current; @@ -1523,7 +1521,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) if (val == 0) /* 0's don't count, because we never want our timeout to - drop to 0, and because 0 could mean an error */ + * drop to 0, and because 0 could mean an error + */ return 0; spin_lock(&at->at_lock); @@ -1565,7 +1564,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) if (at->at_flags & AT_FLG_NOHIST) /* Only keep last reported val; keeping the rest of the history - for proc only */ + * for debugfs only + */ at->at_current = val; if (at_max > 0) diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c index c0e613c23854..5b06901e5729 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/layout.c +++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c @@ -118,25 +118,6 @@ static const struct req_msg_field *quotactl_only[] = { &RMF_OBD_QUOTACTL }; -static const struct req_msg_field *quota_body_only[] = { - &RMF_PTLRPC_BODY, - &RMF_QUOTA_BODY -}; - -static const struct req_msg_field *ldlm_intent_quota_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_QUOTA_BODY -}; - -static const struct req_msg_field *ldlm_intent_quota_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP, - &RMF_DLM_LVB, - &RMF_QUOTA_BODY -}; - static const struct req_msg_field *mdt_close_client[] = { &RMF_PTLRPC_BODY, &RMF_MDT_EPOCH, @@ -514,16 +495,6 @@ static const struct req_msg_field *mds_setattr_server[] = { &RMF_CAPA2 }; -static const struct req_msg_field *mds_update_client[] = { - &RMF_PTLRPC_BODY, - &RMF_UPDATE, -}; - -static const struct req_msg_field *mds_update_server[] = { - &RMF_PTLRPC_BODY, - &RMF_UPDATE_REPLY, -}; - static const struct req_msg_field *llog_origin_handle_create_client[] = { &RMF_PTLRPC_BODY, &RMF_LLOGD_BODY, @@ -551,16 +522,6 @@ static const struct req_msg_field *llog_origin_handle_next_block_server[] = { &RMF_EADATA }; -static const struct req_msg_field *obd_idx_read_client[] = { - &RMF_PTLRPC_BODY, - &RMF_IDX_INFO -}; - -static const struct req_msg_field *obd_idx_read_server[] = { - &RMF_PTLRPC_BODY, - &RMF_IDX_INFO -}; - static const struct req_msg_field *ost_body_only[] = { &RMF_PTLRPC_BODY, &RMF_OST_BODY @@ -676,7 +637,6 @@ static const struct req_msg_field *mdt_hsm_request[] = { static struct req_format *req_formats[] = { &RQF_OBD_PING, &RQF_OBD_SET_INFO, - &RQF_OBD_IDX_READ, &RQF_SEC_CTX, &RQF_MGS_TARGET_REG, &RQF_MGS_SET_INFO, @@ -721,7 +681,6 @@ static 
struct req_format *req_formats[] = { &RQF_MDS_HSM_ACTION, &RQF_MDS_HSM_REQUEST, &RQF_MDS_SWAP_LAYOUTS, - &RQF_UPDATE_OBJ, &RQF_QC_CALLBACK, &RQF_OST_CONNECT, &RQF_OST_DISCONNECT, @@ -759,8 +718,6 @@ static struct req_format *req_formats[] = { &RQF_LDLM_INTENT_CREATE, &RQF_LDLM_INTENT_UNLINK, &RQF_LDLM_INTENT_GETXATTR, - &RQF_LDLM_INTENT_QUOTA, - &RQF_QUOTA_DQACQ, &RQF_LOG_CANCEL, &RQF_LLOG_ORIGIN_HANDLE_CREATE, &RQF_LLOG_ORIGIN_HANDLE_DESTROY, @@ -899,11 +856,6 @@ struct req_msg_field RMF_OBD_QUOTACTL = lustre_swab_obd_quotactl, NULL); EXPORT_SYMBOL(RMF_OBD_QUOTACTL); -struct req_msg_field RMF_QUOTA_BODY = - DEFINE_MSGF("quota_body", 0, - sizeof(struct quota_body), lustre_swab_quota_body, NULL); -EXPORT_SYMBOL(RMF_QUOTA_BODY); - struct req_msg_field RMF_MDT_EPOCH = DEFINE_MSGF("mdt_ioepoch", 0, sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL); @@ -938,12 +890,12 @@ EXPORT_SYMBOL(RMF_SYMTGT); struct req_msg_field RMF_TGTUUID = DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL, - NULL); + NULL); EXPORT_SYMBOL(RMF_TGTUUID); struct req_msg_field RMF_CLUUID = DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL, - NULL); + NULL); EXPORT_SYMBOL(RMF_CLUUID); struct req_msg_field RMF_STRING = @@ -1078,7 +1030,7 @@ EXPORT_SYMBOL(RMF_RCS); struct req_msg_field RMF_EAVALS_LENS = DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32), - lustre_swab_generic_32s, NULL); + lustre_swab_generic_32s, NULL); EXPORT_SYMBOL(RMF_EAVALS_LENS); struct req_msg_field RMF_OBD_ID = @@ -1105,10 +1057,6 @@ struct req_msg_field RMF_FIEMAP_VAL = DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL); EXPORT_SYMBOL(RMF_FIEMAP_VAL); -struct req_msg_field RMF_IDX_INFO = - DEFINE_MSGF("idx_info", 0, sizeof(struct idx_info), - lustre_swab_idx_info, NULL); -EXPORT_SYMBOL(RMF_IDX_INFO); struct req_msg_field RMF_HSM_USER_STATE = DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state), lustre_swab_hsm_user_state, NULL); @@ -1145,15 +1093,6 @@ struct req_msg_field RMF_MDS_HSM_REQUEST = lustre_swab_hsm_request, NULL); EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST); -struct req_msg_field RMF_UPDATE = DEFINE_MSGF("update", 0, -1, - lustre_swab_update_buf, NULL); -EXPORT_SYMBOL(RMF_UPDATE); - -struct req_msg_field RMF_UPDATE_REPLY = DEFINE_MSGF("update_reply", 0, -1, - lustre_swab_update_reply_buf, - NULL); -EXPORT_SYMBOL(RMF_UPDATE_REPLY); - struct req_msg_field RMF_SWAP_LAYOUTS = DEFINE_MSGF("swap_layouts", 0, sizeof(struct mdc_swap_layouts), lustre_swab_swap_layouts, NULL); @@ -1196,29 +1135,23 @@ struct req_format RQF_OBD_SET_INFO = DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty); EXPORT_SYMBOL(RQF_OBD_SET_INFO); -/* Read index file through the network */ -struct req_format RQF_OBD_IDX_READ = - DEFINE_REQ_FMT0("OBD_IDX_READ", - obd_idx_read_client, obd_idx_read_server); -EXPORT_SYMBOL(RQF_OBD_IDX_READ); - struct req_format RQF_SEC_CTX = DEFINE_REQ_FMT0("SEC_CTX", empty, empty); EXPORT_SYMBOL(RQF_SEC_CTX); struct req_format RQF_MGS_TARGET_REG = DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only, - mgs_target_info_only); + mgs_target_info_only); EXPORT_SYMBOL(RQF_MGS_TARGET_REG); struct req_format RQF_MGS_SET_INFO = DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info, - mgs_set_info); + mgs_set_info); EXPORT_SYMBOL(RQF_MGS_SET_INFO); struct req_format RQF_MGS_CONFIG_READ = DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client, - mgs_config_read_server); + mgs_config_read_server); EXPORT_SYMBOL(RQF_MGS_CONFIG_READ); struct req_format RQF_SEQ_QUERY = @@ -1253,16 +1186,6 @@ 
struct req_format RQF_QC_CALLBACK = DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty); EXPORT_SYMBOL(RQF_QC_CALLBACK); -struct req_format RQF_QUOTA_DQACQ = - DEFINE_REQ_FMT0("QUOTA_DQACQ", quota_body_only, quota_body_only); -EXPORT_SYMBOL(RQF_QUOTA_DQACQ); - -struct req_format RQF_LDLM_INTENT_QUOTA = - DEFINE_REQ_FMT0("LDLM_INTENT_QUOTA", - ldlm_intent_quota_client, - ldlm_intent_quota_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_QUOTA); - struct req_format RQF_MDS_GETSTATUS = DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa); EXPORT_SYMBOL(RQF_MDS_GETSTATUS); @@ -1357,11 +1280,6 @@ struct req_format RQF_MDS_GET_INFO = mds_getinfo_server); EXPORT_SYMBOL(RQF_MDS_GET_INFO); -struct req_format RQF_UPDATE_OBJ = - DEFINE_REQ_FMT0("OBJECT_UPDATE_OBJ", mds_update_client, - mds_update_server); -EXPORT_SYMBOL(RQF_UPDATE_OBJ); - struct req_format RQF_LDLM_ENQUEUE = DEFINE_REQ_FMT0("LDLM_ENQUEUE", ldlm_enqueue_client, ldlm_enqueue_lvb_server); @@ -1598,32 +1516,32 @@ EXPORT_SYMBOL(RQF_OST_STATFS); struct req_format RQF_OST_SET_GRANT_INFO = DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client, - ost_body_only); + ost_body_only); EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO); struct req_format RQF_OST_GET_INFO_GENERIC = DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client, - ost_get_info_generic_server); + ost_get_info_generic_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC); struct req_format RQF_OST_GET_INFO_LAST_ID = DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client, - ost_get_last_id_server); + ost_get_last_id_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID); struct req_format RQF_OST_GET_INFO_LAST_FID = DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client, - ost_get_last_fid_server); + ost_get_last_fid_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID); struct req_format RQF_OST_SET_INFO_LAST_FID = DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client, - empty); + empty); EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID); struct req_format RQF_OST_GET_INFO_FIEMAP = DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client, - ost_get_fiemap_server); + ost_get_fiemap_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP); #if !defined(__REQ_LAYOUT_USER__) @@ -1712,7 +1630,7 @@ void req_capsule_init(struct req_capsule *pill, * high-priority RPC queue getting peeked at before ost_handle() * handles an OST RPC. 
*/ - if (req != NULL && pill == &req->rq_pill && req->rq_pill_init) + if (req && pill == &req->rq_pill && req->rq_pill_init) return; memset(pill, 0, sizeof(*pill)); @@ -1720,7 +1638,7 @@ void req_capsule_init(struct req_capsule *pill, pill->rc_loc = location; req_capsule_init_area(pill); - if (req != NULL && pill == &req->rq_pill) + if (req && pill == &req->rq_pill) req->rq_pill_init = 1; } EXPORT_SYMBOL(req_capsule_init); @@ -1752,7 +1670,7 @@ static struct lustre_msg *__req_msg(const struct req_capsule *pill, */ void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt) { - LASSERT(pill->rc_fmt == NULL || pill->rc_fmt == fmt); + LASSERT(!pill->rc_fmt || pill->rc_fmt == fmt); LASSERT(__req_format_is_sane(fmt)); pill->rc_fmt = fmt; @@ -1773,8 +1691,6 @@ int req_capsule_filled_sizes(struct req_capsule *pill, const struct req_format *fmt = pill->rc_fmt; int i; - LASSERT(fmt != NULL); - for (i = 0; i < fmt->rf_fields[loc].nr; ++i) { if (pill->rc_area[loc][i] == -1) { pill->rc_area[loc][i] = @@ -1810,15 +1726,15 @@ int req_capsule_server_pack(struct req_capsule *pill) LASSERT(pill->rc_loc == RCL_SERVER); fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); count = req_capsule_filled_sizes(pill, RCL_SERVER); rc = lustre_pack_reply(pill->rc_req, count, pill->rc_area[RCL_SERVER], NULL); if (rc != 0) { DEBUG_REQ(D_ERROR, pill->rc_req, - "Cannot pack %d fields in format `%s': ", - count, fmt->rf_name); + "Cannot pack %d fields in format `%s': ", + count, fmt->rf_name); } return rc; } @@ -1835,9 +1751,8 @@ static int __req_capsule_offset(const struct req_capsule *pill, int offset; offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc]; - LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", - pill->rc_fmt->rf_name, - field->rmf_name, offset, loc); + LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", pill->rc_fmt->rf_name, + field->rmf_name, offset, loc); offset--; LASSERT(0 <= offset && offset < REQ_MAX_FIELD_NR); @@ -1865,7 +1780,7 @@ swabber_dumper_helper(struct req_capsule *pill, swabber = swabber ?: field->rmf_swabber; if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) && - swabber != NULL && value != NULL) + swabber && value) do_swab = 1; else do_swab = 0; @@ -1883,7 +1798,7 @@ swabber_dumper_helper(struct req_capsule *pill, return; swabber(value); ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset); - if (dump) { + if (dump && field->rmf_dumper) { CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n", field->rmf_name); field->rmf_dumper(value); @@ -1947,17 +1862,15 @@ static void *__req_capsule_get(struct req_capsule *pill, [RCL_SERVER] = "server" }; - LASSERT(pill != NULL); - LASSERT(pill != LP_POISON); fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); LASSERT(fmt != LP_POISON); LASSERT(__req_format_is_sane(fmt)); offset = __req_capsule_offset(pill, field, loc); msg = __req_msg(pill, loc); - LASSERT(msg != NULL); + LASSERT(msg); getter = (field->rmf_flags & RMF_F_STRING) ? (typeof(getter))lustre_msg_string : lustre_msg_buf; @@ -1980,7 +1893,7 @@ static void *__req_capsule_get(struct req_capsule *pill, } value = getter(msg, offset, len); - if (value == NULL) { + if (!value) { DEBUG_REQ(D_ERROR, pill->rc_req, "Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. 
%d (%s)\n", field->rmf_name, offset, lustre_msg_bufcount(msg), @@ -2209,7 +2122,7 @@ void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt) const struct req_format *old; - LASSERT(pill->rc_fmt != NULL); + LASSERT(pill->rc_fmt); LASSERT(__req_format_is_sane(fmt)); old = pill->rc_fmt; @@ -2222,7 +2135,7 @@ void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt) const struct req_msg_field *ofield = FMT_FIELD(old, i, j); /* "opaque" fields can be transmogrified */ - if (ofield->rmf_swabber == NULL && + if (!ofield->rmf_swabber && (ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 && (ofield->rmf_size == -1 || ofield->rmf_flags == RMF_F_NO_SIZE_CHECK)) @@ -2289,7 +2202,7 @@ void req_capsule_shrink(struct req_capsule *pill, int offset; fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); LASSERT(__req_format_is_sane(fmt)); LASSERT(req_capsule_has_field(pill, field, loc)); LASSERT(req_capsule_field_present(pill, field, loc)); @@ -2299,7 +2212,7 @@ void req_capsule_shrink(struct req_capsule *pill, msg = __req_msg(pill, loc); len = lustre_msg_buflen(msg, offset); LASSERTF(newlen <= len, "%s:%s, oldlen=%d, newlen=%d\n", - fmt->rf_name, field->rmf_name, len, newlen); + fmt->rf_name, field->rmf_name, len, newlen); if (loc == RCL_CLIENT) pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen, diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c index e87702073f1f..a23ac5f9ae96 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c @@ -75,7 +75,8 @@ } while (0) /* This is a callback from the llog_* functions. - * Assumes caller has already pushed us into the kernel context. */ + * Assumes caller has already pushed us into the kernel context. 
+ */ static int llog_client_open(const struct lu_env *env, struct llog_handle *lgh, struct llog_logid *logid, char *name, enum llog_open_param open_param) @@ -93,7 +94,7 @@ static int llog_client_open(const struct lu_env *env, LASSERT(lgh); req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -130,7 +131,7 @@ static int llog_client_open(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } @@ -158,7 +159,7 @@ static int llog_client_next_block(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_NEXT_BLOCK); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -179,14 +180,14 @@ static int llog_client_next_block(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } /* The log records are swabbed as they are processed */ ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); - if (ptr == NULL) { + if (!ptr) { rc = -EFAULT; goto out; } @@ -216,7 +217,7 @@ static int llog_client_prev_block(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_PREV_BLOCK); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -236,13 +237,13 @@ static int llog_client_prev_block(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); - if (ptr == NULL) { + if (!ptr) { rc = -EFAULT; goto out; } @@ -269,7 +270,7 @@ static int llog_client_read_header(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_READ_HEADER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -285,7 +286,7 @@ static int llog_client_read_header(const struct lu_env *env, goto out; hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR); - if (hdr == NULL) { + if (!hdr) { rc = -EFAULT; goto out; } @@ -316,8 +317,9 @@ static int llog_client_close(const struct lu_env *env, struct llog_handle *handle) { /* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because - the servers all close the file at the end of every - other LLOG_ RPC. */ + * the servers all close the file at the end of every + * other LLOG_ RPC. 
+ */ return 0; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c index dac66f5b39da..fbccb62213b5 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c +++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c @@ -58,7 +58,7 @@ int llog_initiator_connect(struct llog_ctxt *ctxt) LASSERT(ctxt); new_imp = ctxt->loc_obd->u.cli.cl_import; - LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp, + LASSERTF(!ctxt->loc_imp || ctxt->loc_imp == new_imp, "%p - %p\n", ctxt->loc_imp, new_imp); mutex_lock(&ctxt->loc_mutex); if (ctxt->loc_imp != new_imp) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c index cc55b7973721..cee04efb6fb5 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c @@ -131,7 +131,6 @@ static struct ll_rpc_opcode { { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" }, { SEC_CTX_FINI, "sec_ctx_fini" }, { FLD_QUERY, "fld_query" }, - { UPDATE_OBJ, "update_obj" }, }; static struct ll_eopcode { @@ -192,15 +191,15 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir, unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV; - LASSERT(*debugfs_root_ret == NULL); - LASSERT(*stats_ret == NULL); + LASSERT(!*debugfs_root_ret); + LASSERT(!*stats_ret); svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES, 0); - if (svc_stats == NULL) + if (!svc_stats) return; - if (dir != NULL) { + if (dir) { svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL); if (IS_ERR(svc_debugfs_entry)) { lprocfs_free_stats(&svc_stats); @@ -246,11 +245,11 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir, rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats); if (rc < 0) { - if (dir != NULL) + if (dir) ldebugfs_remove(&svc_debugfs_entry); lprocfs_free_stats(&svc_stats); } else { - if (dir != NULL) + if (dir) *debugfs_root_ret = svc_debugfs_entry; *stats_ret = svc_stats; } @@ -307,7 +306,8 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, /* This sanity check is more of an insanity check; we can still * hose a kernel by allowing the request history to grow too - * far. */ + * far. + */ bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (val > totalram_pages / (2 * bufpages)) return -ERANGE; @@ -454,10 +454,8 @@ static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state) * \param[out] info Holds returned status information */ static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_pol_info *info) + struct ptlrpc_nrs_pol_info *info) { - LASSERT(policy != NULL); - LASSERT(info != NULL); assert_spin_locked(&policy->pol_nrs->nrs_lock); memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX); @@ -508,7 +506,7 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n) spin_unlock(&nrs->nrs_lock); infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS); - if (infos == NULL) { + if (!infos) { rc = -ENOMEM; goto unlock; } @@ -520,8 +518,7 @@ again: pol_idx = 0; - list_for_each_entry(policy, &nrs->nrs_policy_list, - pol_list) { + list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) { LASSERT(pol_idx < num_pols); nrs_policy_get_info_locked(policy, &tmp); @@ -592,7 +589,7 @@ again: * active: 0 */ seq_printf(m, "%s\n", - !hp ? "\nregular_requests:" : "high_priority_requests:"); + !hp ? 
"\nregular_requests:" : "high_priority_requests:"); for (pol_idx = 0; pol_idx < num_pols; pol_idx++) { seq_printf(m, " - name: %s\n" @@ -676,7 +673,7 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file, /** * No [reg|hp] token has been specified */ - if (cmd == NULL) + if (!cmd) goto default_queue; /** @@ -733,15 +730,15 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt, struct list_head *e; struct ptlrpc_request *req; - if (srhi->srhi_req != NULL && - srhi->srhi_seq > svcpt->scp_hist_seq_culled && + if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled && srhi->srhi_seq <= seq) { /* If srhi_req was set previously, hasn't been culled and * we're searching for a seq on or after it (i.e. more * recent), search from it onwards. * Since the service history is LRU (i.e. culled reqs will * be near the head), we shouldn't have to do long - * re-scans */ + * re-scans + */ LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq, "%s:%d: seek seq %llu, request seq %llu\n", svcpt->scp_service->srv_name, svcpt->scp_cpt, @@ -919,7 +916,8 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) * here. The request could contain any old crap, so you * must be just as careful as the service's request * parser. Currently I only print stuff here I know is OK - * to look at coz it was set up in request_in_callback()!!! */ + * to look at coz it was set up in request_in_callback()!!! + */ seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ", req->rq_history_seq, nidstr, libcfs_id2str(req->rq_peer), req->rq_xid, @@ -927,7 +925,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) (s64)req->rq_arrival_time.tv_sec, (long)(req->rq_sent - req->rq_arrival_time.tv_sec), (long)(req->rq_sent - req->rq_deadline)); - if (svc->srv_ops.so_req_printer == NULL) + if (!svc->srv_ops.so_req_printer) seq_putc(s, '\n'); else svc->srv_ops.so_req_printer(s, srhi->srhi_req); @@ -971,7 +969,7 @@ static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n) if (AT_OFF) { seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n", - obd_timeout); + obd_timeout); return 0; } @@ -982,8 +980,8 @@ static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n) s2dhms(&ts, ktime_get_real_seconds() - worstt); seq_printf(m, "%10s : cur %3u worst %3u (at %lld, " - DHMS_FMT" ago) ", "service", - cur, worst, (s64)worstt, DHMS_VARS(&ts)); + DHMS_FMT " ago) ", "service", + cur, worst, (s64)worstt, DHMS_VARS(&ts)); lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate); } @@ -1103,7 +1101,7 @@ void ptlrpc_ldebugfs_register_service(struct dentry *entry, "stats", &svc->srv_debugfs_entry, &svc->srv_stats); - if (svc->srv_debugfs_entry == NULL) + if (IS_ERR_OR_NULL(svc->srv_debugfs_entry)) return; ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL); @@ -1129,7 +1127,7 @@ void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount) int opc = opcode_offset(op); svc_stats = req->rq_import->imp_obd->obd_svc_stats; - if (svc_stats == NULL || opc <= 0) + if (!svc_stats || opc <= 0) return; LASSERT(opc < LUSTRE_MAX_OPCODES); if (!(op == LDLM_ENQUEUE || op == MDS_REINT)) @@ -1166,7 +1164,7 @@ EXPORT_SYMBOL(ptlrpc_lprocfs_brw); void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc) { - if (svc->srv_debugfs_entry != NULL) + if (!IS_ERR_OR_NULL(svc->srv_debugfs_entry)) ldebugfs_remove(&svc->srv_debugfs_entry); if (svc->srv_stats) @@ -1198,7 +1196,7 @@ int lprocfs_wr_ping(struct file *file, const char __user 
*buffer, req = ptlrpc_prep_ping(obd->u.cli.cl_import); up_read(&obd->u.cli.cl_sem); - if (req == NULL) + if (!req) return -ENOMEM; req->rq_send_state = LUSTRE_IMP_FULL; @@ -1298,7 +1296,7 @@ int lprocfs_rd_pinger_recov(struct seq_file *m, void *n) EXPORT_SYMBOL(lprocfs_rd_pinger_recov); int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer, - size_t count, loff_t *off) + size_t count, loff_t *off) { struct obd_device *obd = ((struct seq_file *)file->private_data)->private; struct client_obd *cli = &obd->u.cli; diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c index c5d7ff5cbd73..10b8fe82a342 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c +++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c @@ -56,7 +56,6 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len, lnet_md_t md; LASSERT(portal != 0); - LASSERT(conn != NULL); CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer)); md.start = base; md.length = len; @@ -88,7 +87,8 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len, int rc2; /* We're going to get an UNLINK event when I unlink below, * which will complete just like any other failed send, so - * I fall through and return success here! */ + * I fall through and return success here! + */ CERROR("LNetPut(%s, %d, %lld) failed: %d\n", libcfs_id2str(conn->c_peer), portal, xid, rc); rc2 = LNetMDUnlink(*mdh); @@ -130,7 +130,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) LASSERT(desc->bd_md_count == 0); LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT); LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); - LASSERT(desc->bd_req != NULL); + LASSERT(desc->bd_req); LASSERT(desc->bd_type == BULK_PUT_SINK || desc->bd_type == BULK_GET_SOURCE); @@ -153,7 +153,8 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) * using the same RDMA match bits after an error. * * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The - * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */ + * first bulk XID is power-of-two aligned before rq_xid. LU-1431 + */ xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1); LASSERTF(!(desc->bd_registered && req->rq_send_state != LUSTRE_IMP_REPLAY) || @@ -209,7 +210,8 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) } /* Set rq_xid to matchbits of the final bulk so that server can - * infer the number of bulks that were prepared */ + * infer the number of bulks that were prepared + */ req->rq_xid = --xid; LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK), "bd_last_xid = x%llu, rq_xid = x%llu\n", @@ -260,7 +262,8 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async) /* the unlink ensures the callback happens ASAP and is the last * one. 
If it fails, it must be because completion just happened, * but we must still l_wait_event() in this case to give liblustre - * a chance to run client_bulk_callback() */ + * a chance to run client_bulk_callback() + */ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); if (ptlrpc_client_bulk_active(req) == 0) /* completed or */ @@ -273,14 +276,15 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async) if (async) return 0; - if (req->rq_set != NULL) + if (req->rq_set) wq = &req->rq_set->set_waitq; else wq = &req->rq_reply_waitq; for (;;) { /* Network access will complete in finite time but the HUGE - * timeout lets us CWARN for visibility of sluggish NALs */ + * timeout lets us CWARN for visibility of sluggish LNDs + */ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK), cfs_time_seconds(1), NULL, NULL); rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi); @@ -305,13 +309,13 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags) req->rq_arrival_time.tv_sec, 1); if (!(flags & PTLRPC_REPLY_EARLY) && - (req->rq_type != PTL_RPC_MSG_ERR) && - (req->rq_reqmsg != NULL) && + (req->rq_type != PTL_RPC_MSG_ERR) && req->rq_reqmsg && !(lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) { /* early replies, errors and recovery requests don't count - * toward our service time estimate */ + * toward our service time estimate + */ int oldse = at_measured(&svcpt->scp_at_estimate, service_time); if (oldse != 0) { @@ -325,7 +329,8 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags) lustre_msg_set_service_time(req->rq_repmsg, service_time); /* Report service time estimate for future client reqs, but report 0 * (to be ignored by client) if it's a error reply during recovery. - * (bz15815) */ + * (bz15815) + */ if (req->rq_type == PTL_RPC_MSG_ERR && !req->rq_export) lustre_msg_set_timeout(req->rq_repmsg, 0); else @@ -360,10 +365,10 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags) * target_queue_final_reply(). 
*/ LASSERT(req->rq_no_reply == 0); - LASSERT(req->rq_reqbuf != NULL); - LASSERT(rs != NULL); + LASSERT(req->rq_reqbuf); + LASSERT(rs); LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult); - LASSERT(req->rq_repmsg != NULL); + LASSERT(req->rq_repmsg); LASSERT(req->rq_repmsg == rs->rs_msg); LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback); LASSERT(rs->rs_cb_id.cbid_arg == rs); @@ -403,12 +408,12 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags) ptlrpc_at_set_reply(req, flags); - if (req->rq_export == NULL || req->rq_export->exp_connection == NULL) + if (!req->rq_export || !req->rq_export->exp_connection) conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL); else conn = ptlrpc_connection_addref(req->rq_export->exp_connection); - if (unlikely(conn == NULL)) { + if (unlikely(!conn)) { CERROR("not replying on NULL connection\n"); /* bug 9635 */ return -ENOTCONN; } @@ -498,14 +503,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) LASSERT(request->rq_wait_ctx == 0); /* If this is a re-transmit, we're required to have disengaged - * cleanly from the previous attempt */ + * cleanly from the previous attempt + */ LASSERT(!request->rq_receiving_reply); LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) && - (request->rq_import->imp_state == LUSTRE_IMP_FULL))); + (request->rq_import->imp_state == LUSTRE_IMP_FULL))); - if (unlikely(obd != NULL && obd->obd_fail)) { + if (unlikely(obd && obd->obd_fail)) { CDEBUG(D_HA, "muting rpc for failed imp obd %s\n", - obd->obd_name); + obd->obd_name); /* this prevents us from waiting in ptlrpc_queue_wait */ spin_lock(&request->rq_lock); request->rq_err = 1; @@ -535,7 +541,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) goto out; /* bulk register should be done after wrap_request() */ - if (request->rq_bulk != NULL) { + if (request->rq_bulk) { rc = ptlrpc_register_bulk(request); if (rc != 0) goto out; @@ -543,14 +549,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) if (!noreply) { LASSERT(request->rq_replen != 0); - if (request->rq_repbuf == NULL) { - LASSERT(request->rq_repdata == NULL); - LASSERT(request->rq_repmsg == NULL); + if (!request->rq_repbuf) { + LASSERT(!request->rq_repdata); + LASSERT(!request->rq_repmsg); rc = sptlrpc_cli_alloc_repbuf(request, request->rq_replen); if (rc) { /* this prevents us from looping in - * ptlrpc_queue_wait */ + * ptlrpc_queue_wait + */ spin_lock(&request->rq_lock); request->rq_err = 1; spin_unlock(&request->rq_lock); @@ -602,7 +609,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) reply_md.eq_handle = ptlrpc_eq_h; /* We must see the unlink callback to unset rq_reply_unlink, - so we can't auto-unlink */ + * so we can't auto-unlink + */ rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN, &request->rq_reply_md_h); if (rc != 0) { @@ -623,7 +631,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) /* add references on request for request_out_callback */ ptlrpc_request_addref(request); - if (obd != NULL && obd->obd_svc_stats != NULL) + if (obd && obd->obd_svc_stats) lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR, atomic_read(&request->rq_import->imp_inflight)); @@ -632,7 +640,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) ktime_get_real_ts64(&request->rq_arrival_time); request->rq_sent = ktime_get_real_seconds(); /* We give the server rq_timeout secs to process the req, and - add the network latency for our local timeout. 
*/ + * add the network latency for our local timeout. + */ request->rq_deadline = request->rq_sent + request->rq_timeout + ptlrpc_at_get_net_latency(request); @@ -656,7 +665,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) cleanup_me: /* MEUnlink is safe; the PUT didn't even get off the ground, and * nobody apart from the PUT's target has the right nid+XID to - * access the reply buffer. */ + * access the reply buffer. + */ rc2 = LNetMEUnlink(reply_me_h); LASSERT(rc2 == 0); /* UNLINKED callback called synchronously */ @@ -664,7 +674,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) cleanup_bulk: /* We do sync unlink here as there was no real transfer here so - * the chance to have long unlink to sluggish net is smaller here. */ + * the chance to have long unlink to sluggish net is smaller here. + */ ptlrpc_unregister_bulk(request, 0); out: if (request->rq_memalloc) @@ -692,7 +703,8 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd) /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL, * which means buffer can only be attached on local CPT, and LND - * threads can find it by grabbing a local lock */ + * threads can find it by grabbing a local lock + */ rc = LNetMEAttach(service->srv_req_portal, match_id, 0, ~0, LNET_UNLINK, rqbd->rqbd_svcpt->scp_cpt >= 0 ? diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c index 7044e1ff6692..710fb806f122 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c +++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c @@ -13,10 +13,6 @@ * GNU General Public License version 2 for more details. A copy is * included in the COPYING file that accompanied this code. - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * GPL HEADER END */ /* @@ -47,9 +43,6 @@ #include "../../include/linux/libcfs/libcfs.h" #include "ptlrpc_internal.h" -/* XXX: This is just for liblustre. Remove the #if defined directive when the - * "cfs_" prefix is dropped from cfs_list_head. */ - /** * NRS core object. */ @@ -57,7 +50,7 @@ struct nrs_core nrs_core; static int nrs_policy_init(struct ptlrpc_nrs_policy *policy) { - return policy->pol_desc->pd_ops->op_policy_init != NULL ? + return policy->pol_desc->pd_ops->op_policy_init ? policy->pol_desc->pd_ops->op_policy_init(policy) : 0; } @@ -66,7 +59,7 @@ static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy) LASSERT(policy->pol_ref == 0); LASSERT(policy->pol_req_queued == 0); - if (policy->pol_desc->pd_ops->op_policy_fini != NULL) + if (policy->pol_desc->pd_ops->op_policy_fini) policy->pol_desc->pd_ops->op_policy_fini(policy); } @@ -82,7 +75,7 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy, if (policy->pol_state == NRS_POL_STATE_STOPPED) return -ENODEV; - return policy->pol_desc->pd_ops->op_policy_ctl != NULL ? + return policy->pol_desc->pd_ops->op_policy_ctl ? 
policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) : -ENOSYS; } @@ -91,7 +84,7 @@ static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy) { struct ptlrpc_nrs *nrs = policy->pol_nrs; - if (policy->pol_desc->pd_ops->op_policy_stop != NULL) { + if (policy->pol_desc->pd_ops->op_policy_stop) { spin_unlock(&nrs->nrs_lock); policy->pol_desc->pd_ops->op_policy_stop(policy); @@ -154,7 +147,7 @@ static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs) { struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary; - if (tmp == NULL) + if (!tmp) return; nrs->nrs_policy_primary = NULL; @@ -220,12 +213,12 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy) * nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can * register with NRS core. */ - LASSERT(nrs->nrs_policy_fallback == NULL); + LASSERT(!nrs->nrs_policy_fallback); } else { /** * Shouldn't start primary policy if w/o fallback policy. */ - if (nrs->nrs_policy_fallback == NULL) + if (!nrs->nrs_policy_fallback) return -EPERM; if (policy->pol_state == NRS_POL_STATE_STARTED) @@ -311,7 +304,7 @@ static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy) policy->pol_ref--; if (unlikely(policy->pol_ref == 0 && - policy->pol_state == NRS_POL_STATE_STOPPING)) + policy->pol_state == NRS_POL_STATE_STOPPING)) nrs_policy_stop0(policy); } @@ -326,7 +319,7 @@ static void nrs_policy_put(struct ptlrpc_nrs_policy *policy) * Find and return a policy by name. */ static struct ptlrpc_nrs_policy *nrs_policy_find_locked(struct ptlrpc_nrs *nrs, - char *name) + char *name) { struct ptlrpc_nrs_policy *tmp; @@ -348,10 +341,10 @@ static void nrs_resource_put(struct ptlrpc_nrs_resource *res) { struct ptlrpc_nrs_policy *policy = res->res_policy; - if (policy->pol_desc->pd_ops->op_res_put != NULL) { + if (policy->pol_desc->pd_ops->op_res_put) { struct ptlrpc_nrs_resource *parent; - for (; res != NULL; res = parent) { + for (; res; res = parent) { parent = res->res_parent; policy->pol_desc->pd_ops->op_res_put(policy, res); } @@ -390,12 +383,11 @@ struct ptlrpc_nrs_resource *nrs_resource_get(struct ptlrpc_nrs_policy *policy, rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res, &tmp, moving_req); if (rc < 0) { - if (res != NULL) + if (res) nrs_resource_put(res); return NULL; } - LASSERT(tmp != NULL); tmp->res_parent = res; tmp->res_policy = policy; res = tmp; @@ -445,7 +437,7 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, nrs_policy_get_locked(fallback); primary = nrs->nrs_policy_primary; - if (primary != NULL) + if (primary) nrs_policy_get_locked(primary); spin_unlock(&nrs->nrs_lock); @@ -454,9 +446,9 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, * Obtain resource hierarchy references. */ resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req); - LASSERT(resp[NRS_RES_FALLBACK] != NULL); + LASSERT(resp[NRS_RES_FALLBACK]); - if (primary != NULL) { + if (primary) { resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq, moving_req); /** @@ -465,7 +457,7 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, * reference on the policy as it will not be used for this * request. 
*/ - if (resp[NRS_RES_PRIMARY] == NULL) + if (!resp[NRS_RES_PRIMARY]) nrs_policy_put(primary); } } @@ -482,11 +474,10 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp) { struct ptlrpc_nrs_policy *pols[NRS_RES_MAX]; - struct ptlrpc_nrs *nrs = NULL; int i; for (i = 0; i < NRS_RES_MAX; i++) { - if (resp[i] != NULL) { + if (resp[i]) { pols[i] = resp[i]->res_policy; nrs_resource_put(resp[i]); resp[i] = NULL; @@ -496,18 +487,9 @@ static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp) } for (i = 0; i < NRS_RES_MAX; i++) { - if (pols[i] == NULL) - continue; - - if (nrs == NULL) { - nrs = pols[i]->pol_nrs; - spin_lock(&nrs->nrs_lock); - } - nrs_policy_put_locked(pols[i]); + if (pols[i]) + nrs_policy_put(pols[i]); } - - if (nrs != NULL) - spin_unlock(&nrs->nrs_lock); } /** @@ -536,7 +518,7 @@ struct ptlrpc_nrs_request *nrs_request_get(struct ptlrpc_nrs_policy *policy, nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force); - LASSERT(ergo(nrq != NULL, nrs_request_policy(nrq) == policy)); + LASSERT(ergo(nrq, nrs_request_policy(nrq) == policy)); return nrq; } @@ -562,7 +544,7 @@ static inline void nrs_request_enqueue(struct ptlrpc_nrs_request *nrq) * the preferred choice. */ for (i = NRS_RES_MAX - 1; i >= 0; i--) { - if (nrq->nr_res_ptrs[i] == NULL) + if (!nrq->nr_res_ptrs[i]) continue; nrq->nr_res_idx = i; @@ -632,7 +614,7 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name, spin_lock(&nrs->nrs_lock); policy = nrs_policy_find_locked(nrs, name); - if (policy == NULL) { + if (!policy) { rc = -ENOENT; goto out; } @@ -654,7 +636,7 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name, break; } out: - if (policy != NULL) + if (policy) nrs_policy_put_locked(policy); spin_unlock(&nrs->nrs_lock); @@ -679,7 +661,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name) spin_lock(&nrs->nrs_lock); policy = nrs_policy_find_locked(nrs, name); - if (policy == NULL) { + if (!policy) { spin_unlock(&nrs->nrs_lock); CERROR("Can't find NRS policy %s\n", name); @@ -712,7 +694,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name) nrs_policy_fini(policy); - LASSERT(policy->pol_private == NULL); + LASSERT(!policy->pol_private); kfree(policy); return 0; @@ -736,18 +718,16 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs, struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt; int rc; - LASSERT(svcpt != NULL); - LASSERT(desc->pd_ops != NULL); - LASSERT(desc->pd_ops->op_res_get != NULL); - LASSERT(desc->pd_ops->op_req_get != NULL); - LASSERT(desc->pd_ops->op_req_enqueue != NULL); - LASSERT(desc->pd_ops->op_req_dequeue != NULL); - LASSERT(desc->pd_compat != NULL); + LASSERT(desc->pd_ops->op_res_get); + LASSERT(desc->pd_ops->op_req_get); + LASSERT(desc->pd_ops->op_req_enqueue); + LASSERT(desc->pd_ops->op_req_dequeue); + LASSERT(desc->pd_compat); policy = kzalloc_node(sizeof(*policy), GFP_NOFS, cfs_cpt_spread_node(svcpt->scp_service->srv_cptable, svcpt->scp_cpt)); - if (policy == NULL) + if (!policy) return -ENOMEM; policy->pol_nrs = nrs; @@ -767,7 +747,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs, spin_lock(&nrs->nrs_lock); tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name); - if (tmp != NULL) { + if (tmp) { CERROR("NRS policy %s has been registered, can't register it for %s\n", policy->pol_desc->pd_name, svcpt->scp_service->srv_name); @@ -817,7 +797,7 @@ static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req) */ if 
(unlikely(list_empty(&policy->pol_list_queued))) list_add_tail(&policy->pol_list_queued, - &policy->pol_nrs->nrs_policy_queued); + &policy->pol_nrs->nrs_policy_queued); } /** @@ -957,14 +937,14 @@ static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt) /** * Optionally allocate a high-priority NRS head. */ - if (svcpt->scp_service->srv_ops.so_hpreq_handler == NULL) + if (!svcpt->scp_service->srv_ops.so_hpreq_handler) goto out; svcpt->scp_nrs_hp = kzalloc_node(sizeof(*svcpt->scp_nrs_hp), GFP_NOFS, cfs_cpt_spread_node(svcpt->scp_service->srv_cptable, svcpt->scp_cpt)); - if (svcpt->scp_nrs_hp == NULL) { + if (!svcpt->scp_nrs_hp) { rc = -ENOMEM; goto out; } @@ -998,8 +978,7 @@ again: nrs = nrs_svcpt2nrs(svcpt, hp); nrs->nrs_stopping = 1; - list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, - pol_list) { + list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) { rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name); LASSERT(rc == 0); } @@ -1089,7 +1068,7 @@ again: } } - if (desc->pd_ops->op_lprocfs_fini != NULL) + if (desc->pd_ops->op_lprocfs_fini) desc->pd_ops->op_lprocfs_fini(svc); } @@ -1115,15 +1094,15 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) { struct ptlrpc_service *svc; struct ptlrpc_nrs_pol_desc *desc; + size_t len; int rc = 0; - LASSERT(conf != NULL); - LASSERT(conf->nc_ops != NULL); - LASSERT(conf->nc_compat != NULL); + LASSERT(conf->nc_ops); + LASSERT(conf->nc_compat); LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one, - conf->nc_compat_svc_name != NULL)); + conf->nc_compat_svc_name)); LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0, - conf->nc_owner != NULL)); + conf->nc_owner)); conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0'; @@ -1146,7 +1125,7 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) mutex_lock(&nrs_core.nrs_mutex); - if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) { + if (nrs_policy_find_desc_locked(conf->nc_name)) { CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n", conf->nc_name); rc = -EEXIST; @@ -1159,7 +1138,12 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) goto fail; } - strncpy(desc->pd_name, conf->nc_name, NRS_POL_NAME_MAX); + len = strlcpy(desc->pd_name, conf->nc_name, sizeof(desc->pd_name)); + if (len >= sizeof(desc->pd_name)) { + kfree(desc); + rc = -E2BIG; + goto fail; + } desc->pd_ops = conf->nc_ops; desc->pd_compat = conf->nc_compat; desc->pd_compat_svc_name = conf->nc_compat_svc_name; @@ -1224,7 +1208,7 @@ again: * No need to take a reference to other modules here, as we * will be calling from the module's init() function. 
*/ - if (desc->pd_ops->op_lprocfs_init != NULL) { + if (desc->pd_ops->op_lprocfs_init) { rc = desc->pd_ops->op_lprocfs_init(svc); if (rc != 0) { rc2 = nrs_policy_unregister_locked(desc); @@ -1288,7 +1272,7 @@ int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc) if (!nrs_policy_compatible(svc, desc)) continue; - if (desc->pd_ops->op_lprocfs_init != NULL) { + if (desc->pd_ops->op_lprocfs_init) { rc = desc->pd_ops->op_lprocfs_init(svc); if (rc != 0) goto failed; @@ -1329,7 +1313,7 @@ void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc) if (!nrs_policy_compatible(svc, desc)) continue; - if (desc->pd_ops->op_lprocfs_fini != NULL) + if (desc->pd_ops->op_lprocfs_fini) desc->pd_ops->op_lprocfs_fini(svc); } @@ -1376,7 +1360,8 @@ void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req) if (req->rq_nrq.nr_initialized) { nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs); /* no protection on bit nr_initialized because no - * contention at this late stage */ + * contention at this late stage + */ req->rq_nrq.nr_finalized = 1; } } @@ -1434,7 +1419,7 @@ static void nrs_request_removed(struct ptlrpc_nrs_policy *policy) policy->pol_nrs->nrs_req_queued); list_move_tail(&policy->pol_list_queued, - &policy->pol_nrs->nrs_policy_queued); + &policy->pol_nrs->nrs_policy_queued); } } @@ -1466,10 +1451,9 @@ ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp, * Always try to drain requests from all NRS polices even if they are * inactive, because the user can change policy status at runtime. */ - list_for_each_entry(policy, &nrs->nrs_policy_queued, - pol_list_queued) { + list_for_each_entry(policy, &nrs->nrs_policy_queued, pol_list_queued) { nrq = nrs_request_get(policy, peek, force); - if (nrq != NULL) { + if (nrq) { if (likely(!peek)) { nrq->nr_started = 1; @@ -1619,8 +1603,7 @@ void ptlrpc_nrs_fini(void) struct ptlrpc_nrs_pol_desc *desc; struct ptlrpc_nrs_pol_desc *tmp; - list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, - pd_list) { + list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, pd_list) { list_del_init(&desc->pd_list); kfree(desc); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c index 8e21f0cdc8f8..b123a93242ba 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c +++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c @@ -13,10 +13,6 @@ * GNU General Public License version 2 for more details. A copy is * included in the COPYING file that accompanied this code. - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * GPL HEADER END */ /* @@ -83,7 +79,7 @@ static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy) head = kzalloc_node(sizeof(*head), GFP_NOFS, cfs_cpt_spread_node(nrs_pol2cptab(policy), nrs_pol2cptid(policy))); - if (head == NULL) + if (!head) return -ENOMEM; INIT_LIST_HEAD(&head->fh_list); @@ -104,7 +100,7 @@ static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy) { struct nrs_fifo_head *head = policy->pol_private; - LASSERT(head != NULL); + LASSERT(head); LASSERT(list_empty(&head->fh_list)); kfree(head); @@ -167,9 +163,9 @@ struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy, nrq = unlikely(list_empty(&head->fh_list)) ? 
NULL : list_entry(head->fh_list.next, struct ptlrpc_nrs_request, - nr_u.fifo.fr_list); + nr_u.fifo.fr_list); - if (likely(!peek && nrq != NULL)) { + if (likely(!peek && nrq)) { struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request, rq_nrq); diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c index f3cb5184fa85..492d63fad6f9 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c @@ -133,7 +133,8 @@ EXPORT_SYMBOL(lustre_msg_size_v2); * NOTE: this should only be used for NEW requests, and should always be * in the form of a v2 request. If this is a connection to a v1 * target then the first buffer will be stripped because the ptlrpc - * data is part of the lustre_msg_v1 header. b=14043 */ + * data is part of the lustre_msg_v1 header. b=14043 + */ int lustre_msg_size(__u32 magic, int count, __u32 *lens) { __u32 size[] = { sizeof(struct ptlrpc_body) }; @@ -157,7 +158,8 @@ int lustre_msg_size(__u32 magic, int count, __u32 *lens) EXPORT_SYMBOL(lustre_msg_size); /* This is used to determine the size of a buffer that was already packed - * and will correctly handle the different message formats. */ + * and will correctly handle the different message formats. + */ int lustre_packed_msg_size(struct lustre_msg *msg) { switch (msg->lm_magic) { @@ -183,7 +185,7 @@ void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens, for (i = 0; i < count; i++) msg->lm_buflens[i] = lens[i]; - if (bufs == NULL) + if (!bufs) return; ptr = (char *)msg + lustre_msg_hdr_size_v2(count); @@ -267,7 +269,8 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt) spin_unlock(&svcpt->scp_rep_lock); /* If we cannot get anything for some long time, we better - * bail out instead of waiting infinitely */ + * bail out instead of waiting infinitely + */ lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL); rc = l_wait_event(svcpt->scp_rep_waitq, !list_empty(&svcpt->scp_rep_idle), &lwi); @@ -277,7 +280,7 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt) } rs = list_entry(svcpt->scp_rep_idle.next, - struct ptlrpc_reply_state, rs_list); + struct ptlrpc_reply_state, rs_list); list_del(&rs->rs_list); spin_unlock(&svcpt->scp_rep_lock); @@ -306,7 +309,7 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count, struct ptlrpc_reply_state *rs; int msg_len, rc; - LASSERT(req->rq_reply_state == NULL); + LASSERT(!req->rq_reply_state); if ((flags & LPRFL_EARLY_REPLY) == 0) { spin_lock(&req->rq_lock); @@ -383,7 +386,6 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size) { int i, offset, buflen, bufcount; - LASSERT(m != NULL); LASSERT(n >= 0); bufcount = m->lm_bufcount; @@ -488,7 +490,7 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs) LASSERT(!rs->rs_difficult || rs->rs_handled); LASSERT(!rs->rs_on_net); LASSERT(!rs->rs_scheduled); - LASSERT(rs->rs_export == NULL); + LASSERT(!rs->rs_export); LASSERT(rs->rs_nlocks == 0); LASSERT(list_empty(&rs->rs_exp_list)); LASSERT(list_empty(&rs->rs_obd_list)); @@ -677,7 +679,8 @@ int lustre_msg_buflen(struct lustre_msg *m, int n) EXPORT_SYMBOL(lustre_msg_buflen); /* NB return the bufcount for lustre_msg_v2 format, so if message is packed - * in V1 format, the result is one bigger. (add struct ptlrpc_body). */ + * in V1 format, the result is one bigger. (add struct ptlrpc_body). 
+ */ int lustre_msg_bufcount(struct lustre_msg *m) { switch (m->lm_magic) { @@ -705,7 +708,7 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len) LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic); } - if (str == NULL) { + if (!str) { CERROR("can't unpack string in msg %p buffer[%d]\n", m, index); return NULL; } @@ -740,7 +743,6 @@ static inline void *__lustre_swab_buf(struct lustre_msg *msg, int index, { void *ptr = NULL; - LASSERT(msg != NULL); switch (msg->lm_magic) { case LUSTRE_MSG_MAGIC_V2: ptr = lustre_msg_buf_v2(msg, index, min_size); @@ -799,7 +801,8 @@ __u32 lustre_msg_get_flags(struct lustre_msg *msg) /* no break */ default: /* flags might be printed in debug code while message - * uninitialized */ + * uninitialized + */ return 0; } } @@ -1032,7 +1035,8 @@ int lustre_msg_get_status(struct lustre_msg *msg) /* no break */ default: /* status might be printed in debug code while message - * uninitialized */ + * uninitialized + */ return -EINVAL; } } @@ -1368,7 +1372,8 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid) struct ptlrpc_body *pb; /* Don't set jobid for ldlm ast RPCs, they've been shrunk. - * See the comment in ptlrpc_request_pack(). */ + * See the comment in ptlrpc_request_pack(). + */ if (!opc || opc == LDLM_BL_CALLBACK || opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK) return; @@ -1377,7 +1382,7 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid) sizeof(struct ptlrpc_body)); LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - if (jobid != NULL) + if (jobid) memcpy(pb->pb_jobid, jobid, JOBSTATS_JOBID_SIZE); else if (pb->pb_jobid[0] == '\0') lustre_get_jobid(pb->pb_jobid); @@ -1427,7 +1432,7 @@ int do_set_info_async(struct obd_import *imp, int rc; req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -1488,7 +1493,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) * clients and servers without ptlrpc_body_v2 (< 2.3) * do not swab any fields beyond pb_jobid, as we are * using this swab function for both ptlrpc_body - * and ptlrpc_body_v2. */ + * and ptlrpc_body_v2. + */ CLASSERT(offsetof(typeof(*b), pb_jobid) != 0); } EXPORT_SYMBOL(lustre_swab_ptlrpc_body); @@ -1502,7 +1508,8 @@ void lustre_swab_connect(struct obd_connect_data *ocd) __swab32s(&ocd->ocd_index); __swab32s(&ocd->ocd_brw_size); /* ocd_blocksize and ocd_inodespace don't need to be swabbed because - * they are 8-byte values */ + * they are 8-byte values + */ __swab16s(&ocd->ocd_grant_extent); __swab32s(&ocd->ocd_unused); __swab64s(&ocd->ocd_transno); @@ -1512,7 +1519,8 @@ void lustre_swab_connect(struct obd_connect_data *ocd) /* Fields after ocd_cksum_types are only accessible by the receiver * if the corresponding flag in ocd_connect_flags is set. Accessing * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. */ + * may result in out-of-bound memory access and kernel oops. 
+ */ if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE) __swab32s(&ocd->ocd_max_easize); if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES) @@ -1848,20 +1856,6 @@ void lustre_swab_fiemap(struct ll_user_fiemap *fiemap) } EXPORT_SYMBOL(lustre_swab_fiemap); -void lustre_swab_idx_info(struct idx_info *ii) -{ - __swab32s(&ii->ii_magic); - __swab32s(&ii->ii_flags); - __swab16s(&ii->ii_count); - __swab32s(&ii->ii_attrs); - lustre_swab_lu_fid(&ii->ii_fid); - __swab64s(&ii->ii_version); - __swab64s(&ii->ii_hash_start); - __swab64s(&ii->ii_hash_end); - __swab16s(&ii->ii_keysize); - __swab16s(&ii->ii_recsize); -} - void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr) { __swab32s(&rr->rr_opcode); @@ -1914,7 +1908,7 @@ static void print_lum(struct lov_user_md *lum) CDEBUG(D_OTHER, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size); CDEBUG(D_OTHER, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count); CDEBUG(D_OTHER, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n", - lum->lmm_stripe_offset); + lum->lmm_stripe_offset); } static void lustre_swab_lmm_oi(struct ost_id *oi) @@ -1986,7 +1980,8 @@ static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d) { /* the lock data is a union and the first two fields are always an * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock - * data the same way. */ + * data the same way. + */ __swab64s(&d->l_extent.start); __swab64s(&d->l_extent.end); __swab64s(&d->l_extent.gid); @@ -2035,16 +2030,6 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r) } EXPORT_SYMBOL(lustre_swab_ldlm_reply); -void lustre_swab_quota_body(struct quota_body *b) -{ - lustre_swab_lu_fid(&b->qb_fid); - lustre_swab_lu_fid((struct lu_fid *)&b->qb_id); - __swab32s(&b->qb_flags); - __swab64s(&b->qb_count); - __swab64s(&b->qb_usage); - __swab64s(&b->qb_slv_ver); -} - /* Dump functions */ void dump_ioo(struct obd_ioobj *ioo) { @@ -2288,24 +2273,6 @@ void lustre_swab_hsm_request(struct hsm_request *hr) } EXPORT_SYMBOL(lustre_swab_hsm_request); -void lustre_swab_update_buf(struct update_buf *ub) -{ - __swab32s(&ub->ub_magic); - __swab32s(&ub->ub_count); -} -EXPORT_SYMBOL(lustre_swab_update_buf); - -void lustre_swab_update_reply_buf(struct update_reply *ur) -{ - int i; - - __swab32s(&ur->ur_version); - __swab32s(&ur->ur_count); - for (i = 0; i < ur->ur_count; i++) - __swab32s(&ur->ur_lens[i]); -} -EXPORT_SYMBOL(lustre_swab_update_reply_buf); - void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl) { __swab64s(&msl->msl_flags); diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c index fb2d5236a971..8a869315c258 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c @@ -68,7 +68,7 @@ int ptlrpc_obd_ping(struct obd_device *obd) struct ptlrpc_request *req; req = ptlrpc_prep_ping(obd->u.cli.cl_import); - if (req == NULL) + if (!req) return -ENOMEM; req->rq_send_state = LUSTRE_IMP_FULL; @@ -86,7 +86,7 @@ static int ptlrpc_ping(struct obd_import *imp) struct ptlrpc_request *req; req = ptlrpc_prep_ping(imp); - if (req == NULL) { + if (!req) { CERROR("OOM trying to ping %s->%s\n", imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); @@ -242,7 +242,7 @@ static int ptlrpc_pinger_main(void *arg) list_for_each(iter, &pinger_imports) { struct obd_import *imp = list_entry(iter, struct obd_import, - imp_pinger_chain); + imp_pinger_chain); ptlrpc_pinger_process_import(imp, this_ping); /* obd_timeout might have changed */ @@ -257,11 +257,12 @@ static int ptlrpc_pinger_main(void 
*arg) /* Wait until the next ping time, or until we're stopped. */ time_to_next_wake = pinger_check_timeout(this_ping); /* The ping sent by ptlrpc_send_rpc may get sent out - say .01 second after this. - ptlrpc_pinger_sending_on_import will then set the - next ping time to next_ping + .01 sec, which means - we will SKIP the next ping at next_ping, and the - ping will get sent 2 timeouts from now! Beware. */ + * say .01 second after this. + * ptlrpc_pinger_sending_on_import will then set the + * next ping time to next_ping + .01 sec, which means + * we will SKIP the next ping at next_ping, and the + * ping will get sent 2 timeouts from now! Beware. + */ CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n", time_to_next_wake, cfs_time_add(this_ping, @@ -293,6 +294,7 @@ static struct ptlrpc_thread pinger_thread; int ptlrpc_start_pinger(void) { struct l_wait_info lwi = { 0 }; + struct task_struct *task; int rc; if (!thread_is_init(&pinger_thread) && @@ -303,10 +305,11 @@ int ptlrpc_start_pinger(void) strcpy(pinger_thread.t_name, "ll_ping"); - rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread, - "%s", pinger_thread.t_name)); - if (IS_ERR_VALUE(rc)) { - CERROR("cannot start thread: %d\n", rc); + task = kthread_run(ptlrpc_pinger_main, &pinger_thread, + pinger_thread.t_name); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("cannot start pinger thread: rc = %d\n", rc); return rc; } l_wait_event(pinger_thread.t_ctl_waitq, @@ -401,7 +404,8 @@ EXPORT_SYMBOL(ptlrpc_pinger_del_import); * be called when timeout happens. */ static struct timeout_item *ptlrpc_new_timeout(int time, - enum timeout_event event, timeout_cb_t cb, void *data) + enum timeout_event event, + timeout_cb_t cb, void *data) { struct timeout_item *ti; @@ -489,7 +493,6 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list, break; } } - LASSERTF(ti != NULL, "ti is NULL !\n"); if (list_empty(&ti->ti_obd_list)) { list_del(&ti->ti_chain); kfree(ti); diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h index 8f67e0562b73..6ca26c98de1b 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h @@ -101,8 +101,6 @@ struct nrs_core { * registration/unregistration, and NRS core lprocfs operations. */ struct mutex nrs_mutex; - /* XXX: This is just for liblustre. Remove the #if defined directive - * when the * "cfs_" prefix is dropped from cfs_list_head. */ /** * List of all policy descriptors registered with NRS core; protected * by nrs_core::nrs_mutex. diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c index c4f1d0f5deb2..a8ec0e9d7b2e 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c @@ -162,8 +162,8 @@ static void __exit ptlrpc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); MODULE_DESCRIPTION("Lustre Request Processor and Lock Management"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.0"); module_init(ptlrpc_init); module_exit(ptlrpc_exit); diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c index 60fb0ced7137..db003f5da09e 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c @@ -163,8 +163,6 @@ void ptlrpcd_wake(struct ptlrpc_request *req) { struct ptlrpc_request_set *rq_set = req->rq_set; - LASSERT(rq_set != NULL); - wake_up(&rq_set->set_waitq); } EXPORT_SYMBOL(ptlrpcd_wake); @@ -176,7 +174,7 @@ ptlrpcd_select_pc(struct ptlrpc_request *req) int cpt; int idx; - if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL) + if (req && req->rq_send_state != LUSTRE_IMP_FULL) return &ptlrpcd_rcv; cpt = cfs_cpt_current(cfs_cpt_table, 1); @@ -209,11 +207,10 @@ static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des, if (likely(!list_empty(&src->set_new_requests))) { list_for_each_safe(pos, tmp, &src->set_new_requests) { req = list_entry(pos, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); req->rq_set = des; } - list_splice_init(&src->set_new_requests, - &des->set_requests); + list_splice_init(&src->set_new_requests, &des->set_requests); rc = atomic_read(&src->set_new_count); atomic_add(rc, &des->set_remaining); atomic_set(&src->set_new_count, 0); @@ -240,10 +237,11 @@ void ptlrpcd_add_req(struct ptlrpc_request *req) req->rq_invalid_rqset = 0; spin_unlock(&req->rq_lock); - l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi); + l_wait_event(req->rq_set_waitq, !req->rq_set, &lwi); } else if (req->rq_set) { /* If we have a valid "rq_set", just reuse it to avoid double - * linked. */ + * linked. + */ LASSERT(req->rq_phase == RQ_PHASE_NEW); LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY); @@ -286,9 +284,9 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) spin_lock(&set->set_new_req_lock); if (likely(!list_empty(&set->set_new_requests))) { list_splice_init(&set->set_new_requests, - &set->set_requests); + &set->set_requests); atomic_add(atomic_read(&set->set_new_count), - &set->set_remaining); + &set->set_remaining); atomic_set(&set->set_new_count, 0); /* * Need to calculate its timeout. @@ -321,7 +319,8 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) rc |= ptlrpc_check_set(env, set); /* NB: ptlrpc_check_set has already moved completed request at the - * head of seq::set_requests */ + * head of seq::set_requests + */ list_for_each_safe(pos, tmp, &set->set_requests) { req = list_entry(pos, struct ptlrpc_request, rq_set_chain); if (req->rq_phase != RQ_PHASE_COMPLETE) @@ -339,7 +338,8 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) rc = atomic_read(&set->set_new_count); /* If we have nothing to do, check whether we can take some - * work from our partner threads. */ + * work from our partner threads. 
+ */ if (rc == 0 && pc->pc_npartners > 0) { struct ptlrpcd_ctl *partner; struct ptlrpc_request_set *ps; @@ -349,12 +349,12 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) partner = pc->pc_partners[pc->pc_cursor++]; if (pc->pc_cursor >= pc->pc_npartners) pc->pc_cursor = 0; - if (partner == NULL) + if (!partner) continue; spin_lock(&partner->pc_lock); ps = partner->pc_set; - if (ps == NULL) { + if (!ps) { spin_unlock(&partner->pc_lock); continue; } @@ -422,7 +422,6 @@ static int ptlrpcd(void *arg) complete(&pc->pc_starting); /* - * This mainloop strongly resembles ptlrpc_set_wait() except that our * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when * there are requests in the set. New requests come in on the set's @@ -580,7 +579,7 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc) return 0; out_set: - if (pc->pc_set != NULL) { + if (pc->pc_set) { struct ptlrpc_request_set *set = pc->pc_set; spin_lock(&pc->pc_lock); @@ -631,7 +630,7 @@ void ptlrpcd_free(struct ptlrpcd_ctl *pc) out: if (pc->pc_npartners > 0) { - LASSERT(pc->pc_partners != NULL); + LASSERT(pc->pc_partners); kfree(pc->pc_partners); pc->pc_partners = NULL; @@ -645,7 +644,7 @@ static void ptlrpcd_fini(void) int i; int j; - if (ptlrpcds != NULL) { + if (ptlrpcds) { for (i = 0; i < ptlrpcds_num; i++) { if (!ptlrpcds[i]) break; diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c index db6626cab6f2..5f27d9c2e4ef 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/recover.c +++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c @@ -107,14 +107,14 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) /* Replay all the committed open requests on committed_list first */ if (!list_empty(&imp->imp_committed_list)) { tmp = imp->imp_committed_list.prev; - req = list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); /* The last request on committed_list hasn't been replayed */ if (req->rq_transno > last_transno) { /* Since the imp_committed_list is immutable before * all of it's requests being replayed, it's safe to - * use a cursor to accelerate the search */ + * use a cursor to accelerate the search + */ imp->imp_replay_cursor = imp->imp_replay_cursor->next; while (imp->imp_replay_cursor != @@ -137,8 +137,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) } /* All the requests in committed list have been replayed, let's replay - * the imp_replay_list */ - if (req == NULL) { + * the imp_replay_list + */ + if (!req) { list_for_each_safe(tmp, pos, &imp->imp_replay_list) { req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); @@ -152,15 +153,16 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) /* If need to resend the last sent transno (because a reconnect * has occurred), then stop on the matching req and send it again. * If, however, the last sent transno has been committed then we - * continue replay from the next request. */ - if (req != NULL && imp->imp_resend_replay) + * continue replay from the next request. 
+ */ + if (req && imp->imp_resend_replay) lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT); spin_lock(&imp->imp_lock); imp->imp_resend_replay = 0; spin_unlock(&imp->imp_lock); - if (req != NULL) { + if (req) { rc = ptlrpc_replay_req(req); if (rc) { CERROR("recovery replay error %d for req %llu\n", @@ -192,8 +194,7 @@ int ptlrpc_resend(struct obd_import *imp) return -1; } - list_for_each_entry_safe(req, next, &imp->imp_sending_list, - rq_list) { + list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, "req %p bad\n", req); LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); @@ -249,7 +250,8 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req) } /* Wait for recovery to complete and resend. If evicted, then - this request will be errored out later.*/ + * this request will be errored out later. + */ spin_lock(&failed_req->rq_lock); if (!failed_req->rq_no_resend) failed_req->rq_resend = 1; @@ -260,7 +262,7 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req) * Administratively active/deactive a client. * This should only be called by the ioctl interface, currently * - the lctl deactivate and activate commands - * - echo 0/1 >> /proc/osc/XXX/active + * - echo 0/1 >> /sys/fs/lustre/osc/XXX/active * - client umount -f (ll_umount_begin) */ int ptlrpc_set_import_active(struct obd_import *imp, int active) @@ -271,13 +273,15 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active) LASSERT(obd); /* When deactivating, mark import invalid, and abort in-flight - * requests. */ + * requests. + */ if (!active) { LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n", obd2cli_tgt(imp->imp_obd)); /* set before invalidate to avoid messages about imp_inval - * set without imp_deactive in ptlrpc_import_delay_req */ + * set without imp_deactive in ptlrpc_import_delay_req + */ spin_lock(&imp->imp_lock); imp->imp_deactive = 1; spin_unlock(&imp->imp_lock); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index 39f5261c9854..187fd1d6898c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -94,7 +94,7 @@ int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy) LASSERT(number < SPTLRPC_POLICY_MAX); write_lock(&policy_lock); - if (unlikely(policies[number] == NULL)) { + if (unlikely(!policies[number])) { write_unlock(&policy_lock); CERROR("%s: already unregistered\n", policy->sp_name); return -EINVAL; @@ -126,11 +126,11 @@ struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor) policy = policies[number]; if (policy && !try_module_get(policy->sp_owner)) policy = NULL; - if (policy == NULL) + if (!policy) flag = atomic_read(&loaded); read_unlock(&policy_lock); - if (policy != NULL || flag != 0 || + if (policy || flag != 0 || number != SPTLRPC_POLICY_GSS) break; @@ -327,7 +327,7 @@ static int import_sec_validate_get(struct obd_import *imp, } *sec = sptlrpc_import_sec_ref(imp); - if (*sec == NULL) { + if (!*sec) { CERROR("import %p (%s) with no sec\n", imp, ptlrpc_import_state_name(imp->imp_state)); return -EACCES; @@ -429,7 +429,7 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req, reqmsg_size = req->rq_reqlen; if (reqmsg_size != 0) { reqmsg = libcfs_kvzalloc(reqmsg_size, GFP_NOFS); - if (reqmsg == NULL) + if (!reqmsg) return -ENOMEM; memcpy(reqmsg, req->rq_reqmsg, reqmsg_size); } @@ -445,7 +445,8 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request 
*req, /* alloc new request buffer * we don't need to alloc reply buffer here, leave it to the - * rest procedure of ptlrpc */ + * rest procedure of ptlrpc + */ if (reqmsg_size != 0) { rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size); if (!rc) { @@ -609,7 +610,7 @@ again: if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) { CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n", - req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc); + req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc); req_off_ctx_list(req, ctx); sptlrpc_req_replace_dead_ctx(req); ctx = req->rq_cli_ctx; @@ -798,7 +799,8 @@ void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode) spin_unlock(&sec->ps_lock); /* force SVC_NULL for context initiation rpc, SVC_INTG for context - * destruction rpc */ + * destruction rpc + */ if (unlikely(req->rq_ctx_init)) flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL); else if (unlikely(req->rq_ctx_fini)) @@ -938,7 +940,7 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req) LASSERT(ctx->cc_sec); LASSERT(req->rq_repbuf); LASSERT(req->rq_repdata); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repmsg); req->rq_rep_swab_mask = 0; @@ -1000,8 +1002,8 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req) int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req) { LASSERT(req->rq_repbuf); - LASSERT(req->rq_repdata == NULL); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repdata); + LASSERT(!req->rq_repmsg); LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len); if (req->rq_reply_off == 0 && @@ -1046,13 +1048,13 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, int rc; early_req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (early_req == NULL) + if (!early_req) return -ENOMEM; early_size = req->rq_nob_received; early_bufsz = size_roundup_power2(early_size); early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS); - if (early_buf == NULL) { + if (!early_buf) { rc = -ENOMEM; goto err_req; } @@ -1067,8 +1069,8 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, } LASSERT(req->rq_repbuf); - LASSERT(req->rq_repdata == NULL); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repdata); + LASSERT(!req->rq_repmsg); if (req->rq_reply_off != 0) { CERROR("early reply with offset %u\n", req->rq_reply_off); @@ -1354,12 +1356,12 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp, might_sleep(); - if (imp == NULL) + if (!imp) return 0; conn = imp->imp_connection; - if (svc_ctx == NULL) { + if (!svc_ctx) { struct client_obd *cliobd = &imp->imp_obd->u.cli; /* * normal import, determine flavor from rule set, except @@ -1447,11 +1449,11 @@ static void import_flush_ctx_common(struct obd_import *imp, { struct ptlrpc_sec *sec; - if (imp == NULL) + if (!imp) return; sec = sptlrpc_import_sec_ref(imp); - if (sec == NULL) + if (!sec) return; sec_cop_flush_ctx_cache(sec, uid, grace, force); @@ -1484,7 +1486,7 @@ int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize) LASSERT(ctx); LASSERT(ctx->cc_sec); LASSERT(ctx->cc_sec->ps_policy); - LASSERT(req->rq_reqmsg == NULL); + LASSERT(!req->rq_reqmsg); LASSERT_ATOMIC_POS(&ctx->cc_refcount); policy = ctx->cc_sec->ps_policy; @@ -1515,7 +1517,7 @@ void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req) LASSERT(ctx->cc_sec->ps_policy); LASSERT_ATOMIC_POS(&ctx->cc_refcount); - if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL) + if (!req->rq_reqbuf && !req->rq_clrbuf) return; policy = ctx->cc_sec->ps_policy; @@ -1632,7 +1634,7 @@ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req) 
LASSERT(ctx->cc_sec->ps_policy); LASSERT_ATOMIC_POS(&ctx->cc_refcount); - if (req->rq_repbuf == NULL) + if (!req->rq_repbuf) return; LASSERT(req->rq_repbuf_len); @@ -1684,12 +1686,13 @@ int sptlrpc_target_export_check(struct obd_export *exp, { struct sptlrpc_flavor flavor; - if (exp == NULL) + if (!exp) return 0; /* client side export has no imp_reverse, skip - * FIXME maybe we should check flavor this as well??? */ - if (exp->exp_imp_reverse == NULL) + * FIXME maybe we should check flavor this as well??? + */ + if (!exp->exp_imp_reverse) return 0; /* don't care about ctx fini rpc */ @@ -1702,11 +1705,13 @@ int sptlrpc_target_export_check(struct obd_export *exp, * the first req with the new flavor, then treat it as current flavor, * adapt reverse sec according to it. * note the first rpc with new flavor might not be with root ctx, in - * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */ + * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. + */ if (unlikely(exp->exp_flvr_changed) && flavor_allowed(&exp->exp_flvr_old[1], req)) { /* make the new flavor as "current", and old ones as - * about-to-expire */ + * about-to-expire + */ CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc); flavor = exp->exp_flvr_old[1]; @@ -1742,10 +1747,12 @@ int sptlrpc_target_export_check(struct obd_export *exp, } /* if it equals to the current flavor, we accept it, but need to - * dealing with reverse sec/ctx */ + * dealing with reverse sec/ctx + */ if (likely(flavor_allowed(&exp->exp_flvr, req))) { /* most cases should return here, we only interested in - * gss root ctx init */ + * gss root ctx init + */ if (!req->rq_auth_gss || !req->rq_ctx_init || (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt && !req->rq_auth_usr_ost)) { @@ -1755,7 +1762,8 @@ int sptlrpc_target_export_check(struct obd_export *exp, /* if flavor just changed, we should not proceed, just leave * it and current flavor will be discovered and replaced - * shortly, and let _this_ rpc pass through */ + * shortly, and let _this_ rpc pass through + */ if (exp->exp_flvr_changed) { LASSERT(exp->exp_flvr_adapt); spin_unlock(&exp->exp_lock); @@ -1809,7 +1817,8 @@ int sptlrpc_target_export_check(struct obd_export *exp, } /* now it doesn't match the current flavor, the only chance we can - * accept it is match the old flavors which is not expired. */ + * accept it is match the old flavors which is not expired. 
+ */ if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) { if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) { if (flavor_allowed(&exp->exp_flvr_old[1], req)) { @@ -1915,9 +1924,9 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req) int rc; LASSERT(msg); - LASSERT(req->rq_reqmsg == NULL); - LASSERT(req->rq_repmsg == NULL); - LASSERT(req->rq_svc_ctx == NULL); + LASSERT(!req->rq_reqmsg); + LASSERT(!req->rq_repmsg); + LASSERT(!req->rq_svc_ctx); req->rq_req_swab_mask = 0; @@ -1986,15 +1995,15 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen) if (svcpt->scp_service->srv_max_reply_size < msglen + sizeof(struct ptlrpc_reply_state)) { /* Just return failure if the size is too big */ - CERROR("size of message is too big (%zd), %d allowed", - msglen + sizeof(struct ptlrpc_reply_state), - svcpt->scp_service->srv_max_reply_size); + CERROR("size of message is too big (%zd), %d allowed\n", + msglen + sizeof(struct ptlrpc_reply_state), + svcpt->scp_service->srv_max_reply_size); return -ENOMEM; } /* failed alloc, try emergency pool */ rs = lustre_get_emerg_rs(svcpt); - if (rs == NULL) + if (!rs) return -ENOMEM; req->rq_reply_state = rs; @@ -2059,7 +2068,7 @@ void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req) { struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; - if (ctx != NULL) + if (ctx) atomic_inc(&ctx->sc_refcount); } @@ -2067,7 +2076,7 @@ void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req) { struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; - if (ctx == NULL) + if (!ctx) return; LASSERT_ATOMIC_POS(&ctx->sc_refcount); @@ -2156,7 +2165,7 @@ int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req, * in case of privacy mode, nob_transferred needs to be adjusted. */ if (desc->bd_nob != desc->bd_nob_transferred) { - CERROR("nob %d doesn't match transferred nob %d", + CERROR("nob %d doesn't match transferred nob %d\n", desc->bd_nob, desc->bd_nob_transferred); return -EPROTO; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c index 6152c1b766c3..72d5b9bf5b29 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c @@ -120,7 +120,7 @@ static struct ptlrpc_enc_page_pool { } page_pools; /* - * /proc/fs/lustre/sptlrpc/encrypt_page_pools + * /sys/kernel/debug/lustre/sptlrpc/encrypt_page_pools */ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v) { @@ -195,7 +195,7 @@ static void enc_pools_release_free_pages(long npages) while (npages--) { LASSERT(page_pools.epp_pools[p_idx]); - LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL); + LASSERT(page_pools.epp_pools[p_idx][g_idx]); __free_page(page_pools.epp_pools[p_idx][g_idx]); page_pools.epp_pools[p_idx][g_idx] = NULL; @@ -304,7 +304,6 @@ static unsigned long enc_pools_cleanup(struct page ***pools, int npools) static inline void enc_pools_wakeup(void) { assert_spin_locked(&page_pools.epp_lock); - LASSERT(page_pools.epp_waitqlen >= 0); if (unlikely(page_pools.epp_waitqlen)) { LASSERT(waitqueue_active(&page_pools.epp_waitq)); @@ -317,7 +316,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) int p_idx, g_idx; int i; - if (desc->bd_enc_iov == NULL) + if (!desc->bd_enc_iov) return; LASSERT(desc->bd_iov_count > 0); @@ -332,9 +331,9 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) LASSERT(page_pools.epp_pools[p_idx]); for (i = 0; i < desc->bd_iov_count; i++) { - LASSERT(desc->bd_enc_iov[i].kiov_page != NULL); + LASSERT(desc->bd_enc_iov[i].kiov_page); LASSERT(g_idx 
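The CERROR() fixes in the hunks above add the missing trailing "\n" so each error message forms a complete log line. A userspace model of the same convention (illustrative only; report_size_error() is a made-up helper, not a Lustre function):

#include <stdio.h>

/* Each log message is one complete line ending in '\n', so it cannot
 * run into the next message in the log -- the same fix applied to the
 * CERROR() calls above.
 */
static void report_size_error(size_t asked, size_t allowed)
{
        fprintf(stderr, "size of message is too big (%zu), %zu allowed\n",
                asked, allowed);
}

int main(void)
{
        report_size_error(4096, 1024);
        return 0;
}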
!= 0 || page_pools.epp_pools[p_idx]); - LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL); + LASSERT(!page_pools.epp_pools[p_idx][g_idx]); page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_iov[i].kiov_page; @@ -413,7 +412,7 @@ int sptlrpc_enc_pool_init(void) page_pools.epp_st_max_wait = 0; enc_pools_alloc(); - if (page_pools.epp_pools == NULL) + if (!page_pools.epp_pools) return -ENOMEM; register_shrinker(&pools_shrinker); @@ -476,7 +475,7 @@ int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed) int size = msg->lm_buflens[offset]; bsd = lustre_msg_buf(msg, offset, sizeof(*bsd)); - if (bsd == NULL) { + if (!bsd) { CERROR("Invalid bulk sec desc: size %d\n", size); return -EINVAL; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c index 4b0b81c115ee..a51b18bbfd34 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c @@ -78,7 +78,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr) memset(flvr, 0, sizeof(*flvr)); - if (str == NULL || str[0] == '\0') { + if (!str || str[0] == '\0') { flvr->sf_rpc = SPTLRPC_FLVR_INVALID; return 0; } @@ -103,7 +103,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr) * format: plain-hash:<hash_alg> */ alg = strchr(bulk, ':'); - if (alg == NULL) + if (!alg) goto err_out; *alg++ = '\0'; @@ -166,7 +166,7 @@ static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule) sptlrpc_rule_init(rule); flavor = strchr(param, '='); - if (flavor == NULL) { + if (!flavor) { CERROR("invalid param, no '='\n"); return -EINVAL; } @@ -216,7 +216,7 @@ static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule) static void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset) { LASSERT(rset->srs_nslot || - (rset->srs_nrule == 0 && rset->srs_rules == NULL)); + (rset->srs_nrule == 0 && !rset->srs_rules)); if (rset->srs_nslot) { kfree(rset->srs_rules); @@ -241,7 +241,7 @@ static int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset) /* better use realloc() if available */ rules = kcalloc(nslot, sizeof(*rset->srs_rules), GFP_NOFS); - if (rules == NULL) + if (!rules) return -ENOMEM; if (rset->srs_nrule) { @@ -450,7 +450,7 @@ static void target2fsname(const char *tgt, char *fsname, int buflen) } /* if we didn't find the pattern, treat the whole string as fsname */ - if (ptr == NULL) + if (!ptr) len = strlen(tgt); else len = ptr - tgt; @@ -467,7 +467,7 @@ static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf) sptlrpc_rule_set_free(&conf->sc_rset); list_for_each_entry_safe(conf_tgt, conf_tgt_next, - &conf->sc_tgts, sct_list) { + &conf->sc_tgts, sct_list) { sptlrpc_rule_set_free(&conf_tgt->sct_rset); list_del(&conf_tgt->sct_list); kfree(conf_tgt); @@ -517,6 +517,7 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname, int create) { struct sptlrpc_conf *conf; + size_t len; list_for_each_entry(conf, &sptlrpc_confs, sc_list) { if (strcmp(conf->sc_fsname, fsname) == 0) @@ -530,7 +531,11 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname, if (!conf) return NULL; - strcpy(conf->sc_fsname, fsname); + len = strlcpy(conf->sc_fsname, fsname, sizeof(conf->sc_fsname)); + if (len >= sizeof(conf->sc_fsname)) { + kfree(conf); + return NULL; + } sptlrpc_rule_set_init(&conf->sc_rset); INIT_LIST_HEAD(&conf->sc_tgts); list_add(&conf->sc_list, &sptlrpc_confs); @@ -579,13 +584,13 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg, int rc; target = 
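The sptlrpc_conf_get() hunk above replaces an unbounded strcpy() into sc_fsname with strlcpy() plus an explicit length check, freeing the newly allocated conf and bailing out when the name does not fit. A userspace sketch of that bounds check, using snprintf() in place of the kernel's strlcpy() (struct conf and conf_get() here are stand-ins, not the Lustre structures):

#include <stdio.h>
#include <stdlib.h>

#define FSNAME_LEN 8            /* stand-in for sizeof(conf->sc_fsname) */

struct conf {
        char fsname[FSNAME_LEN];
};

/* Reject names that would not fit instead of silently overflowing or
 * truncating, mirroring the check added around strlcpy() above.
 */
static struct conf *conf_get(const char *name)
{
        struct conf *c = calloc(1, sizeof(*c));
        int n;

        if (!c)
                return NULL;

        n = snprintf(c->fsname, sizeof(c->fsname), "%s", name);
        if (n < 0 || (size_t)n >= sizeof(c->fsname)) {
                free(c);        /* name too long: refuse the config */
                return NULL;
        }
        return c;
}

int main(void)
{
        struct conf *ok = conf_get("lustre");          /* fits */
        struct conf *bad = conf_get("much-too-long");  /* rejected, NULL */

        printf("%p %p\n", (void *)ok, (void *)bad);
        free(ok);
        return 0;
}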
lustre_cfg_string(lcfg, 1); - if (target == NULL) { + if (!target) { CERROR("missing target name\n"); return -EINVAL; } param = lustre_cfg_string(lcfg, 2); - if (param == NULL) { + if (!param) { CERROR("missing parameter\n"); return -EINVAL; } @@ -603,12 +608,12 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg, if (rc) return -EINVAL; - if (conf == NULL) { + if (!conf) { target2fsname(target, fsname, sizeof(fsname)); mutex_lock(&sptlrpc_conf_lock); conf = sptlrpc_conf_get(fsname, 0); - if (conf == NULL) { + if (!conf) { CERROR("can't find conf\n"); rc = -ENOMEM; } else { @@ -638,7 +643,7 @@ static int logname2fsname(const char *logname, char *buf, int buflen) int len; ptr = strrchr(logname, '-'); - if (ptr == NULL || strcmp(ptr, "-sptlrpc")) { + if (!ptr || strcmp(ptr, "-sptlrpc")) { CERROR("%s is not a sptlrpc config log\n", logname); return -EINVAL; } @@ -772,7 +777,7 @@ void sptlrpc_conf_choose_flavor(enum lustre_sec_part from, mutex_lock(&sptlrpc_conf_lock); conf = sptlrpc_conf_get(name, 0); - if (conf == NULL) + if (!conf) goto out; /* convert uuid name (supposed end with _UUID) to target name */ diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c index 6e58d5f955d6..9082da06b28a 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c @@ -109,7 +109,7 @@ static void sec_process_ctx_list(void) while (!list_empty(&sec_gc_ctx_list)) { ctx = list_entry(sec_gc_ctx_list.next, - struct ptlrpc_cli_ctx, cc_gc_chain); + struct ptlrpc_cli_ctx, cc_gc_chain); list_del_init(&ctx->cc_gc_chain); spin_unlock(&sec_gc_ctx_list_lock); @@ -131,7 +131,7 @@ static void sec_do_gc(struct ptlrpc_sec *sec) if (unlikely(sec->ps_gc_next == 0)) { CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n", - sec, sec->ps_policy->sp_name); + sec, sec->ps_policy->sp_name); return; } @@ -166,11 +166,13 @@ again: * is not optimal. we perhaps want to use balanced binary tree * to trace each sec as order of expiry time. * another issue here is we wakeup as fixed interval instead of - * according to each sec's expiry time */ + * according to each sec's expiry time + */ mutex_lock(&sec_gc_mutex); list_for_each_entry(sec, &sec_gc_list, ps_gc_list) { /* if someone is waiting to be deleted, let it - * proceed as soon as possible. */ + * proceed as soon as possible. 
+ */ if (atomic_read(&sec_gc_wait_del)) { CDEBUG(D_SEC, "deletion pending, start over\n"); mutex_unlock(&sec_gc_mutex); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c index bda9a77af67a..e610a8ddd223 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c @@ -82,7 +82,7 @@ static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v) if (cli->cl_import) sec = sptlrpc_import_sec_ref(cli->cl_import); - if (sec == NULL) + if (!sec) goto out; sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)); @@ -121,7 +121,7 @@ static int sptlrpc_ctxs_lprocfs_seq_show(struct seq_file *seq, void *v) if (cli->cl_import) sec = sptlrpc_import_sec_ref(cli->cl_import); - if (sec == NULL) + if (!sec) goto out; if (sec->ps_policy->sp_cops->display) @@ -178,7 +178,7 @@ int sptlrpc_lproc_init(void) { int rc; - LASSERT(sptlrpc_debugfs_dir == NULL); + LASSERT(!sptlrpc_debugfs_dir); sptlrpc_debugfs_dir = ldebugfs_register("sptlrpc", debugfs_lustre_root, sptlrpc_lprocfs_vars, NULL); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c index ebfa6092be14..40e5349de38c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c @@ -250,7 +250,7 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec, alloc_size = size_roundup_power2(newmsg_size); newbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS); - if (newbuf == NULL) + if (!newbuf) return -ENOMEM; /* Must lock this, so that otherwise unprotected change of @@ -258,7 +258,8 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec, * imp_replay_list traversing threads. See LU-3333 * This is a bandaid at best, we really need to deal with this * in request enlarging code before unpacking that's already - * there */ + * there + */ if (req->rq_import) spin_lock(&req->rq_import->imp_lock); memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen); @@ -319,7 +320,7 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize) LASSERT(rs->rs_size >= rs_size); } else { rs = libcfs_kvzalloc(rs_size, GFP_NOFS); - if (rs == NULL) + if (!rs) return -ENOMEM; rs->rs_size = rs_size; diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c index 905a41451ca3..6276bf59c3aa 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c @@ -104,7 +104,7 @@ static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed) return -EPROTO; bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE); - if (bsd == NULL) { + if (!bsd) { CERROR("bulk sec desc has short size %d\n", lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF)); return -EPROTO; @@ -227,7 +227,7 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) swabbed = ptlrpc_rep_need_swab(req); phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); - if (phdr == NULL) { + if (!phdr) { CERROR("missing plain header\n"); return -EPROTO; } @@ -264,7 +264,8 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) } } else { /* whether we sent with bulk or not, we expect the same - * in reply, except for early reply */ + * in reply, except for early reply + */ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, phdr->ph_flags & PLAIN_FL_BULK)) { @@ -419,7 +420,7 @@ void plain_destroy_sec(struct ptlrpc_sec *sec) LASSERT(sec->ps_import); 
LASSERT(atomic_read(&sec->ps_refcount) == 0); LASSERT(atomic_read(&sec->ps_nctx) == 0); - LASSERT(plsec->pls_ctx == NULL); + LASSERT(!plsec->pls_ctx); class_import_put(sec->ps_import); @@ -468,7 +469,7 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp, /* install ctx immediately if this is a reverse sec */ if (svc_ctx) { ctx = plain_sec_install_ctx(plsec); - if (ctx == NULL) { + if (!ctx) { plain_destroy_sec(sec); return NULL; } @@ -492,7 +493,7 @@ struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec, atomic_inc(&ctx->cc_refcount); read_unlock(&plsec->pls_lock); - if (unlikely(ctx == NULL)) + if (unlikely(!ctx)) ctx = plain_sec_install_ctx(plsec); return ctx; @@ -665,7 +666,7 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec, newbuf_size = size_roundup_power2(newbuf_size); newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS); - if (newbuf == NULL) + if (!newbuf) return -ENOMEM; /* Must lock this, so that otherwise unprotected change of @@ -673,7 +674,8 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec, * imp_replay_list traversing threads. See LU-3333 * This is a bandaid at best, we really need to deal with this * in request enlarging code before unpacking that's already - * there */ + * there + */ if (req->rq_import) spin_lock(&req->rq_import->imp_lock); @@ -732,7 +734,7 @@ int plain_accept(struct ptlrpc_request *req) swabbed = ptlrpc_req_need_swab(req); phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); - if (phdr == NULL) { + if (!phdr) { CERROR("missing plain header\n"); return -EPROTO; } @@ -801,7 +803,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize) LASSERT(rs->rs_size >= rs_size); } else { rs = libcfs_kvzalloc(rs_size, GFP_NOFS); - if (rs == NULL) + if (!rs) return -ENOMEM; rs->rs_size = rs_size; diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index 8598300a61d1..1bbd1d39ccf8 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -77,7 +77,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt) rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, svcpt->scp_cpt)); - if (rqbd == NULL) + if (!rqbd) return NULL; rqbd->rqbd_svcpt = svcpt; @@ -89,7 +89,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt) svcpt->scp_cpt, svc->srv_buf_size, GFP_KERNEL); - if (rqbd->rqbd_buffer == NULL) { + if (!rqbd->rqbd_buffer) { kfree(rqbd); return NULL; } @@ -144,13 +144,14 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post) for (i = 0; i < svc->srv_nbuf_per_group; i++) { /* NB: another thread might have recycled enough rqbds, we - * need to make sure it wouldn't over-allocate, see LU-1212. */ + * need to make sure it wouldn't over-allocate, see LU-1212. 
+ */ if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group) break; rqbd = ptlrpc_alloc_rqbd(svcpt); - if (rqbd == NULL) { + if (!rqbd) { CERROR("%s: Can't allocate request buffer\n", svc->srv_name); rc = -ENOMEM; @@ -298,8 +299,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) } rqbd = list_entry(svcpt->scp_rqbd_idle.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); list_del(&rqbd->rqbd_list); /* assume we will post successfully */ @@ -322,7 +323,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); /* Don't complain if no request buffers are posted right now; LNET - * won't drop requests because we set the portal lazy! */ + * won't drop requests because we set the portal lazy! + */ spin_unlock(&svcpt->scp_lock); @@ -363,13 +365,15 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, init = max_t(int, init, tc->tc_nthrs_init); /* NB: please see comments in lustre_lnet.h for definition - * details of these members */ + * details of these members + */ LASSERT(tc->tc_nthrs_max != 0); if (tc->tc_nthrs_user != 0) { /* In case there is a reason to test a service with many * threads, we give a less strict check here, it can - * be up to 8 * nthrs_max */ + * be up to 8 * nthrs_max + */ total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user); nthrs = total / svc->srv_ncpts; init = max(init, nthrs); @@ -379,7 +383,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, total = tc->tc_nthrs_max; if (tc->tc_nthrs_base == 0) { /* don't care about base threads number per partition, - * this is most for non-affinity service */ + * this is most for non-affinity service + */ nthrs = total / svc->srv_ncpts; goto out; } @@ -390,7 +395,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, /* NB: Increase the base number if it's single partition * and total number of cores/HTs is larger or equal to 4. - * result will always < 2 * nthrs_base */ + * result will always < 2 * nthrs_base + */ weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY); for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */ (tc->tc_nthrs_base >> i) != 0; i++) @@ -490,7 +496,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, array->paa_reqs_array = kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, cpt)); - if (array->paa_reqs_array == NULL) + if (!array->paa_reqs_array) return -ENOMEM; for (index = 0; index < size; index++) @@ -499,14 +505,15 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, array->paa_reqs_count = kzalloc_node(sizeof(__u32) * size, GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, cpt)); - if (array->paa_reqs_count == NULL) + if (!array->paa_reqs_count) goto free_reqs_array; setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer, (unsigned long)svcpt); /* At SOW, service time should be quick; 10s seems generous. If client - * timeout is less than this, we'll be sending an early reply. */ + * timeout is less than this, we'll be sending an early reply. + */ at_init(&svcpt->scp_at_estimate, 10, 0); /* assign this before call ptlrpc_grow_req_bufs */ @@ -514,7 +521,8 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, /* Now allocate the request buffers, but don't post them now */ rc = ptlrpc_grow_req_bufs(svcpt, 0); /* We shouldn't be under memory pressure at startup, so - * fail if we can't allocate all our buffers at this time. */ + * fail if we can't allocate all our buffers at this time. 
+ */ if (rc != 0) goto free_reqs_count; @@ -556,14 +564,14 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, LASSERT(conf->psc_thr.tc_ctx_tags != 0); cptable = cconf->cc_cptable; - if (cptable == NULL) + if (!cptable) cptable = cfs_cpt_table; if (!conf->psc_thr.tc_cpu_affinity) { ncpts = 1; } else { ncpts = cfs_cpt_number(cptable); - if (cconf->cc_pattern != NULL) { + if (cconf->cc_pattern) { struct cfs_expr_list *el; rc = cfs_expr_list_parse(cconf->cc_pattern, @@ -632,11 +640,11 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, if (!conf->psc_thr.tc_cpu_affinity) cpt = CFS_CPT_ANY; else - cpt = cpts != NULL ? cpts[i] : i; + cpt = cpts ? cpts[i] : i; svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS, cfs_cpt_spread_node(cptable, cpt)); - if (svcpt == NULL) { + if (!svcpt) { rc = -ENOMEM; goto failed; } @@ -696,7 +704,8 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req) LASSERT(list_empty(&req->rq_timed_list)); /* DEBUG_REQ() assumes the reply state of a request with a valid - * ref will not be destroyed until that reference is dropped. */ + * ref will not be destroyed until that reference is dropped. + */ ptlrpc_req_drop_rs(req); sptlrpc_svc_ctx_decref(req); @@ -704,7 +713,8 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req) if (req != &req->rq_rqbd->rqbd_req) { /* NB request buffers use an embedded * req if the incoming req unlinked the - * MD; this isn't one of them! */ + * MD; this isn't one of them! + */ ptlrpc_request_cache_free(req); } } @@ -728,7 +738,8 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) if (req->rq_at_linked) { spin_lock(&svcpt->scp_at_lock); /* recheck with lock, in case it's unlinked by - * ptlrpc_at_check_timed() */ + * ptlrpc_at_check_timed() + */ if (likely(req->rq_at_linked)) ptlrpc_at_remove_timed(req); spin_unlock(&svcpt->scp_at_lock); @@ -755,20 +766,22 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) svcpt->scp_hist_nrqbds++; /* cull some history? 
- * I expect only about 1 or 2 rqbds need to be recycled here */ + * I expect only about 1 or 2 rqbds need to be recycled here + */ while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) { rqbd = list_entry(svcpt->scp_hist_rqbds.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); list_del(&rqbd->rqbd_list); svcpt->scp_hist_nrqbds--; /* remove rqbd's reqs from svc's req history while - * I've got the service lock */ + * I've got the service lock + */ list_for_each(tmp, &rqbd->rqbd_reqs) { req = list_entry(tmp, struct ptlrpc_request, - rq_list); + rq_list); /* Track the highest culled req seq */ if (req->rq_history_seq > svcpt->scp_hist_seq_culled) { @@ -782,8 +795,8 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) { req = list_entry(rqbd->rqbd_reqs.next, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); list_del(&req->rq_list); ptlrpc_server_free_request(req); } @@ -795,8 +808,7 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) */ LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0); - list_add_tail(&rqbd->rqbd_list, - &svcpt->scp_rqbd_idle); + list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); } spin_unlock(&svcpt->scp_lock); @@ -846,7 +858,7 @@ static void ptlrpc_server_finish_active_request( ptlrpc_nrs_req_finalize(req); - if (req->rq_export != NULL) + if (req->rq_export) class_export_rpc_dec(req->rq_export); ptlrpc_server_finish_request(svcpt, req); @@ -869,13 +881,13 @@ static int ptlrpc_check_req(struct ptlrpc_request *req) req->rq_export->exp_conn_cnt); return -EEXIST; } - if (unlikely(obd == NULL || obd->obd_fail)) { + if (unlikely(!obd || obd->obd_fail)) { /* * Failing over, don't handle any more reqs, send * error response instead. */ CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n", - req, (obd != NULL) ? obd->obd_name : "unknown"); + req, obd ? obd->obd_name : "unknown"); rc = -ENODEV; } else if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_REPLAY | MSG_REQ_REPLAY_DONE)) { @@ -942,13 +954,13 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) div_u64_rem(req->rq_deadline, array->paa_size, &index); if (array->paa_reqs_count[index] > 0) { /* latest rpcs will have the latest deadlines in the list, - * so search backward. */ - list_for_each_entry_reverse(rq, - &array->paa_reqs_array[index], - rq_timed_list) { + * so search backward. + */ + list_for_each_entry_reverse(rq, &array->paa_reqs_array[index], + rq_timed_list) { if (req->rq_deadline >= rq->rq_deadline) { list_add(&req->rq_timed_list, - &rq->rq_timed_list); + &rq->rq_timed_list); break; } } @@ -956,8 +968,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) /* Add the request at the head of the list */ if (list_empty(&req->rq_timed_list)) - list_add(&req->rq_timed_list, - &array->paa_reqs_array[index]); + list_add(&req->rq_timed_list, &array->paa_reqs_array[index]); spin_lock(&req->rq_lock); req->rq_at_linked = 1; @@ -1003,7 +1014,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) int rc; /* deadline is when the client expects us to reply, margin is the - difference between clients' and servers' expectations */ + * difference between clients' and servers' expectations + */ DEBUG_REQ(D_ADAPTTO, req, "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d", AT_OFF ? 
"AT off - not " : "", @@ -1027,12 +1039,14 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) } /* Fake our processing time into the future to ask the clients - * for some extra amount of time */ + * for some extra amount of time + */ at_measured(&svcpt->scp_at_estimate, at_extra + ktime_get_real_seconds() - req->rq_arrival_time.tv_sec); /* Check to see if we've actually increased the deadline - - * we may be past adaptive_max */ + * we may be past adaptive_max + */ if (req->rq_deadline >= req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate)) { DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n", @@ -1044,7 +1058,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate); reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS); - if (reqcopy == NULL) + if (!reqcopy) return -ENOMEM; reqmsg = libcfs_kvzalloc(req->rq_reqlen, GFP_NOFS); if (!reqmsg) { @@ -1074,7 +1088,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) /* Connection ref */ reqcopy->rq_export = class_conn2export( lustre_msg_get_handle(reqcopy->rq_reqmsg)); - if (reqcopy->rq_export == NULL) { + if (!reqcopy->rq_export) { rc = -ENODEV; goto out; } @@ -1102,7 +1116,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) } /* Free the (early) reply state from lustre_pack_reply. - (ptlrpc_send_reply takes it's own rs ref, so this is safe here) */ + * (ptlrpc_send_reply takes it's own rs ref, so this is safe here) + */ ptlrpc_req_drop_rs(reqcopy); out_put: @@ -1117,8 +1132,9 @@ out_free: } /* Send early replies to everybody expiring within at_early_margin - asking for at_extra time */ -static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) + * asking for at_extra time + */ +static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) { struct ptlrpc_at_array *array = &svcpt->scp_at_array; struct ptlrpc_request *rq, *n; @@ -1132,14 +1148,14 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) spin_lock(&svcpt->scp_at_lock); if (svcpt->scp_at_check == 0) { spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime); svcpt->scp_at_check = 0; if (array->paa_count == 0) { spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } /* The timer went off, but maybe the nearest rpc already completed. */ @@ -1148,20 +1164,20 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) /* We've still got plenty of time. Reset the timer. */ ptlrpc_at_set_timer(svcpt); spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } /* We're close to a timeout, and we don't know how much longer the - server will take. Send early replies to everyone expiring soon. */ + * server will take. Send early replies to everyone expiring soon. 
+ */ INIT_LIST_HEAD(&work_list); deadline = -1; div_u64_rem(array->paa_deadline, array->paa_size, &index); count = array->paa_count; while (count > 0) { count -= array->paa_reqs_count[index]; - list_for_each_entry_safe(rq, n, - &array->paa_reqs_array[index], - rq_timed_list) { + list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index], + rq_timed_list) { if (rq->rq_deadline > now + at_early_margin) { /* update the earliest deadline */ if (deadline == -1 || @@ -1194,7 +1210,8 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) first, at_extra, counter); if (first < 0) { /* We're already past request deadlines before we even get a - chance to send early replies */ + * chance to send early replies + */ LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n", svcpt->scp_service->srv_name); CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n", @@ -1204,10 +1221,11 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) } /* we took additional refcount so entries can't be deleted from list, no - * locking is needed */ + * locking is needed + */ while (!list_empty(&work_list)) { rq = list_entry(work_list.next, struct ptlrpc_request, - rq_timed_list); + rq_timed_list); list_del_init(&rq->rq_timed_list); if (ptlrpc_at_send_early_reply(rq) == 0) @@ -1215,8 +1233,6 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) ptlrpc_server_drop_request(rq); } - - return 1; /* return "did_something" for liblustre */ } /** @@ -1237,7 +1253,8 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, if (req->rq_export && req->rq_ops) { /* Perform request specific check. We should do this check * before the request is added into exp_hp_rpcs list otherwise - * it may hit swab race at LU-1044. */ + * it may hit swab race at LU-1044. + */ if (req->rq_ops->hpreq_check) { rc = req->rq_ops->hpreq_check(req); /** @@ -1257,8 +1274,7 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, } spin_lock_bh(&req->rq_export->exp_rpc_lock); - list_add(&req->rq_exp_list, - &req->rq_export->exp_hp_rpcs); + list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs); spin_unlock_bh(&req->rq_export->exp_rpc_lock); } @@ -1272,7 +1288,8 @@ static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req) { if (req->rq_export && req->rq_ops) { /* refresh lock timeout again so that client has more - * room to send lock cancel RPC. */ + * room to send lock cancel RPC. 
+ */ if (req->rq_ops->hpreq_fini) req->rq_ops->hpreq_fini(req); @@ -1316,7 +1333,7 @@ static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt, CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) { /* leave just 1 thread for normal RPCs */ running = PTLRPC_NTHRS_INIT; - if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) + if (svcpt->scp_service->srv_ops.so_hpreq_handler) running += 1; } @@ -1355,7 +1372,7 @@ static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt, CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) { /* leave just 1 thread for normal RPCs */ running = PTLRPC_NTHRS_INIT; - if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) + if (svcpt->scp_service->srv_ops.so_hpreq_handler) running += 1; } @@ -1405,7 +1422,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force) if (ptlrpc_server_high_pending(svcpt, force)) { req = ptlrpc_nrs_req_get_nolock(svcpt, true, force); - if (req != NULL) { + if (req) { svcpt->scp_hreq_count++; goto got_request; } @@ -1413,7 +1430,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force) if (ptlrpc_server_normal_pending(svcpt, force)) { req = ptlrpc_nrs_req_get_nolock(svcpt, false, force); - if (req != NULL) { + if (req) { svcpt->scp_hreq_count = 0; goto got_request; } @@ -1457,11 +1474,12 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt, } req = list_entry(svcpt->scp_req_incoming.next, - struct ptlrpc_request, rq_list); + struct ptlrpc_request, rq_list); list_del_init(&req->rq_list); svcpt->scp_nreqs_incoming--; /* Consider this still a "queued" request as far as stats are - * concerned */ + * concerned + */ spin_unlock(&svcpt->scp_lock); /* go through security check/transform */ @@ -1598,7 +1616,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, int fail_opc = 0; request = ptlrpc_server_request_get(svcpt, false); - if (request == NULL) + if (!request) return 0; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT)) @@ -1620,7 +1638,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, timediff = timespec64_sub(work_start, request->rq_arrival_time); timediff_usecs = timediff.tv_sec * USEC_PER_SEC + timediff.tv_nsec / NSEC_PER_USEC; - if (likely(svc->srv_stats != NULL)) { + if (likely(svc->srv_stats)) { lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR, timediff_usecs); lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR, @@ -1652,7 +1670,8 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, } /* Discard requests queued for longer than the deadline. - The deadline is increased if we send an early reply. */ + * The deadline is increased if we send an early reply. + */ if (ktime_get_real_seconds() > request->rq_deadline) { DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n", libcfs_id2str(request->rq_peer), @@ -1718,7 +1737,7 @@ put_conn: request->rq_status, (request->rq_repmsg ? 
lustre_msg_get_status(request->rq_repmsg) : -999)); - if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) { + if (likely(svc->srv_stats && request->rq_reqmsg)) { __u32 op = lustre_msg_get_opc(request->rq_reqmsg); int opc = opcode_offset(op); @@ -1804,7 +1823,8 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs) if (nlocks == 0 && !been_handled) { /* If we see this, we should already have seen the warning - * in mds_steal_ack_locks() */ + * in mds_steal_ack_locks() + */ CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n", rs, rs->rs_xid, rs->rs_transno, rs->rs_opc, @@ -1858,7 +1878,8 @@ ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt) /* CAVEAT EMPTOR: We might be allocating buffers here because we've * allowed the request history to grow out of control. We could put a * sanity check on that here and cull some history if we need the - * space. */ + * space. + */ if (avail <= low_water) ptlrpc_grow_req_bufs(svcpt, 1); @@ -1992,7 +2013,8 @@ static int ptlrpc_main(void *arg) /* NB: we will call cfs_cpt_bind() for all threads, because we * might want to run lustre server only on a subset of system CPUs, - * in that case ->scp_cpt is CFS_CPT_ANY */ + * in that case ->scp_cpt is CFS_CPT_ANY + */ rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt); if (rc != 0) { CWARN("%s: failed to bind %s on CPT %d\n", @@ -2008,7 +2030,7 @@ static int ptlrpc_main(void *arg) set_current_groups(ginfo); put_group_info(ginfo); - if (svc->srv_ops.so_thr_init != NULL) { + if (svc->srv_ops.so_thr_init) { rc = svc->srv_ops.so_thr_init(thread); if (rc) goto out; @@ -2035,7 +2057,7 @@ static int ptlrpc_main(void *arg) continue; CERROR("Failed to post rqbd for %s on CPT %d: %d\n", - svc->srv_name, svcpt->scp_cpt, rc); + svc->srv_name, svcpt->scp_cpt, rc); goto out_srv_fini; } @@ -2057,7 +2079,8 @@ static int ptlrpc_main(void *arg) /* SVC_STOPPING may already be set here if someone else is trying * to stop the service while this new thread has been dynamically * forked. We still set SVC_RUNNING to let our creator know that - * we are now running, however we will exit as soon as possible */ + * we are now running, however we will exit as soon as possible + */ thread_add_flags(thread, SVC_RUNNING); svcpt->scp_nthrs_running++; spin_unlock(&svcpt->scp_lock); @@ -2116,7 +2139,8 @@ static int ptlrpc_main(void *arg) ptlrpc_server_post_idle_rqbds(svcpt) < 0) { /* I just failed to repost request buffers. 
* Wait for a timeout (unless something else - * happens) before I try again */ + * happens) before I try again + */ svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10; CDEBUG(D_RPCTRACE, "Posted buffers: %d\n", svcpt->scp_nrqbds_posted); @@ -2132,10 +2156,10 @@ out_srv_fini: /* * deconstruct service specific state created by ptlrpc_start_thread() */ - if (svc->srv_ops.so_thr_done != NULL) + if (svc->srv_ops.so_thr_done) svc->srv_ops.so_thr_done(thread); - if (env != NULL) { + if (env) { lu_context_fini(&env->le_ctx); kfree(env); } @@ -2183,7 +2207,7 @@ static int ptlrpc_hr_main(void *arg) { struct ptlrpc_hr_thread *hrt = arg; struct ptlrpc_hr_partition *hrp = hrt->hrt_partition; - LIST_HEAD (replies); + LIST_HEAD(replies); char threadname[20]; int rc; @@ -2206,9 +2230,8 @@ static int ptlrpc_hr_main(void *arg) while (!list_empty(&replies)) { struct ptlrpc_reply_state *rs; - rs = list_entry(replies.prev, - struct ptlrpc_reply_state, - rs_list); + rs = list_entry(replies.prev, struct ptlrpc_reply_state, + rs_list); list_del_init(&rs->rs_list); ptlrpc_handle_rs(rs); } @@ -2229,18 +2252,18 @@ static void ptlrpc_stop_hr_threads(void) ptlrpc_hr.hr_stopping = 1; cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - if (hrp->hrp_thrs == NULL) + if (!hrp->hrp_thrs) continue; /* uninitialized */ for (j = 0; j < hrp->hrp_nthrs; j++) wake_up_all(&hrp->hrp_thrs[j].hrt_waitq); } cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - if (hrp->hrp_thrs == NULL) + if (!hrp->hrp_thrs) continue; /* uninitialized */ wait_event(ptlrpc_hr.hr_waitq, - atomic_read(&hrp->hrp_nstopped) == - atomic_read(&hrp->hrp_nstarted)); + atomic_read(&hrp->hrp_nstopped) == + atomic_read(&hrp->hrp_nstarted)); } } @@ -2255,24 +2278,26 @@ static int ptlrpc_start_hr_threads(void) for (j = 0; j < hrp->hrp_nthrs; j++) { struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j]; - - rc = PTR_ERR(kthread_run(ptlrpc_hr_main, - &hrp->hrp_thrs[j], - "ptlrpc_hr%02d_%03d", - hrp->hrp_cpt, - hrt->hrt_id)); - if (IS_ERR_VALUE(rc)) + struct task_struct *task; + + task = kthread_run(ptlrpc_hr_main, + &hrp->hrp_thrs[j], + "ptlrpc_hr%02d_%03d", + hrp->hrp_cpt, hrt->hrt_id); + if (IS_ERR(task)) { + rc = PTR_ERR(task); break; + } } wait_event(ptlrpc_hr.hr_waitq, - atomic_read(&hrp->hrp_nstarted) == j); - if (!IS_ERR_VALUE(rc)) - continue; + atomic_read(&hrp->hrp_nstarted) == j); - CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n", - i, j, rc); - ptlrpc_stop_hr_threads(); - return rc; + if (rc < 0) { + CERROR("cannot start reply handler thread %d:%d: rc = %d\n", + i, j, rc); + ptlrpc_stop_hr_threads(); + return rc; + } } return 0; } @@ -2281,7 +2306,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) { struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread; - LIST_HEAD (zombie); + LIST_HEAD(zombie); CDEBUG(D_INFO, "Stopping threads for service %s\n", svcpt->scp_service->srv_name); @@ -2298,7 +2323,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) while (!list_empty(&svcpt->scp_threads)) { thread = list_entry(svcpt->scp_threads.next, - struct ptlrpc_thread, t_link); + struct ptlrpc_thread, t_link); if (thread_is_stopped(thread)) { list_del(&thread->t_link); list_add(&thread->t_link, &zombie); @@ -2333,7 +2358,7 @@ static void ptlrpc_stop_all_threads(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service != NULL) + if (svcpt->scp_service) ptlrpc_svcpt_stop_threads(svcpt); } } @@ -2374,10 +2399,9 @@ int ptlrpc_start_thread(struct 
ptlrpc_service_part *svcpt, int wait) struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread; struct ptlrpc_service *svc; + struct task_struct *task; int rc; - LASSERT(svcpt != NULL); - svc = svcpt->scp_service; CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n", @@ -2396,7 +2420,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) thread = kzalloc_node(sizeof(*thread), GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, svcpt->scp_cpt)); - if (thread == NULL) + if (!thread) return -ENOMEM; init_waitqueue_head(&thread->t_ctl_waitq); @@ -2409,7 +2433,8 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) if (svcpt->scp_nthrs_starting != 0) { /* serialize starting because some modules (obdfilter) - * might require unique and contiguous t_id */ + * might require unique and contiguous t_id + */ LASSERT(svcpt->scp_nthrs_starting == 1); spin_unlock(&svcpt->scp_lock); kfree(thread); @@ -2442,9 +2467,10 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) } CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); - rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name)); - if (IS_ERR_VALUE(rc)) { - CERROR("cannot start thread '%s': rc %d\n", + task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("cannot start thread '%s': rc = %d\n", thread->t_name, rc); spin_lock(&svcpt->scp_lock); --svcpt->scp_nthrs_starting; @@ -2488,7 +2514,7 @@ int ptlrpc_hr_init(void) ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table, sizeof(*hrp)); - if (ptlrpc_hr.hr_partitions == NULL) + if (!ptlrpc_hr.hr_partitions) return -ENOMEM; init_waitqueue_head(&ptlrpc_hr.hr_waitq); @@ -2509,7 +2535,7 @@ int ptlrpc_hr_init(void) kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS, cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table, i)); - if (hrp->hrp_thrs == NULL) { + if (!hrp->hrp_thrs) { rc = -ENOMEM; goto out; } @@ -2537,7 +2563,7 @@ void ptlrpc_hr_fini(void) struct ptlrpc_hr_partition *hrp; int i; - if (ptlrpc_hr.hr_partitions == NULL) + if (!ptlrpc_hr.hr_partitions) return; ptlrpc_stop_hr_threads(); @@ -2577,7 +2603,7 @@ ptlrpc_service_del_atimer(struct ptlrpc_service *svc) /* early disarm AT timer... */ ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service != NULL) + if (svcpt->scp_service) del_timer(&svcpt->scp_at_timer); } } @@ -2592,18 +2618,20 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) int i; /* All history will be culled when the next request buffer is - * freed in ptlrpc_service_purge_all() */ + * freed in ptlrpc_service_purge_all() + */ svc->srv_hist_nrqbds_cpt_max = 0; rc = LNetClearLazyPortal(svc->srv_req_portal); LASSERT(rc == 0); ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* Unlink all the request buffers. 
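The ptlrpc_start_hr_threads() and ptlrpc_start_thread() hunks above stop funnelling kthread_run()'s return value straight through PTR_ERR(), and instead keep the task pointer, test it with IS_ERR(), and only then extract the error code. A self-contained userspace model of that pattern (ERR_PTR/IS_ERR/PTR_ERR are re-implemented here purely for illustration, and fake_thread_run() is hypothetical):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Minimal userspace stand-ins for the kernel's ERR_PTR machinery. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)(uintptr_t)p >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for kthread_run(): returns either a valid task
 * pointer or an encoded error, never NULL.
 */
static void *fake_thread_run(int fail)
{
        static int task = 42;

        return fail ? ERR_PTR(-ENOMEM) : (void *)&task;
}

int main(void)
{
        void *task = fake_thread_run(1);
        int rc = 0;

        /* Check the pointer first, then extract the error code -- the
         * shape the thread-start paths are converted to above.
         */
        if (IS_ERR(task)) {
                rc = (int)PTR_ERR(task);
                fprintf(stderr, "cannot start thread: rc = %d\n", rc);
        }
        return rc ? 1 : 0;
}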
This forces a 'final' - * event with its 'unlink' flag set for each posted rqbd */ + * event with its 'unlink' flag set for each posted rqbd + */ list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted, rqbd_list) { rc = LNetMDUnlink(rqbd->rqbd_md_h); @@ -2612,17 +2640,19 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) } ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* Wait for the network to release any buffers - * it's currently filling */ + * it's currently filling + */ spin_lock(&svcpt->scp_lock); while (svcpt->scp_nrqbds_posted != 0) { spin_unlock(&svcpt->scp_lock); /* Network access will complete in finite time but * the HUGE timeout lets us CWARN for visibility - * of sluggish NALs */ + * of sluggish LNDs + */ lwi = LWI_TIMEOUT_INTERVAL( cfs_time_seconds(LONG_UNLINK), cfs_time_seconds(1), NULL, NULL); @@ -2648,13 +2678,13 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; spin_lock(&svcpt->scp_rep_lock); while (!list_empty(&svcpt->scp_rep_active)) { rs = list_entry(svcpt->scp_rep_active.next, - struct ptlrpc_reply_state, rs_list); + struct ptlrpc_reply_state, rs_list); spin_lock(&rs->rs_lock); ptlrpc_schedule_difficult_reply(rs); spin_unlock(&rs->rs_lock); @@ -2663,10 +2693,11 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) /* purge the request queue. NB No new replies (rqbds * all unlinked) and no service threads, so I'm the only - * thread noodling the request queue now */ + * thread noodling the request queue now + */ while (!list_empty(&svcpt->scp_req_incoming)) { req = list_entry(svcpt->scp_req_incoming.next, - struct ptlrpc_request, rq_list); + struct ptlrpc_request, rq_list); list_del(&req->rq_list); svcpt->scp_nreqs_incoming--; @@ -2682,24 +2713,26 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) LASSERT(svcpt->scp_nreqs_incoming == 0); LASSERT(svcpt->scp_nreqs_active == 0); /* history should have been culled by - * ptlrpc_server_finish_request */ + * ptlrpc_server_finish_request + */ LASSERT(svcpt->scp_hist_nrqbds == 0); /* Now free all the request buffers since nothing - * references them any more... */ + * references them any more... 
+ */ while (!list_empty(&svcpt->scp_rqbd_idle)) { rqbd = list_entry(svcpt->scp_rqbd_idle.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); ptlrpc_free_rqbd(rqbd); } ptlrpc_wait_replies(svcpt); while (!list_empty(&svcpt->scp_rep_idle)) { rs = list_entry(svcpt->scp_rep_idle.next, - struct ptlrpc_reply_state, - rs_list); + struct ptlrpc_reply_state, + rs_list); list_del(&rs->rs_list); kvfree(rs); } @@ -2714,7 +2747,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* In case somebody rearmed this in the meantime */ @@ -2730,7 +2763,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc) ptlrpc_service_for_each_part(svcpt, i, svc) kfree(svcpt); - if (svc->srv_cpts != NULL) + if (svc->srv_cpts) cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts); kfree(svc); diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c index 61d9ca93c53a..3ffd2d91f274 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c +++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c @@ -333,17 +333,9 @@ void lustre_assert_wire_constants(void) CLASSERT(LDLM_MAX_TYPE == 14); CLASSERT(LUSTRE_RES_ID_SEQ_OFF == 0); CLASSERT(LUSTRE_RES_ID_VER_OID_OFF == 1); - LASSERTF(UPDATE_OBJ == 1000, "found %lld\n", - (long long)UPDATE_OBJ); - LASSERTF(UPDATE_LAST_OPC == 1001, "found %lld\n", - (long long)UPDATE_LAST_OPC); CLASSERT(LUSTRE_RES_ID_QUOTA_SEQ_OFF == 2); CLASSERT(LUSTRE_RES_ID_QUOTA_VER_OID_OFF == 3); CLASSERT(LUSTRE_RES_ID_HSH_OFF == 3); - CLASSERT(LQUOTA_TYPE_USR == 0); - CLASSERT(LQUOTA_TYPE_GRP == 1); - CLASSERT(LQUOTA_RES_MD == 1); - CLASSERT(LQUOTA_RES_DT == 2); LASSERTF(OBD_PING == 400, "found %lld\n", (long long)OBD_PING); LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n", @@ -437,30 +429,6 @@ void lustre_assert_wire_constants(void) (unsigned)LMAC_NOT_IN_OI); LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n", (unsigned)LMAC_FID_ON_OST); - LASSERTF(OBJ_CREATE == 1, "found %lld\n", - (long long)OBJ_CREATE); - LASSERTF(OBJ_DESTROY == 2, "found %lld\n", - (long long)OBJ_DESTROY); - LASSERTF(OBJ_REF_ADD == 3, "found %lld\n", - (long long)OBJ_REF_ADD); - LASSERTF(OBJ_REF_DEL == 4, "found %lld\n", - (long long)OBJ_REF_DEL); - LASSERTF(OBJ_ATTR_SET == 5, "found %lld\n", - (long long)OBJ_ATTR_SET); - LASSERTF(OBJ_ATTR_GET == 6, "found %lld\n", - (long long)OBJ_ATTR_GET); - LASSERTF(OBJ_XATTR_SET == 7, "found %lld\n", - (long long)OBJ_XATTR_SET); - LASSERTF(OBJ_XATTR_GET == 8, "found %lld\n", - (long long)OBJ_XATTR_GET); - LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n", - (long long)OBJ_INDEX_LOOKUP); - LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n", - (long long)OBJ_INDEX_LOOKUP); - LASSERTF(OBJ_INDEX_INSERT == 10, "found %lld\n", - (long long)OBJ_INDEX_INSERT); - LASSERTF(OBJ_INDEX_DELETE == 11, "found %lld\n", - (long long)OBJ_INDEX_DELETE); /* Checks for struct ost_id */ LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n", @@ -587,9 +555,6 @@ void lustre_assert_wire_constants(void) (long long)LDF_COLLIDE); LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n", (long long)LU_PAGE_SIZE); - /* Checks for union lu_page */ - LASSERTF((int)sizeof(union lu_page) == 4096, "found %lld\n", - (long long)(int)sizeof(union lu_page)); /* Checks for struct lustre_handle */ LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n", @@ -1535,11 +1500,6 @@ void lustre_assert_wire_constants(void) 
LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n", (long long)(int)sizeof(union lquota_id)); - LASSERTF(QUOTABLOCK_BITS == 10, "found %lld\n", - (long long)QUOTABLOCK_BITS); - LASSERTF(QUOTABLOCK_SIZE == 1024, "found %lld\n", - (long long)QUOTABLOCK_SIZE); - /* Checks for struct obd_quotactl */ LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n", (long long)(int)sizeof(struct obd_quotactl)); @@ -1642,138 +1602,6 @@ void lustre_assert_wire_constants(void) LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n", Q_FINVALIDATE); - /* Checks for struct lquota_acct_rec */ - LASSERTF((int)sizeof(struct lquota_acct_rec) == 16, "found %lld\n", - (long long)(int)sizeof(struct lquota_acct_rec)); - LASSERTF((int)offsetof(struct lquota_acct_rec, bspace) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_acct_rec, bspace)); - LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->bspace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_acct_rec *)0)->bspace)); - LASSERTF((int)offsetof(struct lquota_acct_rec, ispace) == 8, "found %lld\n", - (long long)(int)offsetof(struct lquota_acct_rec, ispace)); - LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->ispace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_acct_rec *)0)->ispace)); - - /* Checks for struct lquota_glb_rec */ - LASSERTF((int)sizeof(struct lquota_glb_rec) == 32, "found %lld\n", - (long long)(int)sizeof(struct lquota_glb_rec)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_hardlimit) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_hardlimit)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_softlimit) == 8, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_softlimit)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_time) == 16, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_time)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_time)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_granted) == 24, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_granted)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted)); - - /* Checks for struct lquota_slv_rec */ - LASSERTF((int)sizeof(struct lquota_slv_rec) == 8, "found %lld\n", - (long long)(int)sizeof(struct lquota_slv_rec)); - LASSERTF((int)offsetof(struct lquota_slv_rec, qsr_granted) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_slv_rec, qsr_granted)); - LASSERTF((int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted)); - - /* Checks for struct idx_info */ - LASSERTF((int)sizeof(struct idx_info) == 80, "found %lld\n", - (long long)(int)sizeof(struct idx_info)); - LASSERTF((int)offsetof(struct idx_info, ii_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_magic)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info 
*)0)->ii_magic)); - LASSERTF((int)offsetof(struct idx_info, ii_flags) == 4, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_flags)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_flags)); - LASSERTF((int)offsetof(struct idx_info, ii_count) == 8, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_count)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_count) == 2, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_count)); - LASSERTF((int)offsetof(struct idx_info, ii_pad0) == 10, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_pad0)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad0) == 2, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_pad0)); - LASSERTF((int)offsetof(struct idx_info, ii_attrs) == 12, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_attrs)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_attrs) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_attrs)); - LASSERTF((int)offsetof(struct idx_info, ii_fid) == 16, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_fid)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_fid)); - LASSERTF((int)offsetof(struct idx_info, ii_version) == 32, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_version)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_version) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_version)); - LASSERTF((int)offsetof(struct idx_info, ii_hash_start) == 40, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_hash_start)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_start)); - LASSERTF((int)offsetof(struct idx_info, ii_hash_end) == 48, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_hash_end)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_end) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_end)); - LASSERTF((int)offsetof(struct idx_info, ii_keysize) == 56, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_keysize)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_keysize) == 2, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_keysize)); - LASSERTF((int)offsetof(struct idx_info, ii_recsize) == 58, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_recsize)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_recsize) == 2, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_recsize)); - LASSERTF((int)offsetof(struct idx_info, ii_pad1) == 60, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_pad1)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_pad1)); - LASSERTF((int)offsetof(struct idx_info, ii_pad2) == 64, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_pad2)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_pad2)); - LASSERTF((int)offsetof(struct idx_info, ii_pad3) == 72, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_pad3)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info 
*)0)->ii_pad3)); - CLASSERT(IDX_INFO_MAGIC == 0x3D37CC37); - - /* Checks for struct lu_idxpage */ - LASSERTF((int)sizeof(struct lu_idxpage) == 16, "found %lld\n", - (long long)(int)sizeof(struct lu_idxpage)); - LASSERTF((int)offsetof(struct lu_idxpage, lip_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct lu_idxpage, lip_magic)); - LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_magic)); - LASSERTF((int)offsetof(struct lu_idxpage, lip_flags) == 4, "found %lld\n", - (long long)(int)offsetof(struct lu_idxpage, lip_flags)); - LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_flags) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_flags)); - LASSERTF((int)offsetof(struct lu_idxpage, lip_nr) == 6, "found %lld\n", - (long long)(int)offsetof(struct lu_idxpage, lip_nr)); - LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_nr) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_nr)); - LASSERTF((int)offsetof(struct lu_idxpage, lip_pad0) == 8, "found %lld\n", - (long long)(int)offsetof(struct lu_idxpage, lip_pad0)); - LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_pad0) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_pad0)); - CLASSERT(LIP_MAGIC == 0x8A6D6B6C); - LASSERTF(LIP_HDR_SIZE == 16, "found %lld\n", - (long long)LIP_HDR_SIZE); - LASSERTF(II_FL_NOHASH == 1, "found %lld\n", - (long long)II_FL_NOHASH); - LASSERTF(II_FL_VARKEY == 2, "found %lld\n", - (long long)II_FL_VARKEY); - LASSERTF(II_FL_VARREC == 4, "found %lld\n", - (long long)II_FL_VARREC); - LASSERTF(II_FL_NONUNQ == 8, "found %lld\n", - (long long)II_FL_NONUNQ); - /* Checks for struct niobuf_remote */ LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n", (long long)(int)sizeof(struct niobuf_remote)); @@ -3753,50 +3581,6 @@ void lustre_assert_wire_constants(void) LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n", (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap)); - /* Checks for struct quota_body */ - LASSERTF((int)sizeof(struct quota_body) == 112, "found %lld\n", - (long long)(int)sizeof(struct quota_body)); - LASSERTF((int)offsetof(struct quota_body, qb_fid) == 0, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_fid)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_fid)); - LASSERTF((int)offsetof(struct quota_body, qb_id) == 16, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_id)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_id) == 16, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_id)); - LASSERTF((int)offsetof(struct quota_body, qb_flags) == 32, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_flags)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_flags)); - LASSERTF((int)offsetof(struct quota_body, qb_padding) == 36, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_padding)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_padding)); - LASSERTF((int)offsetof(struct quota_body, qb_count) == 40, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_count)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_count) == 8, 
"found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_count)); - LASSERTF((int)offsetof(struct quota_body, qb_usage) == 48, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_usage)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_usage) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_usage)); - LASSERTF((int)offsetof(struct quota_body, qb_slv_ver) == 56, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_slv_ver)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_slv_ver) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_slv_ver)); - LASSERTF((int)offsetof(struct quota_body, qb_lockh) == 64, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_lockh)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_lockh) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_lockh)); - LASSERTF((int)offsetof(struct quota_body, qb_glb_lockh) == 72, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_glb_lockh)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_glb_lockh) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_glb_lockh)); - LASSERTF((int)offsetof(struct quota_body, qb_padding1[4]) == 112, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_padding1[4])); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding1[4]) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_padding1[4])); - /* Checks for struct mgs_target_info */ LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n", (long long)(int)sizeof(struct mgs_target_info)); @@ -4431,60 +4215,4 @@ void lustre_assert_wire_constants(void) LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4, "found %lld\n", (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id)); - - /* Checks for struct update_buf */ - LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n", - (long long)(int)sizeof(struct update_buf)); - LASSERTF((int)offsetof(struct update_buf, ub_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct update_buf, ub_magic)); - LASSERTF((int)sizeof(((struct update_buf *)0)->ub_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update_buf *)0)->ub_magic)); - LASSERTF((int)offsetof(struct update_buf, ub_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct update_buf, ub_count)); - LASSERTF((int)sizeof(((struct update_buf *)0)->ub_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update_buf *)0)->ub_count)); - LASSERTF((int)offsetof(struct update_buf, ub_bufs) == 8, "found %lld\n", - (long long)(int)offsetof(struct update_buf, ub_bufs)); - LASSERTF((int)sizeof(((struct update_buf *)0)->ub_bufs) == 0, "found %lld\n", - (long long)(int)sizeof(((struct update_buf *)0)->ub_bufs)); - - /* Checks for struct update_reply */ - LASSERTF((int)sizeof(struct update_reply) == 8, "found %lld\n", - (long long)(int)sizeof(struct update_reply)); - LASSERTF((int)offsetof(struct update_reply, ur_version) == 0, "found %lld\n", - (long long)(int)offsetof(struct update_reply, ur_version)); - LASSERTF((int)sizeof(((struct update_reply *)0)->ur_version) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update_reply *)0)->ur_version)); - LASSERTF((int)offsetof(struct update_reply, ur_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct update_reply, ur_count)); - LASSERTF((int)sizeof(((struct update_reply *)0)->ur_count) == 4, "found %lld\n", - 
(long long)(int)sizeof(((struct update_reply *)0)->ur_count)); - LASSERTF((int)offsetof(struct update_reply, ur_lens) == 8, "found %lld\n", - (long long)(int)offsetof(struct update_reply, ur_lens)); - LASSERTF((int)sizeof(((struct update_reply *)0)->ur_lens) == 0, "found %lld\n", - (long long)(int)sizeof(((struct update_reply *)0)->ur_lens)); - - /* Checks for struct update */ - LASSERTF((int)sizeof(struct update) == 56, "found %lld\n", - (long long)(int)sizeof(struct update)); - LASSERTF((int)offsetof(struct update, u_type) == 0, "found %lld\n", - (long long)(int)offsetof(struct update, u_type)); - LASSERTF((int)sizeof(((struct update *)0)->u_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_type)); - LASSERTF((int)offsetof(struct update, u_batchid) == 4, "found %lld\n", - (long long)(int)offsetof(struct update, u_batchid)); - LASSERTF((int)sizeof(((struct update *)0)->u_batchid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_batchid)); - LASSERTF((int)offsetof(struct update, u_fid) == 8, "found %lld\n", - (long long)(int)offsetof(struct update, u_fid)); - LASSERTF((int)sizeof(((struct update *)0)->u_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_fid)); - LASSERTF((int)offsetof(struct update, u_lens) == 24, "found %lld\n", - (long long)(int)offsetof(struct update, u_lens)); - LASSERTF((int)sizeof(((struct update *)0)->u_lens) == 32, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_lens)); - LASSERTF((int)offsetof(struct update, u_bufs) == 56, "found %lld\n", - (long long)(int)offsetof(struct update, u_bufs)); - LASSERTF((int)sizeof(((struct update *)0)->u_bufs) == 0, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_bufs)); } diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c index 8fdf0ac4f287..abf330f92c0b 100644 --- a/drivers/staging/media/bcm2048/radio-bcm2048.c +++ b/drivers/staging/media/bcm2048/radio-bcm2048.c @@ -1828,17 +1828,14 @@ static int bcm2048_deinit(struct bcm2048_device *bdev) err = bcm2048_set_audio_route(bdev, 0); if (err < 0) - goto exit; + return err; err = bcm2048_set_dac_output(bdev, 0); if (err < 0) - goto exit; + return err; err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF); - if (err < 0) - goto exit; -exit: return err; } diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h index 3cc9be776f8b..f4f35c9ad1ab 100644 --- a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h +++ b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h @@ -538,8 +538,8 @@ struct vpfe_isif_raw_config { }; /********************************************************************** -* IPIPE API Structures -**********************************************************************/ + * IPIPE API Structures + **********************************************************************/ /* IPIPE module configurations */ diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c index ac78ed2f8bcc..ff47a8f369fc 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c +++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c @@ -1350,21 +1350,16 @@ error: */ static long ipipe_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { - int ret = 0; - switch (cmd) { case VIDIOC_VPFE_IPIPE_S_CONFIG: - ret = ipipe_s_config(sd, arg); - break; + return ipipe_s_config(sd, arg); case VIDIOC_VPFE_IPIPE_G_CONFIG: - 
ret = ipipe_g_config(sd, arg); - break; + return ipipe_g_config(sd, arg); default: - ret = -ENOIOCTLCMD; + return -ENOIOCTLCMD; } - return ret; } void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en) diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c index b1d5e23ae6e0..958ef71ee4d5 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c +++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c @@ -242,7 +242,7 @@ static int get_ipipe_mode(struct vpfe_ipipe_device *ipipe) if (ipipeif_sink == IPIPEIF_INPUT_MEMORY) return IPIPE_MODE_SINGLE_SHOT; - else if (ipipeif_sink == IPIPEIF_INPUT_ISIF) + if (ipipeif_sink == IPIPEIF_INPUT_ISIF) return IPIPE_MODE_CONTINUOUS; return -EINVAL; @@ -682,8 +682,10 @@ ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id, ipipe_clock_enable(base_addr); if (id == IPIPE_RGB2RGB_2) { - /* For second RGB module, gain integer is 3 bits instead - of 4, offset has 11 bits insread of 13 */ + /* + * For second RGB module, gain integer is 3 bits instead + * of 4, offset has 11 bits insread of 13 + */ offset = RGB2_MUL_BASE; integ_mask = 0x7; offset_mask = RGB2RGB_2_OFST_MASK; @@ -792,8 +794,10 @@ ipipe_set_3d_lut_regs(void __iomem *base_addr, void __iomem *isp5_base_addr, /* valied table */ tbl = lut_3d->table; for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) { - /* Each entry has 0-9 (B), 10-19 (G) and - 20-29 R values */ + /* + * Each entry has 0-9 (B), 10-19 (G) and + * 20-29 R values + */ val = tbl[i].b & D3_LUT_ENTRY_MASK; val |= (tbl[i].g & D3_LUT_ENTRY_MASK) << D3_LUT_ENTRY_G_SHIFT; diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h index 2bf2f7a69173..7ee157233047 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h +++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h @@ -278,9 +278,10 @@ /* Resizer Rescale Parameters */ #define RSZ_EN_A 0x58 #define RSZ_EN_B 0xe8 -/* offset of the registers to be added with base register of - either RSZ0 or RSZ1 -*/ +/* + * offset of the registers to be added with base register of + * either RSZ0 or RSZ1 + */ #define RSZ_MODE 0x4 #define RSZ_420 0x8 #define RSZ_I_VPS 0xc diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c index 633d6456fdce..46fd2c7f69c3 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c +++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c @@ -94,7 +94,7 @@ static int get_oneshot_mode(enum ipipeif_input_entity input) { if (input == IPIPEIF_INPUT_MEMORY) return IPIPEIF_MODE_ONE_SHOT; - else if (input == IPIPEIF_INPUT_ISIF) + if (input == IPIPEIF_INPUT_ISIF) return IPIPEIF_MODE_CONTINUOUS; return -EINVAL; @@ -641,8 +641,9 @@ ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif, } static int -ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_frame_size_enum *fse) +ipipeif_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) { struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt format; diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c index 99057892d88d..ae9202ded59f 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_isif.c +++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c @@ -282,7 +282,8 @@ isif_config_format(struct vpfe_device 
*vpfe_dev, unsigned int pad) * @fmt: pointer to v4l2 subdev format structure */ static void -isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg, +isif_try_format(struct vpfe_isif_device *isif, + struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { unsigned int width = fmt->format.width; @@ -625,21 +626,16 @@ static int isif_set_params(struct v4l2_subdev *sd, void *params) */ static long isif_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { - int ret; - switch (cmd) { case VIDIOC_VPFE_ISIF_S_RAW_PARAMS: - ret = isif_set_params(sd, arg); - break; + return isif_set_params(sd, arg); case VIDIOC_VPFE_ISIF_G_RAW_PARAMS: - ret = isif_get_params(sd, arg); - break; + return isif_get_params(sd, arg); default: - ret = -ENOIOCTLCMD; + return -ENOIOCTLCMD; } - return ret; } static void isif_config_gain_offset(struct vpfe_isif_device *isif) @@ -1239,7 +1235,8 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode) * a lot of registers that we didn't touch */ /* start with all bits zero */ - ccdcfg = modeset = 0; + ccdcfg = 0; + modeset = 0; pix_fmt = isif_get_pix_fmt(format->code); if (pix_fmt < 0) { pr_debug("Invalid pix_fmt(input mode)\n"); @@ -1398,8 +1395,9 @@ static int isif_set_stream(struct v4l2_subdev *sd, int enable) * @which: wanted subdev format. */ static struct v4l2_mbus_framefmt * -__isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg, - unsigned int pad, enum v4l2_subdev_format_whence which) +__isif_get_format(struct vpfe_isif_device *isif, + struct v4l2_subdev_pad_config *cfg, unsigned int pad, + enum v4l2_subdev_format_whence which) { if (which == V4L2_SUBDEV_FORMAT_TRY) { struct v4l2_subdev_format fmt; @@ -1570,7 +1568,7 @@ isif_pad_set_selection(struct v4l2_subdev *sd, sel->r.height = format->height; } /* adjust the width to 16 pixel boundary */ - sel->r.width = ((sel->r.width + 15) & ~0xf); + sel->r.width = (sel->r.width + 15) & ~0xf; vpfe_isif->crop = sel->r; if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) { isif_set_image_window(vpfe_isif); diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c index a91395ce91e1..3cd56cc132c7 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c +++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c @@ -404,7 +404,7 @@ resizer_calculate_down_scale_f_div_param(struct device *dev, param->f_div.pass[0].src_hsz = upper_h1 + o; param->f_div.pass[1].o_hsz = h2 - 1; param->f_div.pass[1].i_hps = 10 + (val1 * two_power); - param->f_div.pass[1].h_phs = (val - (val1 << 8)); + param->f_div.pass[1].h_phs = val - (val1 << 8); param->f_div.pass[1].src_hps = upper_h1 - o; param->f_div.pass[1].src_hsz = upper_h2 + o; @@ -425,8 +425,8 @@ resizer_configure_common_in_params(struct vpfe_resizer_device *resizer) param->rsz_common.hps = param->user_config.hst; if (vpfe_ipipeif_decimation_enabled(vpfe_dev)) - param->rsz_common.hsz = (((informat->width - 1) * - IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev)); + param->rsz_common.hsz = ((informat->width - 1) * + IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev); else param->rsz_common.hsz = informat->width - 1; @@ -650,7 +650,7 @@ resizer_calculate_normal_f_div_param(struct device *dev, int input_width, param->f_div.pass[0].src_hsz = (input_width >> 2) + o; param->f_div.pass[1].o_hsz = h2 - 1; param->f_div.pass[1].i_hps = val1; - param->f_div.pass[1].h_phs = (val - (val1 << 8)); + param->f_div.pass[1].h_phs = val - (val1 << 8); 
param->f_div.pass[1].src_hps = (input_width >> 2) - o; param->f_div.pass[1].src_hsz = (input_width >> 2) + o; @@ -1387,8 +1387,9 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, * @fmt: pointer to v4l2 subdev format structure * return -EINVAL or zero on success */ -static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *fmt) +static int resizer_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) { struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *format; @@ -1447,8 +1448,9 @@ static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con * @fmt: pointer to v4l2 subdev format structure * return -EINVAL or zero on success */ -static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *fmt) +static int resizer_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) { struct v4l2_mbus_framefmt *format; @@ -1670,7 +1672,7 @@ static int resizer_link_setup(struct media_entity *entity, resizer->crop_resizer.input = RESIZER_CROP_INPUT_IPIPEIF; else if (ipipe_source == IPIPE_OUTPUT_RESIZER) - resizer->crop_resizer.input = + resizer->crop_resizer.input = RESIZER_CROP_INPUT_IPIPE; else return -EINVAL; diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c index ec46f366dd17..bf077f8342f6 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c @@ -442,8 +442,10 @@ static int vpfe_register_entities(struct vpfe_device *vpfe_dev) /* create links now, starting with external(i2c) entities */ for (i = 0; i < vpfe_dev->num_ext_subdevs; i++) - /* if entity has no pads (ex: amplifier), - cant establish link */ + /* + * if entity has no pads (ex: amplifier), + * cant establish link + */ if (vpfe_dev->sd[i]->entity.num_pads) { ret = media_create_pad_link(&vpfe_dev->sd[i]->entity, 0, &vpfe_dev->vpfe_isif.subdev.entity, diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index db49af90217e..b793c04028a3 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c @@ -172,21 +172,19 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video) static int vpfe_update_pipe_state(struct vpfe_video_device *video) { struct vpfe_pipeline *pipe = &video->pipe; - int ret; - ret = vpfe_prepare_pipeline(video); - if (ret) - return ret; + if (vpfe_prepare_pipeline(video)) + return vpfe_prepare_pipeline(video); - /* Find out if there is any input video - if yes, it is single shot. - */ + /* + * Find out if there is any input video + * if yes, it is single shot. 
+ */ if (pipe->input_num == 0) { pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS; - ret = vpfe_update_current_ext_subdev(video); - if (ret) { + if (vpfe_update_current_ext_subdev(video)) { pr_err("Invalid external subdev\n"); - return ret; + return vpfe_update_current_ext_subdev(video); } } else { pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT; @@ -460,7 +458,7 @@ void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video) video->next_frm = list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list); - if (VPFE_PIPELINE_STREAM_SINGLESHOT == video->pipe.state) + if (video->pipe.state == VPFE_PIPELINE_STREAM_SINGLESHOT) video->cur_frm = video->next_frm; list_del(&video->next_frm->list); @@ -529,10 +527,11 @@ static int vpfe_release(struct file *file) if (fh->io_allowed) { if (video->started) { vpfe_stop_capture(video); - /* mark pipe state as stopped in vpfe_release(), - as app might call streamon() after streamoff() - in which case driver has to start streaming. - */ + /* + * mark pipe state as stopped in vpfe_release(), + * as app might call streamon() after streamoff() + * in which case driver has to start streaming. + */ video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED; vb2_streamoff(&video->buffer_queue, video->buffer_queue.type); @@ -668,12 +667,13 @@ static int vpfe_enum_fmt(struct file *file, void *priv, struct v4l2_subdev *subdev; struct v4l2_format format; struct media_pad *remote; - int ret; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n"); - /* since already subdev pad format is set, - only one pixel format is available */ + /* + * since already subdev pad format is set, + * only one pixel format is available + */ if (fmt->index > 0) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n"); return -EINVAL; @@ -695,11 +695,10 @@ static int vpfe_enum_fmt(struct file *file, void *priv, sd_fmt.pad = remote->index; sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; /* get output format of remote subdev */ - ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); - if (ret) { + if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) { v4l2_err(&vpfe_dev->v4l2_dev, "invalid remote subdev for video node\n"); - return ret; + return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); } /* convert to pix format */ mbus.code = sd_fmt.format.code; @@ -726,7 +725,6 @@ static int vpfe_s_fmt(struct file *file, void *priv, struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; struct v4l2_format format; - int ret; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n"); /* If streaming is started, return error */ @@ -735,9 +733,8 @@ static int vpfe_s_fmt(struct file *file, void *priv, return -EBUSY; } /* get adjacent subdev's output pad format */ - ret = __vpfe_video_get_format(video, &format); - if (ret) - return ret; + if (__vpfe_video_get_format(video, &format)) + return __vpfe_video_get_format(video, &format); *fmt = format; video->fmt = *fmt; return 0; @@ -760,13 +757,11 @@ static int vpfe_try_fmt(struct file *file, void *priv, struct vpfe_video_device *video = video_drvdata(file); struct vpfe_device *vpfe_dev = video->vpfe_dev; struct v4l2_format format; - int ret; v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n"); /* get adjacent subdev's output pad format */ - ret = __vpfe_video_get_format(video, &format); - if (ret) - return ret; + if (__vpfe_video_get_format(video, &format)) + return __vpfe_video_get_format(video, &format); *fmt = format; return 0; @@ -843,9 +838,8 @@ static int vpfe_s_input(struct file *file, void 
*priv, unsigned int index) v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); - ret = mutex_lock_interruptible(&video->lock); - if (ret) - return ret; + if (mutex_lock_interruptible(&video->lock)) + return mutex_lock_interruptible(&video->lock); /* * If streaming is started return device busy * error @@ -946,9 +940,8 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id) v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); /* Call decoder driver function to set the standard */ - ret = mutex_lock_interruptible(&video->lock); - if (ret) - return ret; + if (mutex_lock_interruptible(&video->lock)) + return mutex_lock_interruptible(&video->lock); sdinfo = video->current_ext_subdev; /* If streaming is started, return device busy error */ if (video->started) { @@ -1328,15 +1321,14 @@ static int vpfe_reqbufs(struct file *file, void *priv, v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n"); - if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type && - V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) { + if (req_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + req_buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT){ v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n"); return -EINVAL; } - ret = mutex_lock_interruptible(&video->lock); - if (ret) - return ret; + if (mutex_lock_interruptible(&video->lock)) + return mutex_lock_interruptible(&video->lock); if (video->io_usrs != 0) { v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); @@ -1362,11 +1354,10 @@ static int vpfe_reqbufs(struct file *file, void *priv, q->buf_struct_size = sizeof(struct vpfe_cap_buffer); q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - ret = vb2_queue_init(q); - if (ret) { + if (vb2_queue_init(q)) { v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); - return ret; + return vb2_queue_init(q); } fh->io_allowed = 1; @@ -1390,8 +1381,8 @@ static int vpfe_querybuf(struct file *file, void *priv, v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n"); - if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type && - V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) { + if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); return -EINVAL; } @@ -1417,8 +1408,8 @@ static int vpfe_qbuf(struct file *file, void *priv, v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n"); - if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type && - V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) { + if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + p->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); return -EINVAL; } @@ -1445,8 +1436,8 @@ static int vpfe_dqbuf(struct file *file, void *priv, v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n"); - if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type && - V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) { + if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); return -EINVAL; } @@ -1478,8 +1469,8 @@ static int vpfe_streamon(struct file *file, void *priv, v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n"); - if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type && - V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) { + if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n"); return ret; } @@ -1495,7 +1486,7 @@ static int vpfe_streamon(struct file *file, void *priv, return -EIO; } /* 
Validate the pipeline */ - if (V4L2_BUF_TYPE_VIDEO_CAPTURE == buf_type) { + if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = vpfe_video_validate_pipeline(pipe); if (ret < 0) return ret; @@ -1542,9 +1533,8 @@ static int vpfe_streamoff(struct file *file, void *priv, return -EINVAL; } - ret = mutex_lock_interruptible(&video->lock); - if (ret) - return ret; + if (mutex_lock_interruptible(&video->lock)) + return mutex_lock_interruptible(&video->lock); vpfe_stop_capture(video); ret = vb2_streamoff(&video->buffer_queue, buf_type); diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c index cf2e96bcf395..7ea749cf19f9 100644 --- a/drivers/staging/media/mn88472/mn88472.c +++ b/drivers/staging/media/mn88472/mn88472.c @@ -96,9 +96,9 @@ static int mn88472_set_frontend(struct dvb_frontend *fe) /* Calculate IF registers ( (1<<24)*IF / Xtal ) */ tmp = div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2), dev->xtal); - if_val[0] = ((tmp >> 16) & 0xff); - if_val[1] = ((tmp >> 8) & 0xff); - if_val[2] = ((tmp >> 0) & 0xff); + if_val[0] = (tmp >> 16) & 0xff; + if_val[1] = (tmp >> 8) & 0xff; + if_val[2] = (tmp >> 0) & 0xff; ret = regmap_write(dev->regmap[2], 0xfb, 0x13); ret = regmap_write(dev->regmap[2], 0xef, 0x13); @@ -456,7 +456,7 @@ static int mn88472_probe(struct i2c_client *client, } dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (dev == NULL) { + if (!dev) { ret = -ENOMEM; goto err; } @@ -483,7 +483,7 @@ static int mn88472_probe(struct i2c_client *client, * 0x1a and 0x1c, in order to get own I2C client for each register page. */ dev->client[1] = i2c_new_dummy(client->adapter, 0x1a); - if (dev->client[1] == NULL) { + if (!dev->client[1]) { ret = -ENODEV; dev_err(&client->dev, "I2C registration failed\n"); if (ret) @@ -497,7 +497,7 @@ static int mn88472_probe(struct i2c_client *client, i2c_set_clientdata(dev->client[1], dev); dev->client[2] = i2c_new_dummy(client->adapter, 0x1c); - if (dev->client[2] == NULL) { + if (!dev->client[2]) { ret = -ENODEV; dev_err(&client->dev, "2nd I2C registration failed\n"); if (ret) diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index fb80d2bc5a25..c5a5138b3d3b 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c @@ -1318,8 +1318,6 @@ error_modules: error_iss: omap4iss_put(iss); error: - platform_set_drvdata(pdev, NULL); - mutex_destroy(&iss->iss_mutex); return ret; diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c index dc3fb25b52aa..de4f76abfb47 100644 --- a/drivers/staging/most/aim-cdev/cdev.c +++ b/drivers/staging/most/aim-cdev/cdev.c @@ -32,6 +32,7 @@ static struct most_aim cdev_aim; struct aim_channel { wait_queue_head_t wq; + spinlock_t unlink; /* synchronization lock to unlink channels */ struct cdev cdev; struct device *dev; struct mutex io_mutex; @@ -39,11 +40,9 @@ struct aim_channel { struct most_channel_config *cfg; unsigned int channel_id; dev_t devno; - bool keep_mbo; - unsigned int mbo_offs; - struct mbo *stacked_mbo; + size_t mbo_offs; DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *)); - atomic_t access_ref; + int access_ref; struct list_head list; }; @@ -51,15 +50,26 @@ struct aim_channel { static struct list_head channel_list; static spinlock_t ch_list_lock; +static inline bool ch_has_mbo(struct aim_channel *c) +{ + return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0; +} + +static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo) +{ + *mbo = most_get_mbo(c->iface, 
c->channel_id, &cdev_aim); + return *mbo; +} + static struct aim_channel *get_channel(struct most_interface *iface, int id) { - struct aim_channel *channel, *tmp; + struct aim_channel *c, *tmp; unsigned long flags; int found_channel = 0; spin_lock_irqsave(&ch_list_lock, flags); - list_for_each_entry_safe(channel, tmp, &channel_list, list) { - if ((channel->iface == iface) && (channel->channel_id == id)) { + list_for_each_entry_safe(c, tmp, &channel_list, list) { + if ((c->iface == iface) && (c->channel_id == id)) { found_channel = 1; break; } @@ -67,7 +77,29 @@ static struct aim_channel *get_channel(struct most_interface *iface, int id) spin_unlock_irqrestore(&ch_list_lock, flags); if (!found_channel) return NULL; - return channel; + return c; +} + +static void stop_channel(struct aim_channel *c) +{ + struct mbo *mbo; + + while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1)) + most_put_mbo(mbo); + most_stop_channel(c->iface, c->channel_id, &cdev_aim); +} + +static void destroy_cdev(struct aim_channel *c) +{ + unsigned long flags; + + device_destroy(aim_class, c->devno); + cdev_del(&c->cdev); + kfifo_free(&c->fifo); + spin_lock_irqsave(&ch_list_lock, flags); + list_del(&c->list); + spin_unlock_irqrestore(&ch_list_lock, flags); + ida_simple_remove(&minor_id, MINOR(c->devno)); } /** @@ -80,29 +112,38 @@ static struct aim_channel *get_channel(struct most_interface *iface, int id) */ static int aim_open(struct inode *inode, struct file *filp) { - struct aim_channel *channel; + struct aim_channel *c; int ret; - channel = to_channel(inode->i_cdev); - filp->private_data = channel; + c = to_channel(inode->i_cdev); + filp->private_data = c; - if (((channel->cfg->direction == MOST_CH_RX) && + if (((c->cfg->direction == MOST_CH_RX) && ((filp->f_flags & O_ACCMODE) != O_RDONLY)) || - ((channel->cfg->direction == MOST_CH_TX) && + ((c->cfg->direction == MOST_CH_TX) && ((filp->f_flags & O_ACCMODE) != O_WRONLY))) { pr_info("WARN: Access flags mismatch\n"); return -EACCES; } - if (!atomic_inc_and_test(&channel->access_ref)) { + + mutex_lock(&c->io_mutex); + if (!c->dev) { + pr_info("WARN: Device is destroyed\n"); + mutex_unlock(&c->io_mutex); + return -EBUSY; + } + + if (c->access_ref) { pr_info("WARN: Device is busy\n"); - atomic_dec(&channel->access_ref); + mutex_unlock(&c->io_mutex); return -EBUSY; } - ret = most_start_channel(channel->iface, channel->channel_id, - &cdev_aim); - if (ret) - atomic_dec(&channel->access_ref); + c->mbo_offs = 0; + ret = most_start_channel(c->iface, c->channel_id, &cdev_aim); + if (!ret) + c->access_ref = 1; + mutex_unlock(&c->io_mutex); return ret; } @@ -115,33 +156,21 @@ static int aim_open(struct inode *inode, struct file *filp) */ static int aim_close(struct inode *inode, struct file *filp) { - int ret; - struct mbo *mbo; - struct aim_channel *channel = to_channel(inode->i_cdev); - - mutex_lock(&channel->io_mutex); - if (!channel->dev) { - mutex_unlock(&channel->io_mutex); - atomic_dec(&channel->access_ref); - device_destroy(aim_class, channel->devno); - cdev_del(&channel->cdev); - kfifo_free(&channel->fifo); - list_del(&channel->list); - ida_simple_remove(&minor_id, MINOR(channel->devno)); - wake_up_interruptible(&channel->wq); - kfree(channel); - return 0; + struct aim_channel *c = to_channel(inode->i_cdev); + + mutex_lock(&c->io_mutex); + spin_lock(&c->unlink); + c->access_ref = 0; + spin_unlock(&c->unlink); + if (c->dev) { + stop_channel(c); + mutex_unlock(&c->io_mutex); + } else { + destroy_cdev(c); + mutex_unlock(&c->io_mutex); + kfree(c); } - 
mutex_unlock(&channel->io_mutex); - - while (kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1)) - most_put_mbo(mbo); - if (channel->keep_mbo) - most_put_mbo(channel->stacked_mbo); - ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim); - atomic_dec(&channel->access_ref); - wake_up_interruptible(&channel->wq); - return ret; + return 0; } /** @@ -154,62 +183,48 @@ static int aim_close(struct inode *inode, struct file *filp) static ssize_t aim_write(struct file *filp, const char __user *buf, size_t count, loff_t *offset) { - int ret, err; - size_t actual_len = 0; - size_t max_len = 0; - ssize_t retval; - struct mbo *mbo; - struct aim_channel *channel = filp->private_data; - - mutex_lock(&channel->io_mutex); - if (unlikely(!channel->dev)) { - mutex_unlock(&channel->io_mutex); - return -EPIPE; - } - mutex_unlock(&channel->io_mutex); + int ret; + size_t actual_len; + size_t max_len; + struct mbo *mbo = NULL; + struct aim_channel *c = filp->private_data; - mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim); + mutex_lock(&c->io_mutex); + while (c->dev && !ch_get_mbo(c, &mbo)) { + mutex_unlock(&c->io_mutex); - if (!mbo) { if ((filp->f_flags & O_NONBLOCK)) return -EAGAIN; - if (wait_event_interruptible( - channel->wq, - (mbo = most_get_mbo(channel->iface, - channel->channel_id, - &cdev_aim)) || - (!channel->dev))) + if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev)) return -ERESTARTSYS; + mutex_lock(&c->io_mutex); } - mutex_lock(&channel->io_mutex); - if (unlikely(!channel->dev)) { - mutex_unlock(&channel->io_mutex); - err = -EPIPE; - goto error; + if (unlikely(!c->dev)) { + ret = -EPIPE; + goto unlock; } - mutex_unlock(&channel->io_mutex); - max_len = channel->cfg->buffer_size; + max_len = c->cfg->buffer_size; actual_len = min(count, max_len); mbo->buffer_length = actual_len; - retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length); - if (retval) { - err = -EIO; - goto error; + if (copy_from_user(mbo->virt_address, buf, mbo->buffer_length)) { + ret = -EFAULT; + goto put_mbo; } ret = most_submit_mbo(mbo); - if (ret) { - pr_info("submitting MBO to core failed\n"); - err = ret; - goto error; - } - return actual_len - retval; -error: + if (ret) + goto put_mbo; + + mutex_unlock(&c->io_mutex); + return actual_len; +put_mbo: most_put_mbo(mbo); - return err; +unlock: + mutex_unlock(&c->io_mutex); + return ret; } /** @@ -222,59 +237,46 @@ error: static ssize_t aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset) { - ssize_t retval; - size_t not_copied, proc_len; + size_t to_copy, not_copied, copied; struct mbo *mbo; - struct aim_channel *channel = filp->private_data; + struct aim_channel *c = filp->private_data; - if (channel->keep_mbo) { - mbo = channel->stacked_mbo; - channel->keep_mbo = false; - goto start_copy; - } - while ((!kfifo_out(&channel->fifo, &mbo, 1)) && (channel->dev)) { + mutex_lock(&c->io_mutex); + while (c->dev && !kfifo_peek(&c->fifo, &mbo)) { + mutex_unlock(&c->io_mutex); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; - if (wait_event_interruptible(channel->wq, - (!kfifo_is_empty(&channel->fifo) || - (!channel->dev)))) + if (wait_event_interruptible(c->wq, + (!kfifo_is_empty(&c->fifo) || + (!c->dev)))) return -ERESTARTSYS; + mutex_lock(&c->io_mutex); } -start_copy: /* make sure we don't submit to gone devices */ - mutex_lock(&channel->io_mutex); - if (unlikely(!channel->dev)) { - mutex_unlock(&channel->io_mutex); + if (unlikely(!c->dev)) { + mutex_unlock(&c->io_mutex); return -EIO; } - if (count < 
mbo->processed_length) - channel->keep_mbo = true; - - proc_len = min((int)count, - (int)(mbo->processed_length - channel->mbo_offs)); + to_copy = min_t(size_t, + count, + mbo->processed_length - c->mbo_offs); not_copied = copy_to_user(buf, - mbo->virt_address + channel->mbo_offs, - proc_len); + mbo->virt_address + c->mbo_offs, + to_copy); - retval = not_copied ? proc_len - not_copied : proc_len; + copied = to_copy - not_copied; - if (channel->keep_mbo) { - channel->mbo_offs = retval; - channel->stacked_mbo = mbo; - } else { + c->mbo_offs += copied; + if (c->mbo_offs >= mbo->processed_length) { + kfifo_skip(&c->fifo); most_put_mbo(mbo); - channel->mbo_offs = 0; + c->mbo_offs = 0; } - mutex_unlock(&channel->io_mutex); - return retval; -} - -static inline bool __must_check IS_ERR_OR_FALSE(int x) -{ - return x <= 0; + mutex_unlock(&c->io_mutex); + return copied; } static unsigned int aim_poll(struct file *filp, poll_table *wait) @@ -288,7 +290,7 @@ static unsigned int aim_poll(struct file *filp, poll_table *wait) if (!kfifo_is_empty(&c->fifo)) mask |= POLLIN | POLLRDNORM; } else { - if (!IS_ERR_OR_FALSE(channel_has_mbo(c->iface, c->channel_id))) + if (ch_has_mbo(c)) mask |= POLLOUT | POLLWRNORM; } return mask; @@ -316,33 +318,29 @@ static const struct file_operations channel_fops = { */ static int aim_disconnect_channel(struct most_interface *iface, int channel_id) { - struct aim_channel *channel; - unsigned long flags; + struct aim_channel *c; if (!iface) { pr_info("Bad interface pointer\n"); return -EINVAL; } - channel = get_channel(iface, channel_id); - if (!channel) + c = get_channel(iface, channel_id); + if (!c) return -ENXIO; - mutex_lock(&channel->io_mutex); - channel->dev = NULL; - mutex_unlock(&channel->io_mutex); - - if (atomic_read(&channel->access_ref)) { - device_destroy(aim_class, channel->devno); - cdev_del(&channel->cdev); - kfifo_free(&channel->fifo); - ida_simple_remove(&minor_id, MINOR(channel->devno)); - spin_lock_irqsave(&ch_list_lock, flags); - list_del(&channel->list); - spin_unlock_irqrestore(&ch_list_lock, flags); - kfree(channel); + mutex_lock(&c->io_mutex); + spin_lock(&c->unlink); + c->dev = NULL; + spin_unlock(&c->unlink); + if (c->access_ref) { + stop_channel(c); + wake_up_interruptible(&c->wq); + mutex_unlock(&c->io_mutex); } else { - wake_up_interruptible(&channel->wq); + destroy_cdev(c); + mutex_unlock(&c->io_mutex); + kfree(c); } return 0; } @@ -356,21 +354,27 @@ static int aim_disconnect_channel(struct most_interface *iface, int channel_id) */ static int aim_rx_completion(struct mbo *mbo) { - struct aim_channel *channel; + struct aim_channel *c; if (!mbo) return -EINVAL; - channel = get_channel(mbo->ifp, mbo->hdm_channel_id); - if (!channel) + c = get_channel(mbo->ifp, mbo->hdm_channel_id); + if (!c) return -ENXIO; - kfifo_in(&channel->fifo, &mbo, 1); + spin_lock(&c->unlink); + if (!c->access_ref || !c->dev) { + spin_unlock(&c->unlink); + return -EFAULT; + } + kfifo_in(&c->fifo, &mbo, 1); + spin_unlock(&c->unlink); #ifdef DEBUG_MESG - if (kfifo_is_full(&channel->fifo)) + if (kfifo_is_full(&c->fifo)) pr_info("WARN: Fifo is full\n"); #endif - wake_up_interruptible(&channel->wq); + wake_up_interruptible(&c->wq); return 0; } @@ -383,7 +387,7 @@ static int aim_rx_completion(struct mbo *mbo) */ static int aim_tx_completion(struct most_interface *iface, int channel_id) { - struct aim_channel *channel; + struct aim_channel *c; if (!iface) { pr_info("Bad interface pointer\n"); @@ -394,15 +398,13 @@ static int aim_tx_completion(struct most_interface *iface, int 
channel_id) return -EINVAL; } - channel = get_channel(iface, channel_id); - if (!channel) + c = get_channel(iface, channel_id); + if (!c) return -ENXIO; - wake_up_interruptible(&channel->wq); + wake_up_interruptible(&c->wq); return 0; } -static struct most_aim cdev_aim; - /** * aim_probe - probe function of the driver module * @iface: pointer to interface instance @@ -419,7 +421,7 @@ static int aim_probe(struct most_interface *iface, int channel_id, struct most_channel_config *cfg, struct kobject *parent, char *name) { - struct aim_channel *channel; + struct aim_channel *c; unsigned long cl_flags; int retval; int current_minor; @@ -428,60 +430,60 @@ static int aim_probe(struct most_interface *iface, int channel_id, pr_info("Probing AIM with bad arguments"); return -EINVAL; } - channel = get_channel(iface, channel_id); - if (channel) + c = get_channel(iface, channel_id); + if (c) return -EEXIST; current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL); if (current_minor < 0) return current_minor; - channel = kzalloc(sizeof(*channel), GFP_KERNEL); - if (!channel) { + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) { retval = -ENOMEM; goto error_alloc_channel; } - channel->devno = MKDEV(major, current_minor); - cdev_init(&channel->cdev, &channel_fops); - channel->cdev.owner = THIS_MODULE; - cdev_add(&channel->cdev, channel->devno, 1); - channel->iface = iface; - channel->cfg = cfg; - channel->channel_id = channel_id; - channel->mbo_offs = 0; - atomic_set(&channel->access_ref, -1); - INIT_KFIFO(channel->fifo); - retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL); + c->devno = MKDEV(major, current_minor); + cdev_init(&c->cdev, &channel_fops); + c->cdev.owner = THIS_MODULE; + cdev_add(&c->cdev, c->devno, 1); + c->iface = iface; + c->cfg = cfg; + c->channel_id = channel_id; + c->access_ref = 0; + spin_lock_init(&c->unlink); + INIT_KFIFO(c->fifo); + retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL); if (retval) { pr_info("failed to alloc channel kfifo"); goto error_alloc_kfifo; } - init_waitqueue_head(&channel->wq); - mutex_init(&channel->io_mutex); + init_waitqueue_head(&c->wq); + mutex_init(&c->io_mutex); spin_lock_irqsave(&ch_list_lock, cl_flags); - list_add_tail(&channel->list, &channel_list); + list_add_tail(&c->list, &channel_list); spin_unlock_irqrestore(&ch_list_lock, cl_flags); - channel->dev = device_create(aim_class, + c->dev = device_create(aim_class, NULL, - channel->devno, + c->devno, NULL, "%s", name); - retval = IS_ERR(channel->dev); - if (retval) { + if (IS_ERR(c->dev)) { + retval = PTR_ERR(c->dev); pr_info("failed to create new device node %s\n", name); goto error_create_device; } - kobject_uevent(&channel->dev->kobj, KOBJ_ADD); + kobject_uevent(&c->dev->kobj, KOBJ_ADD); return 0; error_create_device: - kfifo_free(&channel->fifo); - list_del(&channel->list); + kfifo_free(&c->fifo); + list_del(&c->list); error_alloc_kfifo: - cdev_del(&channel->cdev); - kfree(channel); + cdev_del(&c->cdev); + kfree(c); error_alloc_channel: ida_simple_remove(&minor_id, current_minor); return retval; @@ -526,19 +528,15 @@ free_cdev: static void __exit mod_exit(void) { - struct aim_channel *channel, *tmp; + struct aim_channel *c, *tmp; pr_info("exit module\n"); most_deregister_aim(&cdev_aim); - list_for_each_entry_safe(channel, tmp, &channel_list, list) { - device_destroy(aim_class, channel->devno); - cdev_del(&channel->cdev); - kfifo_free(&channel->fifo); - list_del(&channel->list); - ida_simple_remove(&minor_id, MINOR(channel->devno)); - kfree(channel); + 
list_for_each_entry_safe(c, tmp, &channel_list, list) { + destroy_cdev(c); + kfree(c); } class_destroy(aim_class); unregister_chrdev_region(aim_devno, 1); diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c index 3c7beb03871d..2f42de44d051 100644 --- a/drivers/staging/most/aim-network/networking.c +++ b/drivers/staging/most/aim-network/networking.c @@ -431,6 +431,7 @@ static int aim_rx_data(struct mbo *mbo) u32 len = mbo->processed_length; struct sk_buff *skb; struct net_device *dev; + unsigned int skb_len; nd = get_net_dev_context(mbo->ifp); if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id) @@ -482,9 +483,13 @@ static int aim_rx_data(struct mbo *mbo) memcpy(skb_put(skb, len), buf, len); skb->protocol = eth_type_trans(skb, dev); - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; - netif_rx(skb); + skb_len = skb->len; + if (netif_rx(skb) == NET_RX_SUCCESS) { + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb_len; + } else { + dev->stats.rx_dropped++; + } out: most_put_mbo(mbo); diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c index 172257596f1f..3c524506ee22 100644 --- a/drivers/staging/most/hdm-dim2/dim2_hal.c +++ b/drivers/staging/most/hdm-dim2/dim2_hal.c @@ -84,7 +84,7 @@ static inline bool dim_on_error(u8 error_id, const char *error_message) struct lld_global_vars_t { bool dim_is_initialized; bool mcm_is_initialized; - struct dim2_regs *dim2; /* DIM2 core base address */ + struct dim2_regs __iomem *dim2; /* DIM2 core base address */ u32 dbr_map[DBR_MAP_SIZE]; }; @@ -650,7 +650,7 @@ static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number) /* -------------------------------------------------------------------------- */ /* API */ -u8 dim_startup(void *dim_base_address, u32 mlb_clock) +u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock) { g.dim_is_initialized = false; diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h index 48cdd9c8cde1..fc73d4f97734 100644 --- a/drivers/staging/most/hdm-dim2/dim2_hal.h +++ b/drivers/staging/most/hdm-dim2/dim2_hal.h @@ -16,6 +16,7 @@ #define _DIM2_HAL_H #include <linux/types.h> +#include "dim2_reg.h" #ifdef __cplusplus extern "C" { @@ -65,7 +66,7 @@ struct dim_channel { u16 done_sw_buffers_number; /*< Done software buffers number. 
*/ }; -u8 dim_startup(void *dim_base_address, u32 mlb_clock); +u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock); void dim_shutdown(void); @@ -103,9 +104,9 @@ bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr, bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number); -u32 dimcb_io_read(u32 *ptr32); +u32 dimcb_io_read(u32 __iomem *ptr32); -void dimcb_io_write(u32 *ptr32, u32 value); +void dimcb_io_write(u32 __iomem *ptr32, u32 value); void dimcb_on_error(u8 error_id, const char *error_message); diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c index 327d738c7194..0dc86add7161 100644 --- a/drivers/staging/most/hdm-dim2/dim2_hdm.c +++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c @@ -99,7 +99,7 @@ struct dim2_hdm { struct most_channel_capability capabilities[DMA_CHANNELS]; struct most_interface most_iface; char name[16 + sizeof "dim2-"]; - void *io_base; + void __iomem *io_base; unsigned int irq_ahb0; int clk_speed; struct task_struct *netinfo_task; @@ -138,9 +138,9 @@ bool dim2_sysfs_get_state_cb(void) * dimcb_io_read - callback from HAL to read an I/O register * @ptr32: register address */ -u32 dimcb_io_read(u32 *ptr32) +u32 dimcb_io_read(u32 __iomem *ptr32) { - return __raw_readl(ptr32); + return readl(ptr32); } /** @@ -148,9 +148,9 @@ u32 dimcb_io_read(u32 *ptr32) * @ptr32: register address * @value: value to write */ -void dimcb_io_write(u32 *ptr32, u32 value) +void dimcb_io_write(u32 __iomem *ptr32, u32 value) { - __raw_writel(value, ptr32); + writel(value, ptr32); } /** @@ -251,7 +251,7 @@ static int try_start_dim_transfer(struct hdm_channel *hdm_ch) return -EAGAIN; } - mbo = list_entry(head->next, struct mbo, list); + mbo = list_first_entry(head, struct mbo, list); buf_size = mbo->buffer_length; BUG_ON(mbo->bus_address == 0); @@ -362,7 +362,7 @@ static void service_done_flag(struct dim2_hdm *dev, int ch_idx) break; } - mbo = list_entry(head->next, struct mbo, list); + mbo = list_first_entry(head, struct mbo, list); list_del(head->next); spin_unlock_irqrestore(&dim_lock, flags); @@ -495,7 +495,7 @@ static void complete_all_mbos(struct list_head *head) break; } - mbo = list_entry(head->next, struct mbo, list); + mbo = list_first_entry(head, struct mbo, list); list_del(head->next); spin_unlock_irqrestore(&dim_lock, flags); @@ -736,7 +736,7 @@ static int dim2_probe(struct platform_device *pdev) int ret, i; struct kobject *kobj; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; @@ -747,47 +747,31 @@ static int dim2_probe(struct platform_device *pdev) test_dev = dev; #else res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - pr_err("no memory region defined\n"); - ret = -ENOENT; - goto err_free_dev; - } - - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - pr_err("failed to request mem region\n"); - ret = -EBUSY; - goto err_free_dev; - } - - dev->io_base = ioremap(res->start, resource_size(res)); - if (!dev->io_base) { - pr_err("failed to ioremap\n"); - ret = -ENOMEM; - goto err_release_mem; - } + dev->io_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->io_base)) + return PTR_ERR(dev->io_base); ret = platform_get_irq(pdev, 0); if (ret < 0) { - pr_err("failed to get irq\n"); - goto err_unmap_io; + dev_err(&pdev->dev, "failed to get irq\n"); + return -ENODEV; } dev->irq_ahb0 = ret; - ret = request_irq(dev->irq_ahb0, dim2_ahb_isr, 0, "mlb_ahb0", dev); + ret = 
devm_request_irq(&pdev->dev, dev->irq_ahb0, dim2_ahb_isr, 0, + "mlb_ahb0", dev); if (ret) { - pr_err("failed to request IRQ: %d, err: %d\n", - dev->irq_ahb0, ret); - goto err_unmap_io; + dev_err(&pdev->dev, "failed to request IRQ: %d, err: %d\n", + dev->irq_ahb0, ret); + return ret; } #endif init_waitqueue_head(&dev->netinfo_waitq); dev->deliver_netinfo = 0; dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev, "dim2_netinfo"); - if (IS_ERR(dev->netinfo_task)) { + if (IS_ERR(dev->netinfo_task)) ret = PTR_ERR(dev->netinfo_task); - goto err_free_irq; - } for (i = 0; i < DMA_CHANNELS; i++) { struct most_channel_capability *cap = dev->capabilities + i; @@ -833,7 +817,7 @@ static int dim2_probe(struct platform_device *pdev) kobj = most_register_interface(&dev->most_iface); if (IS_ERR(kobj)) { ret = PTR_ERR(kobj); - pr_err("failed to register MOST interface\n"); + dev_err(&pdev->dev, "failed to register MOST interface\n"); goto err_stop_thread; } @@ -843,7 +827,7 @@ static int dim2_probe(struct platform_device *pdev) ret = startup_dim(pdev); if (ret) { - pr_err("failed to initialize DIM2\n"); + dev_err(&pdev->dev, "failed to initialize DIM2\n"); goto err_destroy_bus; } @@ -855,16 +839,6 @@ err_unreg_iface: most_deregister_interface(&dev->most_iface); err_stop_thread: kthread_stop(dev->netinfo_task); -err_free_irq: -#if !defined(ENABLE_HDM_TEST) - free_irq(dev->irq_ahb0, dev); -err_unmap_io: - iounmap(dev->io_base); -err_release_mem: - release_mem_region(res->start, resource_size(res)); -err_free_dev: -#endif - kfree(dev); return ret; } @@ -878,7 +852,6 @@ err_free_dev: static int dim2_remove(struct platform_device *pdev) { struct dim2_hdm *dev = platform_get_drvdata(pdev); - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct dim2_platform_data *pdata = pdev->dev.platform_data; unsigned long flags; @@ -892,13 +865,6 @@ static int dim2_remove(struct platform_device *pdev) dim2_sysfs_destroy(&dev->bus); most_deregister_interface(&dev->most_iface); kthread_stop(dev->netinfo_task); -#if !defined(ENABLE_HDM_TEST) - free_irq(dev->irq_ahb0, dev); - iounmap(dev->io_base); - release_mem_region(res->start, resource_size(res)); -#endif - kfree(dev); - platform_set_drvdata(pdev, NULL); /* * break link to local platform_device_id struct diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/hdm-dim2/dim2_hdm.h index 1c94e3355fcc..4050e7c764ed 100644 --- a/drivers/staging/most/hdm-dim2/dim2_hdm.h +++ b/drivers/staging/most/hdm-dim2/dim2_hdm.h @@ -18,7 +18,7 @@ struct device; /* platform dependent data for dim2 interface */ struct dim2_platform_data { - int (*init)(struct dim2_platform_data *pd, void *io_base, + int (*init)(struct dim2_platform_data *pd, void __iomem *io_base, int clk_speed); void (*destroy)(struct dim2_platform_data *pd); void *priv; diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c index c5b10c7d2fac..2b28e4a51131 100644 --- a/drivers/staging/most/hdm-dim2/dim2_sysfs.c +++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.c @@ -63,7 +63,6 @@ static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr, static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { - ssize_t ret; struct medialb_bus *bus = container_of(kobj, struct medialb_bus, kobj_group); struct bus_attr *xattr = container_of(attr, struct bus_attr, attr); @@ -71,8 +70,7 @@ static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute 
*attr, if (!xattr->store) return -EIO; - ret = xattr->store(bus, buf, count); - return ret; + return xattr->store(bus, buf, count); } static struct sysfs_ops const bus_kobj_sysfs_ops = { diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c index 41690f801fb8..aeae071f2823 100644 --- a/drivers/staging/most/hdm-usb/hdm_usb.c +++ b/drivers/staging/most/hdm-usb/hdm_usb.c @@ -40,7 +40,6 @@ #define MAX_SUFFIX_LEN 10 #define MAX_STRING_LEN 80 #define MAX_BUF_SIZE 0xFFFF -#define CEILING(x, y) (((x) + (y) - 1) / (y)) #define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */ #define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */ @@ -137,7 +136,6 @@ struct most_dev { #define to_mdev(d) container_of(d, struct most_dev, iface) #define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj) -static struct workqueue_struct *schedule_usb_work; static void wq_clear_halt(struct work_struct *wq_obj); static void wq_netinfo(struct work_struct *wq_obj); @@ -223,6 +221,7 @@ static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel) } spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags); list_del(&anchor->list); + cancel_work_sync(&anchor->clear_work_obj); kfree(anchor); } spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags); @@ -411,7 +410,7 @@ static void hdm_write_completion(struct urb *urb) mbo->status = MBO_E_INVAL; usb_unlink_urb(urb); INIT_WORK(&anchor->clear_work_obj, wq_clear_halt); - queue_work(schedule_usb_work, &anchor->clear_work_obj); + schedule_work(&anchor->clear_work_obj); return; case -ENODEV: case -EPROTO: @@ -575,7 +574,7 @@ static void hdm_read_completion(struct urb *urb) mbo->status = MBO_E_INVAL; usb_unlink_urb(urb); INIT_WORK(&anchor->clear_work_obj, wq_clear_halt); - queue_work(schedule_usb_work, &anchor->clear_work_obj); + schedule_work(&anchor->clear_work_obj); return; case -ENODEV: case -EPROTO: @@ -785,7 +784,7 @@ static int hdm_configure_channel(struct most_interface *iface, int channel, temp_size += tail_space; /* calculate extra length to comply w/ HW padding */ - conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU) + conf->extra_len = (DIV_ROUND_UP(temp_size, USB_MTU) * USB_MTU) - conf->buffer_size; exit: mdev->conf[channel] = *conf; @@ -872,7 +871,7 @@ static void link_stat_timer_handler(unsigned long data) { struct most_dev *mdev = (struct most_dev *)data; - queue_work(schedule_usb_work, &mdev->poll_work_obj); + schedule_work(&mdev->poll_work_obj); mdev->link_stat_timer.expires = jiffies + (2 * HZ); add_timer(&mdev->link_stat_timer); } @@ -1299,7 +1298,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id) tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE; tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC | MOST_CH_ISOC_AVP | MOST_CH_SYNC; - if (ep_desc->bEndpointAddress & USB_DIR_IN) + if (usb_endpoint_dir_in(ep_desc)) tmp_cap->direction = MOST_CH_RX; else tmp_cap->direction = MOST_CH_TX; @@ -1415,19 +1414,13 @@ static int __init hdm_usb_init(void) pr_err("could not register hdm_usb driver\n"); return -EIO; } - schedule_usb_work = create_workqueue("hdmu_work"); - if (!schedule_usb_work) { - pr_err("could not create workqueue\n"); - usb_deregister(&hdm_usb); - return -ENOMEM; - } + return 0; } static void __exit hdm_usb_exit(void) { pr_info("hdm_usb_exit()\n"); - destroy_workqueue(schedule_usb_work); usb_deregister(&hdm_usb); } diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c index ed1ed25b6d1d..7c619feb12d3 100644 --- 
a/drivers/staging/most/mostcore/core.c +++ b/drivers/staging/most/mostcore/core.c @@ -35,7 +35,6 @@ static struct class *most_class; static struct device *class_glue_dir; static struct ida mdev_id; -static int modref; static int dummy_num_buffers; struct most_c_aim_obj { @@ -66,7 +65,6 @@ struct most_c_obj { struct most_c_aim_obj aim1; struct list_head trash_fifo; struct task_struct *hdm_enqueue_task; - struct mutex stop_task_mutex; wait_queue_head_t hdm_fifo_wq; }; @@ -74,7 +72,6 @@ struct most_c_obj { struct most_inst_obj { int dev_id; - atomic_t tainted; struct most_interface *iface; struct list_head channel_list; struct most_c_obj *channel[MAX_CHANNELS]; @@ -82,6 +79,14 @@ struct most_inst_obj { struct list_head list; }; +static const struct { + int most_ch_data_type; + char *name; +} ch_data_type[] = { { MOST_CH_CONTROL, "control\n" }, + { MOST_CH_ASYNC, "async\n" }, + { MOST_CH_SYNC, "sync\n" }, + { MOST_CH_ISOC_AVP, "isoc_avp\n"} }; + #define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj) /** @@ -95,8 +100,6 @@ struct most_inst_obj { _mbo; \ }) -static struct mutex deregister_mutex; - /* ___ ___ * ___C H A N N E L___ */ @@ -414,14 +417,12 @@ static ssize_t show_set_datatype(struct most_c_obj *c, struct most_c_attr *attr, char *buf) { - if (c->cfg.data_type & MOST_CH_CONTROL) - return snprintf(buf, PAGE_SIZE, "control\n"); - else if (c->cfg.data_type & MOST_CH_ASYNC) - return snprintf(buf, PAGE_SIZE, "async\n"); - else if (c->cfg.data_type & MOST_CH_SYNC) - return snprintf(buf, PAGE_SIZE, "sync\n"); - else if (c->cfg.data_type & MOST_CH_ISOC_AVP) - return snprintf(buf, PAGE_SIZE, "isoc_avp\n"); + int i; + + for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) { + if (c->cfg.data_type & ch_data_type[i].most_ch_data_type) + return snprintf(buf, PAGE_SIZE, ch_data_type[i].name); + } return snprintf(buf, PAGE_SIZE, "unconfigured\n"); } @@ -430,15 +431,16 @@ static ssize_t store_set_datatype(struct most_c_obj *c, const char *buf, size_t count) { - if (!strcmp(buf, "control\n")) { - c->cfg.data_type = MOST_CH_CONTROL; - } else if (!strcmp(buf, "async\n")) { - c->cfg.data_type = MOST_CH_ASYNC; - } else if (!strcmp(buf, "sync\n")) { - c->cfg.data_type = MOST_CH_SYNC; - } else if (!strcmp(buf, "isoc_avp\n")) { - c->cfg.data_type = MOST_CH_ISOC_AVP; - } else { + int i; + + for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) { + if (!strcmp(buf, ch_data_type[i].name)) { + c->cfg.data_type = ch_data_type[i].most_ch_data_type; + break; + } + } + + if (i == ARRAY_SIZE(ch_data_type)) { pr_info("WARN: invalid attribute settings\n"); return -EINVAL; } @@ -551,29 +553,6 @@ create_most_c_obj(const char *name, struct kobject *parent) return c; } -/** - * destroy_most_c_obj - channel release function - * @c: pointer to channel object - * - * This decrements the reference counter of the channel object. - * If the reference count turns zero, its release function is called. 
- */ -static void destroy_most_c_obj(struct most_c_obj *c) -{ - if (c->aim0.ptr) - c->aim0.ptr->disconnect_channel(c->iface, c->channel_id); - if (c->aim1.ptr) - c->aim1.ptr->disconnect_channel(c->iface, c->channel_id); - c->aim0.ptr = NULL; - c->aim1.ptr = NULL; - - mutex_lock(&deregister_mutex); - flush_trash_fifo(c); - flush_channel_fifos(c); - mutex_unlock(&deregister_mutex); - kobject_put(&c->kobj); -} - /* ___ ___ * ___I N S T A N C E___ */ @@ -761,12 +740,10 @@ static void destroy_most_inst_obj(struct most_inst_obj *inst) { struct most_c_obj *c, *tmp; - /* need to destroy channels first, since - * each channel incremented the - * reference count of the inst->kobj - */ list_for_each_entry_safe(c, tmp, &inst->channel_list, list) { - destroy_most_c_obj(c); + flush_trash_fifo(c); + flush_channel_fifos(c); + kobject_put(&c->kobj); } kobject_put(&inst->kobj); } @@ -1006,11 +983,14 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj, else return -ENOSPC; + *aim_ptr = aim_obj->driver; ret = aim_obj->driver->probe_channel(c->iface, c->channel_id, &c->cfg, &c->kobj, mdev_devnod); - if (ret) + if (ret) { + *aim_ptr = NULL; return ret; - *aim_ptr = aim_obj->driver; + } + return len; } @@ -1056,12 +1036,12 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj, if (IS_ERR(c)) return -ENODEV; + if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id)) + return -EIO; if (c->aim0.ptr == aim_obj->driver) c->aim0.ptr = NULL; if (c->aim1.ptr == aim_obj->driver) c->aim1.ptr = NULL; - if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id)) - return -EIO; return len; } @@ -1279,7 +1259,6 @@ static int arm_mbo_chain(struct most_c_obj *c, int dir, for (i = 0; i < c->cfg.num_buffers; i++) { mbo = kzalloc(sizeof(*mbo), GFP_KERNEL); if (!mbo) { - pr_info("WARN: Allocation of MBO failed.\n"); retval = i; goto _exit; } @@ -1319,18 +1298,10 @@ _exit: */ int most_submit_mbo(struct mbo *mbo) { - struct most_c_obj *c; - struct most_inst_obj *i; - if (unlikely((!mbo) || (!mbo->context))) { pr_err("Bad MBO or missing channel reference\n"); return -EINVAL; } - c = mbo->context; - i = c->inst; - - if (unlikely(atomic_read(&i->tainted))) - return -ENODEV; nq_hdm_mbo(mbo); return 0; @@ -1387,7 +1358,7 @@ most_c_obj *get_channel_by_iface(struct most_interface *iface, int id) return i->channel[id]; } -int channel_has_mbo(struct most_interface *iface, int id) +int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim) { struct most_c_obj *c = get_channel_by_iface(iface, id); unsigned long flags; @@ -1396,6 +1367,11 @@ int channel_has_mbo(struct most_interface *iface, int id) if (unlikely(!c)) return -EINVAL; + if (c->aim0.refs && c->aim1.refs && + ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) || + (aim == c->aim1.ptr && c->aim1.num_buffers <= 0))) + return 0; + spin_lock_irqsave(&c->fifo_lock, flags); empty = list_empty(&c->fifo); spin_unlock_irqrestore(&c->fifo_lock, flags); @@ -1456,17 +1432,8 @@ EXPORT_SYMBOL_GPL(most_get_mbo); */ void most_put_mbo(struct mbo *mbo) { - struct most_c_obj *c; - struct most_inst_obj *i; - - c = mbo->context; - i = c->inst; + struct most_c_obj *c = mbo->context; - if (unlikely(atomic_read(&i->tainted))) { - mbo->status = MBO_E_CLOSE; - trash_mbo(mbo); - return; - } if (c->cfg.direction == MOST_CH_TX) { arm_mbo(mbo); return; @@ -1546,7 +1513,6 @@ int most_start_channel(struct most_interface *iface, int id, mutex_unlock(&c->start_mutex); return -ENOLCK; } - modref++; c->cfg.extra_len = 0; if (c->iface->configure(c->iface, 
c->channel_id, &c->cfg)) { @@ -1588,7 +1554,6 @@ out: error: module_put(iface->mod); - modref--; mutex_unlock(&c->start_mutex); return ret; } @@ -1616,24 +1581,12 @@ int most_stop_channel(struct most_interface *iface, int id, if (c->aim0.refs + c->aim1.refs >= 2) goto out; - mutex_lock(&c->stop_task_mutex); if (c->hdm_enqueue_task) kthread_stop(c->hdm_enqueue_task); c->hdm_enqueue_task = NULL; - mutex_unlock(&c->stop_task_mutex); - mutex_lock(&deregister_mutex); - if (atomic_read(&c->inst->tainted)) { - mutex_unlock(&deregister_mutex); - mutex_unlock(&c->start_mutex); - return -ENODEV; - } - mutex_unlock(&deregister_mutex); - - if (iface->mod && modref) { + if (iface->mod) module_put(iface->mod); - modref--; - } c->is_poisoned = true; if (c->iface->poison_channel(c->iface, c->channel_id)) { @@ -1762,6 +1715,7 @@ struct kobject *most_register_interface(struct most_interface *iface) inst = create_most_inst_obj(name); if (!inst) { pr_info("Failed to allocate interface instance\n"); + ida_simple_remove(&mdev_id, id); return ERR_PTR(-ENOMEM); } @@ -1769,7 +1723,6 @@ struct kobject *most_register_interface(struct most_interface *iface) INIT_LIST_HEAD(&inst->channel_list); inst->iface = iface; inst->dev_id = id; - atomic_set(&inst->tainted, 0); list_add_tail(&inst->list, &instance_list); for (i = 0; i < iface->num_channels; i++) { @@ -1808,7 +1761,6 @@ struct kobject *most_register_interface(struct most_interface *iface) init_completion(&c->cleanup); atomic_set(&c->mbo_ref, 0); mutex_init(&c->start_mutex); - mutex_init(&c->stop_task_mutex); list_add_tail(&c->list, &inst->channel_list); } pr_info("registered new MOST device mdev%d (%s)\n", @@ -1818,6 +1770,7 @@ struct kobject *most_register_interface(struct most_interface *iface) free_instance: pr_info("Failed allocate channel(s)\n"); list_del(&inst->list); + ida_simple_remove(&mdev_id, id); destroy_most_inst_obj(inst); return ERR_PTR(-ENOMEM); } @@ -1835,37 +1788,24 @@ void most_deregister_interface(struct most_interface *iface) struct most_inst_obj *i = iface->priv; struct most_c_obj *c; - mutex_lock(&deregister_mutex); if (unlikely(!i)) { pr_info("Bad Interface\n"); - mutex_unlock(&deregister_mutex); return; } pr_info("deregistering MOST device %s (%s)\n", i->kobj.name, iface->description); - atomic_set(&i->tainted, 1); - mutex_unlock(&deregister_mutex); - - while (modref) { - if (iface->mod && modref) - module_put(iface->mod); - modref--; - } - list_for_each_entry(c, &i->channel_list, list) { - if (c->aim0.refs + c->aim1.refs <= 0) - continue; - - mutex_lock(&c->stop_task_mutex); - if (c->hdm_enqueue_task) - kthread_stop(c->hdm_enqueue_task); - c->hdm_enqueue_task = NULL; - mutex_unlock(&c->stop_task_mutex); - - if (iface->poison_channel(iface, c->channel_id)) - pr_err("Can't poison channel %d\n", c->channel_id); + if (c->aim0.ptr) + c->aim0.ptr->disconnect_channel(c->iface, + c->channel_id); + if (c->aim1.ptr) + c->aim1.ptr->disconnect_channel(c->iface, + c->channel_id); + c->aim0.ptr = NULL; + c->aim1.ptr = NULL; } + ida_simple_remove(&mdev_id, i->dev_id); list_del(&i->list); destroy_most_inst_obj(i); @@ -1913,41 +1853,52 @@ EXPORT_SYMBOL_GPL(most_resume_enqueue); static int __init most_init(void) { + int err; + pr_info("init()\n"); INIT_LIST_HEAD(&instance_list); INIT_LIST_HEAD(&aim_list); - mutex_init(&deregister_mutex); ida_init(&mdev_id); - if (bus_register(&most_bus)) { + err = bus_register(&most_bus); + if (err) { pr_info("Cannot register most bus\n"); - goto exit; + return err; } most_class = class_create(THIS_MODULE, "most"); if 
(IS_ERR(most_class)) { pr_info("No udev support.\n"); + err = PTR_ERR(most_class); goto exit_bus; } - if (driver_register(&mostcore)) { + + err = driver_register(&mostcore); + if (err) { pr_info("Cannot register core driver\n"); goto exit_class; } class_glue_dir = device_create(most_class, NULL, 0, NULL, "mostcore"); - if (!class_glue_dir) + if (IS_ERR(class_glue_dir)) { + err = PTR_ERR(class_glue_dir); goto exit_driver; + } most_aim_kset = kset_create_and_add("aims", NULL, &class_glue_dir->kobj); - if (!most_aim_kset) + if (!most_aim_kset) { + err = -ENOMEM; goto exit_class_container; + } most_inst_kset = kset_create_and_add("devices", NULL, &class_glue_dir->kobj); - if (!most_inst_kset) + if (!most_inst_kset) { + err = -ENOMEM; goto exit_driver_kset; + } return 0; @@ -1961,8 +1912,7 @@ exit_class: class_destroy(most_class); exit_bus: bus_unregister(&most_bus); -exit: - return -ENOMEM; + return err; } static void __exit most_exit(void) diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h index bda3850d5435..60e018e499ef 100644 --- a/drivers/staging/most/mostcore/mostcore.h +++ b/drivers/staging/most/mostcore/mostcore.h @@ -310,7 +310,8 @@ int most_deregister_aim(struct most_aim *aim); struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx, struct most_aim *); void most_put_mbo(struct mbo *mbo); -int channel_has_mbo(struct most_interface *iface, int channel_idx); +int channel_has_mbo(struct most_interface *iface, int channel_idx, + struct most_aim *aim); int most_start_channel(struct most_interface *iface, int channel_idx, struct most_aim *); int most_stop_channel(struct most_interface *iface, int channel_idx, diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c index 197d1124733d..9d47c5db24a6 100644 --- a/drivers/staging/mt29f_spinand/mt29f_spinand.c +++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c @@ -33,7 +33,7 @@ static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); struct spinand_info *info = nand_get_controller_data(chip); - struct spinand_state *state = (struct spinand_state *)info->priv; + struct spinand_state *state = info->priv; return state; } @@ -63,8 +63,8 @@ static struct nand_ecclayout spinand_oob_64 = { }; #endif -/* - * spinand_cmd - to process a command to send to the SPI Nand +/** + * spinand_cmd - process a command to send to the SPI Nand * Description: * Set up the command buffer to send to the SPI controller. * The command buffer has to initialized to 0. @@ -110,10 +110,10 @@ static int spinand_cmd(struct spi_device *spi, struct spinand_cmd *cmd) return spi_sync(spi, &message); } -/* - * spinand_read_id- Read SPI Nand ID +/** + * spinand_read_id - Read SPI Nand ID * Description: - * Read ID: read two ID bytes from the SPI Nand device + * read two ID bytes from the SPI Nand device */ static int spinand_read_id(struct spi_device *spi_nand, u8 *id) { @@ -135,8 +135,8 @@ static int spinand_read_id(struct spi_device *spi_nand, u8 *id) return retval; } -/* - * spinand_read_status- send command 0xf to the SPI Nand status register +/** + * spinand_read_status - send command 0xf to the SPI Nand status register * Description: * After read, write, or erase, the Nand device is expected to set the * busy status. 
@@ -175,7 +175,7 @@ static int wait_till_ready(struct spi_device *spi_nand) retval = spinand_read_status(spi_nand, &stat); if (retval < 0) return -1; - else if (!(stat & 0x1)) + if (!(stat & 0x1)) break; cond_resched(); @@ -188,7 +188,7 @@ static int wait_till_ready(struct spi_device *spi_nand) } /** - * spinand_get_otp- send command 0xf to read the SPI Nand OTP register + * spinand_get_otp - send command 0xf to read the SPI Nand OTP register * Description: * There is one bit( bit 0x10 ) to set or to clear the internal ECC. * Enable chip internal ECC, set the bit to 1 @@ -212,7 +212,7 @@ static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp) } /** - * spinand_set_otp- send command 0x1f to write the SPI Nand OTP register + * spinand_set_otp - send command 0x1f to write the SPI Nand OTP register * Description: * There is one bit( bit 0x10 ) to set or to clear the internal ECC. * Enable chip internal ECC, set the bit to 1 @@ -223,11 +223,11 @@ static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp) int retval; struct spinand_cmd cmd = {0}; - cmd.cmd = CMD_WRITE_REG, - cmd.n_addr = 1, - cmd.addr[0] = REG_OTP, - cmd.n_tx = 1, - cmd.tx_buf = otp, + cmd.cmd = CMD_WRITE_REG; + cmd.n_addr = 1; + cmd.addr[0] = REG_OTP; + cmd.n_tx = 1; + cmd.tx_buf = otp; retval = spinand_cmd(spi_nand, &cmd); if (retval < 0) @@ -238,7 +238,7 @@ static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp) #ifdef CONFIG_MTD_SPINAND_ONDIEECC /** - * spinand_enable_ecc- send command 0x1f to write the SPI Nand OTP register + * spinand_enable_ecc - send command 0x1f to write the SPI Nand OTP register * Description: * There is one bit( bit 0x10 ) to set or to clear the internal ECC. * Enable chip internal ECC, set the bit to 1 @@ -283,7 +283,7 @@ static int spinand_disable_ecc(struct spi_device *spi_nand) } /** - * spinand_write_enable- send command 0x06 to enable write or erase the + * spinand_write_enable - send command 0x06 to enable write or erase the * Nand cells * Description: * Before write and erase the Nand cells, the write enable has to be set. @@ -313,9 +313,9 @@ static int spinand_read_page_to_cache(struct spi_device *spi_nand, u16 page_id) return spinand_cmd(spi_nand, &cmd); } -/* - * spinand_read_from_cache- send command 0x03 to read out the data from the - * cache register(2112 bytes max) +/** + * spinand_read_from_cache - send command 0x03 to read out the data from the + * cache register (2112 bytes max) * Description: * The read can specify 1 to 2112 bytes of data read at the corresponding * locations. @@ -341,15 +341,15 @@ static int spinand_read_from_cache(struct spi_device *spi_nand, u16 page_id, return spinand_cmd(spi_nand, &cmd); } -/* - * spinand_read_page-to read a page with: +/** + * spinand_read_page - read a page * @page_id: the physical page number * @offset: the location from 0 to 2111 * @len: number of bytes to read * @rbuf: read buffer to hold @len bytes * * Description: - * The read includes two commands to the Nand: 0x13 and 0x03 commands + * The read includes two commands to the Nand - 0x13 and 0x03 commands * Poll to read status to wait for tRD time. 
*/ static int spinand_read_page(struct spi_device *spi_nand, u16 page_id, @@ -408,11 +408,11 @@ static int spinand_read_page(struct spi_device *spi_nand, u16 page_id, return ret; } -/* - * spinand_program_data_to_cache--to write a page to cache with: +/** + * spinand_program_data_to_cache - write a page to cache * @byte_id: the location to write to the cache * @len: number of bytes to write - * @rbuf: read buffer to hold @len bytes + * @wbuf: write buffer holding @len bytes * * Description: * The write command used here is 0x84--indicating that the cache is @@ -439,7 +439,7 @@ static int spinand_program_data_to_cache(struct spi_device *spi_nand, } /** - * spinand_program_execute--to write a page from cache to the Nand array with + * spinand_program_execute - write a page from cache to the Nand array * @page_id: the physical page location to write the page. * * Description: @@ -462,11 +462,11 @@ static int spinand_program_execute(struct spi_device *spi_nand, u16 page_id) } /** - * spinand_program_page--to write a page with: + * spinand_program_page - write a page * @page_id: the physical page location to write the page. * @offset: the location from the cache starting from 0 to 2111 * @len: the number of bytes to write - * @wbuf: the buffer to hold the number of bytes + * @buf: the buffer holding @len bytes * * Description: * The commands used here are 0x06, 0x84, and 0x10--indicating that @@ -483,8 +483,11 @@ static int spinand_program_page(struct spi_device *spi_nand, #ifdef CONFIG_MTD_SPINAND_ONDIEECC unsigned int i, j; - enable_read_hw_ecc = 0; wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL); + if (!wbuf) + return -ENOMEM; + + enable_read_hw_ecc = 0; spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf); for (i = offset, j = 0; i < len; i++, j++) @@ -547,7 +550,7 @@ static int spinand_program_page(struct spi_device *spi_nand, } /** - * spinand_erase_block_erase--to erase a page with: + * spinand_erase_block_erase - erase a page * @block_id: the physical block location to erase. * * Description: @@ -570,7 +573,7 @@ static int spinand_erase_block_erase(struct spi_device *spi_nand, u16 block_id) } /** - * spinand_erase_block--to erase a page with: + * spinand_erase_block - erase a page * @block_id: the physical block location to erase. * * Description: @@ -746,7 +749,7 @@ static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command, { struct nand_chip *chip = mtd_to_nand(mtd); struct spinand_info *info = nand_get_controller_data(chip); - struct spinand_state *state = (struct spinand_state *)info->priv; + struct spinand_state *state = info->priv; switch (command) { /* @@ -810,7 +813,7 @@ static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command, } /** - * spinand_lock_block- send write register 0x1f command to the Nand device + * spinand_lock_block - send write register 0x1f command to the Nand device * * Description: * After power up, all the Nand blocks are locked. This function allows @@ -837,12 +840,12 @@ static int spinand_lock_block(struct spi_device *spi_nand, u8 lock) return ret; } -/* +/** * spinand_probe - [spinand Interface] * @spi_nand: registered device driver. * * Description: - * To set up the device driver parameters to make the device available. + * Set up the device driver parameters to make the device available. 
*/ static int spinand_probe(struct spi_device *spi_nand) { @@ -890,7 +893,8 @@ static int spinand_probe(struct spi_device *spi_nand) #else chip->ecc.mode = NAND_ECC_SOFT; if (spinand_disable_ecc(spi_nand) < 0) - pr_info("%s: disable ecc failed!\n", __func__); + dev_info(&spi_nand->dev, "%s: disable ecc failed!\n", + __func__); #endif nand_set_flash_node(chip, spi_nand->dev.of_node); @@ -916,12 +920,12 @@ static int spinand_probe(struct spi_device *spi_nand) return mtd_device_register(mtd, NULL, 0); } -/* - * spinand_remove: Remove the device driver +/** + * spinand_remove - remove the device driver * @spi: the spi device. * * Description: - * To remove the device driver parameters and free up allocated memories. + * Remove the device driver parameters and free up allocated memories. */ static int spinand_remove(struct spi_device *spi) { diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c index 7806c2bc3af3..abf4c71ee66b 100644 --- a/drivers/staging/netlogic/platform_net.c +++ b/drivers/staging/netlogic/platform_net.c @@ -86,7 +86,8 @@ static void xlr_resource_init(struct resource *res, int offset, int irq) res++; res->name = "gmac"; - res->start = res->end = irq; + res->start = irq; + res->end = irq; res->flags = IORESOURCE_IRQ; } @@ -121,8 +122,8 @@ static struct platform_device *gmac_controller2_init(void *gmac0_addr) ndata1.phy_addr[mac] = mac + 4 + 0x10; xlr_resource_init(&xlr_net1_res[mac * 2], - xlr_gmac_offsets[mac + 4], - xlr_gmac_irqs[mac + 4]); + xlr_gmac_offsets[mac + 4], + xlr_gmac_irqs[mac + 4]); } xlr_net_dev1.num_resources = 8; @@ -169,7 +170,7 @@ static void xls_gmac_init(void) xlr_net_dev0.num_resources = 2; xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0], - xlr_gmac_irqs[0]); + xlr_gmac_irqs[0]); platform_device_register(&xlr_net_dev0); /* second block is XAUI, not supported yet */ @@ -182,7 +183,7 @@ static void xls_gmac_init(void) ndata0.phy_addr[mac] = mac + 0x10; xlr_resource_init(&xlr_net0_res[mac * 2], - xlr_gmac_offsets[mac], + xlr_gmac_offsets[mac], xlr_gmac_irqs[mac]); } xlr_net_dev0.num_resources = 8; @@ -208,7 +209,6 @@ static void xlr_gmac_init(void) .gpio_addr = NULL, }; - static struct platform_device xlr_net_dev0 = { .name = "xlr-net", .id = 0, @@ -223,7 +223,7 @@ static void xlr_gmac_init(void) ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac; ndata0.phy_addr[mac] = mac; xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac], - xlr_gmac_irqs[mac]); + xlr_gmac_irqs[mac]); } xlr_net_dev0.num_resources = 8; xlr_net_dev0.resource = xlr_net0_res; diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index 0b4e819f5164..aa1cdf602cf6 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -69,8 +69,7 @@ static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg) return __raw_readl(base + reg); } -static inline void xlr_reg_update(u32 *base_addr, - u32 off, u32 val, u32 mask) +static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask) { u32 tmp; @@ -100,7 +99,7 @@ static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr) return 0; } while (++num_try < 10000); - pr_err("Send to RFR failed in RX path\n"); + netdev_err(priv->ndev, "Send to RFR failed in RX path\n"); return ret; } @@ -122,8 +121,8 @@ static inline unsigned char *xlr_alloc_skb(void) return skb->data; } -static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, - int code, struct nlm_fmn_msg *msg, void *arg) +static void 
xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code, + struct nlm_fmn_msg *msg, void *arg) { struct sk_buff *skb; void *skb_data = NULL; @@ -131,13 +130,13 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, struct xlr_net_priv *priv; u32 port, length; unsigned char *addr; - struct xlr_adapter *adapter = (struct xlr_adapter *) arg; + struct xlr_adapter *adapter = arg; length = (msg->msg0 >> 40) & 0x3fff; if (length == 0) { addr = bus_to_virt(msg->msg0 & 0xffffffffffULL); addr = addr - MAC_SKB_BACK_PTR_SIZE; - skb = (struct sk_buff *) *(unsigned long *)addr; + skb = (struct sk_buff *)(*(unsigned long *)addr); dev_kfree_skb_any((struct sk_buff *)addr); } else { addr = (unsigned char *) @@ -145,9 +144,9 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, length = length - BYTE_OFFSET - MAC_CRC_LEN; port = ((int)msg->msg0) & 0x0f; addr = addr - MAC_SKB_BACK_PTR_SIZE; - skb = (struct sk_buff *) *(unsigned long *)addr; + skb = (struct sk_buff *)(*(unsigned long *)addr); skb->dev = adapter->netdev[port]; - if (skb->dev == NULL) + if (!skb->dev) return; ndev = skb->dev; priv = netdev_priv(ndev); @@ -207,15 +206,15 @@ static int xlr_net_fill_rx_ring(struct net_device *ndev) struct xlr_net_priv *priv = netdev_priv(ndev); int i; - for (i = 0; i < MAX_FRIN_SPILL/4; i++) { + for (i = 0; i < MAX_FRIN_SPILL / 4; i++) { skb_data = xlr_alloc_skb(); if (!skb_data) { - pr_err("SKB allocation failed\n"); + netdev_err(ndev, "SKB allocation failed\n"); return -ENOMEM; } send_to_rfr_fifo(priv, skb_data); } - pr_info("Rx ring setup done\n"); + netdev_info(ndev, "Rx ring setup done\n"); return 0; } @@ -252,7 +251,7 @@ static int xlr_net_stop(struct net_device *ndev) } static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr, - struct sk_buff *skb) + struct sk_buff *skb) { unsigned long physkb = virt_to_phys(skb); int cpu_core = nlm_core_id(); @@ -266,12 +265,13 @@ static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr, ((u64)fr_stn_id << 54) | /* Free back id */ (u64)0 << 40 | /* Set len to 0 */ ((u64)physkb & 0xffffffff)); /* 32bit address */ - msg->msg2 = msg->msg3 = 0; + msg->msg2 = 0; + msg->msg3 = 0; } static void __maybe_unused xlr_wakeup_queue(unsigned long dev) { - struct net_device *ndev = (struct net_device *) dev; + struct net_device *ndev = (struct net_device *)dev; struct xlr_net_priv *priv = netdev_priv(ndev); struct phy_device *phydev = xlr_get_phydev(priv); @@ -280,7 +280,7 @@ static void __maybe_unused xlr_wakeup_queue(unsigned long dev) } static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, - struct net_device *ndev) + struct net_device *ndev) { struct nlm_fmn_msg msg; struct xlr_net_priv *priv = netdev_priv(ndev); @@ -309,10 +309,10 @@ static void xlr_hw_set_mac_addr(struct net_device *ndev) /* set mac station address */ xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0, - ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) | - (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2]))); + ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) | + (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2]))); xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1, - ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16))); + ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16))); xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff); xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff); @@ -320,12 +320,12 @@ static void xlr_hw_set_mac_addr(struct net_device *ndev) xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff); 
xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, - (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | - (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)); + (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | + (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | + (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)); if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII || - priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII) + priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII) xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f); } @@ -406,7 +406,8 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats) } static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats) + struct rtnl_link_stats64 *stats + ) { xlr_stats(ndev, stats); return stats; @@ -426,7 +427,7 @@ static struct net_device_ops xlr_netdev_ops = { * Gmac init */ static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0, - int reg_start_1, int reg_size, int size) + int reg_start_1, int reg_size, int size) { void *spill; u32 *base; @@ -436,13 +437,15 @@ static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0, base = priv->base_addr; spill_size = size; spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_ATOMIC); - if (!spill) + if (!spill) { pr_err("Unable to allocate memory for spill area!\n"); + return ZERO_SIZE_PTR; + } spill = PTR_ALIGN(spill, SMP_CACHE_BYTES); phys_addr = virt_to_phys(spill); dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n", - size, phys_addr); + size, phys_addr); xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff); xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07); xlr_nae_wreg(base, reg_size, spill_size); @@ -511,19 +514,19 @@ static void xlr_config_pde(struct xlr_net_priv *priv) xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff)); xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1, - ((bkt_map >> 32) & 0xffffffff)); + ((bkt_map >> 32) & 0xffffffff)); xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff)); xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1, - ((bkt_map >> 32) & 0xffffffff)); + ((bkt_map >> 32) & 0xffffffff)); xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff)); xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1, - ((bkt_map >> 32) & 0xffffffff)); + ((bkt_map >> 32) & 0xffffffff)); xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff)); xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1, - ((bkt_map >> 32) & 0xffffffff)); + ((bkt_map >> 32) & 0xffffffff)); } /* @@ -541,8 +544,8 @@ static int xlr_config_common(struct xlr_net_priv *priv) /* Setting non-core MsgBktSize(0x321 - 0x325) */ for (i = start_stn_id; i <= end_stn_id; i++) { xlr_nae_wreg(priv->base_addr, - R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id, - bucket_size[i]); + R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id, + bucket_size[i]); } /* @@ -552,8 +555,8 @@ static int xlr_config_common(struct xlr_net_priv *priv) for (i = 0; i < 8; i++) { for (j = 0; j < 8; j++) xlr_nae_wreg(priv->base_addr, - (R_CC_CPU0_0 + (i * 8)) + j, - gmac->credit_config[(i * 8) + j]); + (R_CC_CPU0_0 + (i * 8)) + j, + gmac->credit_config[(i * 8) + j]); } xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3); @@ -567,7 +570,7 @@ static int xlr_config_common(struct xlr_net_priv *priv) if (err) return err; nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler, - priv->adapter); + priv->adapter); return 0; } @@ -583,7 +586,7 @@ static void 
xlr_config_translate_table(struct xlr_net_priv *priv) cpu_mask = priv->nd->cpu_mask; pr_info("Using %s-based distribution\n", - (use_bkt) ? "bucket" : "class"); + (use_bkt) ? "bucket" : "class"); j = 0; for (i = 0; i < 32; i++) { if ((1 << i) & cpu_mask) { @@ -614,7 +617,7 @@ static void xlr_config_translate_table(struct xlr_net_priv *priv) val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) | (c2 << 7) | (b2 << 1) | (use_bkt << 0)); dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n", - i, b1, b2, c1, c2); + i, b1, b2, c1, c2); xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val); c1 = c2; } @@ -629,16 +632,16 @@ static void xlr_config_parser(struct xlr_net_priv *priv) /* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/ xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG, - ((0x7f << 8) | (1 << 1))); + ((0x7f << 8) | (1 << 1))); /* configure the parser : L2 Type is configured in the bootloader */ /* extract IP: src, dest protocol */ xlr_nae_wreg(priv->base_addr, R_L3CTABLE, - (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) | - (0x0800 << 0)); + (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) | + (0x0800 << 0)); xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1, - (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) | - (16 << 4) | 4); + (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) | + (16 << 4) | 4); /* Configure to extract SRC port and Dest port for TCP and UDP pkts */ xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6); @@ -663,7 +666,7 @@ static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val) xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum); /* Write the data which starts the write cycle */ - xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32) val); + xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val); /* poll for the read cycle to complete */ while (!timedout) { @@ -692,11 +695,11 @@ static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum) /* setup the phy reg to be used */ xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, - (phy_addr << 8) | (regnum << 0)); + (phy_addr << 8) | (regnum << 0)); /* Issue the read command */ xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, - (1 << O_MII_MGMT_COMMAND__rstat)); + (1 << O_MII_MGMT_COMMAND__rstat)); /* poll for the read cycle to complete */ while (!timedout) { @@ -724,7 +727,7 @@ static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val) ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val); dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n", - phy_addr, regnum, val, ret); + phy_addr, regnum, val, ret); return ret; } @@ -735,7 +738,7 @@ static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum) ret = xlr_phy_read(priv->mii_addr, phy_addr, regnum); dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n", - phy_addr, regnum, ret); + phy_addr, regnum, ret); return ret; } @@ -797,13 +800,16 @@ void xlr_set_gmac_speed(struct xlr_net_priv *priv) if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { if (speed == SPEED_10) xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, SGMII_SPEED_10); + R_INTERFACE_CONTROL, + SGMII_SPEED_10); if (speed == SPEED_100) xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, SGMII_SPEED_100); + R_INTERFACE_CONTROL, + SGMII_SPEED_100); if (speed == SPEED_1000) xlr_nae_wreg(priv->base_addr, - R_INTERFACE_CONTROL, SGMII_SPEED_1000); + R_INTERFACE_CONTROL, + SGMII_SPEED_1000); } if (speed == SPEED_10) xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2); @@ -864,7 +870,7 @@ static int xlr_mii_probe(struct 
xlr_net_priv *priv) } static int xlr_setup_mdio(struct xlr_net_priv *priv, - struct platform_device *pdev) + struct platform_device *pdev) { int err; @@ -877,7 +883,7 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv, priv->mii_bus->priv = priv; priv->mii_bus->name = "xlr-mdio"; snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", - priv->mii_bus->name, priv->port_id); + priv->mii_bus->name, priv->port_id); priv->mii_bus->read = xlr_mii_read; priv->mii_bus->write = xlr_mii_write; priv->mii_bus->parent = &pdev->dev; @@ -910,25 +916,31 @@ static void xlr_port_enable(struct xlr_net_priv *priv) /* Setup MAC_CONFIG reg if (xls & rgmii) */ if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) && - priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII) + priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII) xlr_reg_update(priv->base_addr, R_RX_CONTROL, - (1 << O_RX_CONTROL__RGMII), (1 << O_RX_CONTROL__RGMII)); + (1 << O_RX_CONTROL__RGMII), + (1 << O_RX_CONTROL__RGMII)); /* Rx Tx enable */ xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1, - ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)), - ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc))); + ((1 << O_MAC_CONFIG_1__rxen) | + (1 << O_MAC_CONFIG_1__txen) | + (1 << O_MAC_CONFIG_1__rxfc) | + (1 << O_MAC_CONFIG_1__txfc)), + ((1 << O_MAC_CONFIG_1__rxen) | + (1 << O_MAC_CONFIG_1__txen) | + (1 << O_MAC_CONFIG_1__rxfc) | + (1 << O_MAC_CONFIG_1__txfc))); /* Setup tx control reg */ xlr_reg_update(priv->base_addr, R_TX_CONTROL, - ((1 << O_TX_CONTROL__TxEnable) | - (512 << O_TX_CONTROL__TxThreshold)), 0x3fff); + ((1 << O_TX_CONTROL__TXENABLE) | + (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff); /* Setup rx control reg */ xlr_reg_update(priv->base_addr, R_RX_CONTROL, - 1 << O_RX_CONTROL__RxEnable, 1 << O_RX_CONTROL__RxEnable); + 1 << O_RX_CONTROL__RXENABLE, + 1 << O_RX_CONTROL__RXENABLE); } static void xlr_port_disable(struct xlr_net_priv *priv) @@ -936,25 +948,26 @@ static void xlr_port_disable(struct xlr_net_priv *priv) /* Setup MAC_CONFIG reg */ /* Rx Tx disable*/ xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1, - ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) | - (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)), - 0x0); + ((1 << O_MAC_CONFIG_1__rxen) | + (1 << O_MAC_CONFIG_1__txen) | + (1 << O_MAC_CONFIG_1__rxfc) | + (1 << O_MAC_CONFIG_1__txfc)), 0x0); /* Setup tx control reg */ xlr_reg_update(priv->base_addr, R_TX_CONTROL, - ((1 << O_TX_CONTROL__TxEnable) | - (512 << O_TX_CONTROL__TxThreshold)), 0); + ((1 << O_TX_CONTROL__TXENABLE) | + (512 << O_TX_CONTROL__TXTHRESHOLD)), 0); /* Setup rx control reg */ xlr_reg_update(priv->base_addr, R_RX_CONTROL, - 1 << O_RX_CONTROL__RxEnable, 0); + 1 << O_RX_CONTROL__RXENABLE, 0); } /* * Initialization of gmac */ static int xlr_gmac_init(struct xlr_net_priv *priv, - struct platform_device *pdev) + struct platform_device *pdev) { int ret; @@ -963,9 +976,9 @@ static int xlr_gmac_init(struct xlr_net_priv *priv, xlr_port_disable(priv); xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL, - (1 << O_DESC_PACK_CTRL__MaxEntry) - | (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset) - | (1600 << O_DESC_PACK_CTRL__RegularSize)); + (1 << O_DESC_PACK_CTRL__MAXENTRY) | + (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) | + (1600 << O_DESC_PACK_CTRL__REGULARSIZE)); ret = xlr_setup_mdio(priv, pdev); if (ret) @@ -977,21 +990,14 @@ static int xlr_gmac_init(struct xlr_net_priv *priv, /* 
speed 2.5Mhz */ xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02); /* Setup Interrupt mask reg */ - xlr_nae_wreg(priv->base_addr, R_INTMASK, - (1 << O_INTMASK__TxIllegal) | - (1 << O_INTMASK__MDInt) | - (1 << O_INTMASK__TxFetchError) | - (1 << O_INTMASK__P2PSpillEcc) | - (1 << O_INTMASK__TagFull) | - (1 << O_INTMASK__Underrun) | - (1 << O_INTMASK__Abort) - ); + xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) | + (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) | + (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) | + (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT)); /* Clear all stats */ - xlr_reg_update(priv->base_addr, R_STATCTRL, - 0, 1 << O_STATCTRL__ClrCnt); - xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, - 1 << 2); + xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT); + xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2); return 0; } @@ -1019,10 +1025,11 @@ static int xlr_net_probe(struct platform_device *pdev) * Each controller has 4 gmac ports, mapping each controller * under one parent device, 4 gmac ports under one device. */ - for (port = 0; port < pdev->num_resources/2; port++) { + for (port = 0; port < pdev->num_resources / 2; port++) { ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32); if (!ndev) { - pr_err("Allocation of Ethernet device failed\n"); + dev_err(&pdev->dev, + "Allocation of Ethernet device failed\n"); return -ENOMEM; } @@ -1032,13 +1039,6 @@ static int xlr_net_probe(struct platform_device *pdev) priv->port_id = (pdev->id * 4) + port; priv->nd = (struct xlr_net_data *)pdev->dev.platform_data; res = platform_get_resource(pdev, IORESOURCE_MEM, port); - - if (res == NULL) { - pr_err("No memory resource for MAC %d\n", - priv->port_id); - err = -ENODEV; - goto err_gmac; - } priv->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base_addr)) { err = PTR_ERR(priv->base_addr); @@ -1048,8 +1048,9 @@ static int xlr_net_probe(struct platform_device *pdev) adapter->netdev[port] = ndev; res = platform_get_resource(pdev, IORESOURCE_IRQ, port); - if (res == NULL) { - pr_err("No irq resource for MAC %d\n", priv->port_id); + if (!res) { + dev_err(&pdev->dev, "No irq resource for MAC %d\n", + priv->port_id); err = -ENODEV; goto err_gmac; } @@ -1084,7 +1085,8 @@ static int xlr_net_probe(struct platform_device *pdev) if (strcmp(res->name, "gmac") == 0) { err = xlr_gmac_init(priv, pdev); if (err) { - pr_err("gmac%d init failed\n", priv->port_id); + dev_err(&pdev->dev, "gmac%d init failed\n", + priv->port_id); goto err_gmac; } } @@ -1097,8 +1099,9 @@ static int xlr_net_probe(struct platform_device *pdev) err = register_netdev(ndev); if (err) { - pr_err("Registering netdev failed for gmac%d\n", - priv->port_id); + dev_err(&pdev->dev, + "Registering netdev failed for gmac%d\n", + priv->port_id); goto err_netdev; } platform_set_drvdata(pdev, priv); diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h index 7ae8874daee8..f76e16cfd15d 100644 --- a/drivers/staging/netlogic/xlr_net.h +++ b/drivers/staging/netlogic/xlr_net.h @@ -277,332 +277,332 @@ #define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0 #define R_HASH_TABLE_VECTOR 0x30 #define R_TX_CONTROL 0x0A0 -#define O_TX_CONTROL__Tx15Halt 31 -#define O_TX_CONTROL__Tx14Halt 30 -#define O_TX_CONTROL__Tx13Halt 29 -#define O_TX_CONTROL__Tx12Halt 28 -#define O_TX_CONTROL__Tx11Halt 27 -#define O_TX_CONTROL__Tx10Halt 26 -#define O_TX_CONTROL__Tx9Halt 25 -#define O_TX_CONTROL__Tx8Halt 24 -#define O_TX_CONTROL__Tx7Halt 
23 -#define O_TX_CONTROL__Tx6Halt 22 -#define O_TX_CONTROL__Tx5Halt 21 -#define O_TX_CONTROL__Tx4Halt 20 -#define O_TX_CONTROL__Tx3Halt 19 -#define O_TX_CONTROL__Tx2Halt 18 -#define O_TX_CONTROL__Tx1Halt 17 -#define O_TX_CONTROL__Tx0Halt 16 -#define O_TX_CONTROL__TxIdle 15 -#define O_TX_CONTROL__TxEnable 14 -#define O_TX_CONTROL__TxThreshold 0 -#define W_TX_CONTROL__TxThreshold 14 +#define O_TX_CONTROL__TX15HALT 31 +#define O_TX_CONTROL__TX14HALT 30 +#define O_TX_CONTROL__TX13HALT 29 +#define O_TX_CONTROL__TX12HALT 28 +#define O_TX_CONTROL__TX11HALT 27 +#define O_TX_CONTROL__TX10HALT 26 +#define O_TX_CONTROL__TX9HALT 25 +#define O_TX_CONTROL__TX8HALT 24 +#define O_TX_CONTROL__TX7HALT 23 +#define O_TX_CONTROL__TX6HALT 22 +#define O_TX_CONTROL__TX5HALT 21 +#define O_TX_CONTROL__TX4HALT 20 +#define O_TX_CONTROL__TX3HALT 19 +#define O_TX_CONTROL__TX2HALT 18 +#define O_TX_CONTROL__TX1HALT 17 +#define O_TX_CONTROL__TX0HALT 16 +#define O_TX_CONTROL__TXIDLE 15 +#define O_TX_CONTROL__TXENABLE 14 +#define O_TX_CONTROL__TXTHRESHOLD 0 +#define W_TX_CONTROL__TXTHRESHOLD 14 #define R_RX_CONTROL 0x0A1 #define O_RX_CONTROL__RGMII 10 -#define O_RX_CONTROL__SoftReset 2 -#define O_RX_CONTROL__RxHalt 1 -#define O_RX_CONTROL__RxEnable 0 +#define O_RX_CONTROL__SOFTRESET 2 +#define O_RX_CONTROL__RXHALT 1 +#define O_RX_CONTROL__RXENABLE 0 #define R_DESC_PACK_CTRL 0x0A2 -#define O_DESC_PACK_CTRL__ByteOffset 17 -#define W_DESC_PACK_CTRL__ByteOffset 3 -#define O_DESC_PACK_CTRL__PrePadEnable 16 -#define O_DESC_PACK_CTRL__MaxEntry 14 -#define W_DESC_PACK_CTRL__MaxEntry 2 -#define O_DESC_PACK_CTRL__RegularSize 0 -#define W_DESC_PACK_CTRL__RegularSize 14 +#define O_DESC_PACK_CTRL__BYTEOFFSET 17 +#define W_DESC_PACK_CTRL__BYTEOFFSET 3 +#define O_DESC_PACK_CTRL__PREPADENABLE 16 +#define O_DESC_PACK_CTRL__MAXENTRY 14 +#define W_DESC_PACK_CTRL__MAXENTRY 2 +#define O_DESC_PACK_CTRL__REGULARSIZE 0 +#define W_DESC_PACK_CTRL__REGULARSIZE 14 #define R_STATCTRL 0x0A3 -#define O_STATCTRL__OverFlowEn 4 +#define O_STATCTRL__OVERFLOWEN 4 #define O_STATCTRL__GIG 3 -#define O_STATCTRL__Sten 2 -#define O_STATCTRL__ClrCnt 1 -#define O_STATCTRL__AutoZ 0 +#define O_STATCTRL__STEN 2 +#define O_STATCTRL__CLRCNT 1 +#define O_STATCTRL__AUTOZ 0 #define R_L2ALLOCCTRL 0x0A4 -#define O_L2ALLOCCTRL__TxL2Allocate 9 -#define W_L2ALLOCCTRL__TxL2Allocate 9 -#define O_L2ALLOCCTRL__RxL2Allocate 0 -#define W_L2ALLOCCTRL__RxL2Allocate 9 +#define O_L2ALLOCCTRL__TXL2ALLOCATE 9 +#define W_L2ALLOCCTRL__TXL2ALLOCATE 9 +#define O_L2ALLOCCTRL__RXL2ALLOCATE 0 +#define W_L2ALLOCCTRL__RXL2ALLOCATE 9 #define R_INTMASK 0x0A5 -#define O_INTMASK__Spi4TxError 28 -#define O_INTMASK__Spi4RxError 27 -#define O_INTMASK__RGMIIHalfDupCollision 27 -#define O_INTMASK__Abort 26 -#define O_INTMASK__Underrun 25 -#define O_INTMASK__DiscardPacket 24 -#define O_INTMASK__AsyncFifoFull 23 -#define O_INTMASK__TagFull 22 -#define O_INTMASK__Class3Full 21 -#define O_INTMASK__C3EarlyFull 20 -#define O_INTMASK__Class2Full 19 -#define O_INTMASK__C2EarlyFull 18 -#define O_INTMASK__Class1Full 17 -#define O_INTMASK__C1EarlyFull 16 -#define O_INTMASK__Class0Full 15 -#define O_INTMASK__C0EarlyFull 14 -#define O_INTMASK__RxDataFull 13 -#define O_INTMASK__RxEarlyFull 12 -#define O_INTMASK__RFreeEmpty 9 -#define O_INTMASK__RFEarlyEmpty 8 -#define O_INTMASK__P2PSpillEcc 7 -#define O_INTMASK__FreeDescFull 5 -#define O_INTMASK__FreeEarlyFull 4 -#define O_INTMASK__TxFetchError 3 -#define O_INTMASK__StatCarry 2 -#define O_INTMASK__MDInt 1 -#define O_INTMASK__TxIllegal 0 +#define O_INTMASK__SPI4TXERROR 28 
+#define O_INTMASK__SPI4RXERROR 27 +#define O_INTMASK__RGMIIHALFDUPCOLLISION 27 +#define O_INTMASK__ABORT 26 +#define O_INTMASK__UNDERRUN 25 +#define O_INTMASK__DISCARDPACKET 24 +#define O_INTMASK__ASYNCFIFOFULL 23 +#define O_INTMASK__TAGFULL 22 +#define O_INTMASK__CLASS3FULL 21 +#define O_INTMASK__C3EARLYFULL 20 +#define O_INTMASK__CLASS2FULL 19 +#define O_INTMASK__C2EARLYFULL 18 +#define O_INTMASK__CLASS1FULL 17 +#define O_INTMASK__C1EARLYFULL 16 +#define O_INTMASK__CLASS0FULL 15 +#define O_INTMASK__C0EARLYFULL 14 +#define O_INTMASK__RXDATAFULL 13 +#define O_INTMASK__RXEARLYFULL 12 +#define O_INTMASK__RFREEEMPTY 9 +#define O_INTMASK__RFEARLYEMPTY 8 +#define O_INTMASK__P2PSPILLECC 7 +#define O_INTMASK__FREEDESCFULL 5 +#define O_INTMASK__FREEEARLYFULL 4 +#define O_INTMASK__TXFETCHERROR 3 +#define O_INTMASK__STATCARRY 2 +#define O_INTMASK__MDINT 1 +#define O_INTMASK__TXILLEGAL 0 #define R_INTREG 0x0A6 -#define O_INTREG__Spi4TxError 28 -#define O_INTREG__Spi4RxError 27 -#define O_INTREG__RGMIIHalfDupCollision 27 -#define O_INTREG__Abort 26 -#define O_INTREG__Underrun 25 -#define O_INTREG__DiscardPacket 24 -#define O_INTREG__AsyncFifoFull 23 -#define O_INTREG__TagFull 22 -#define O_INTREG__Class3Full 21 -#define O_INTREG__C3EarlyFull 20 -#define O_INTREG__Class2Full 19 -#define O_INTREG__C2EarlyFull 18 -#define O_INTREG__Class1Full 17 -#define O_INTREG__C1EarlyFull 16 -#define O_INTREG__Class0Full 15 -#define O_INTREG__C0EarlyFull 14 -#define O_INTREG__RxDataFull 13 -#define O_INTREG__RxEarlyFull 12 -#define O_INTREG__RFreeEmpty 9 -#define O_INTREG__RFEarlyEmpty 8 -#define O_INTREG__P2PSpillEcc 7 -#define O_INTREG__FreeDescFull 5 -#define O_INTREG__FreeEarlyFull 4 -#define O_INTREG__TxFetchError 3 -#define O_INTREG__StatCarry 2 -#define O_INTREG__MDInt 1 -#define O_INTREG__TxIllegal 0 +#define O_INTREG__SPI4TXERROR 28 +#define O_INTREG__SPI4RXERROR 27 +#define O_INTREG__RGMIIHALFDUPCOLLISION 27 +#define O_INTREG__ABORT 26 +#define O_INTREG__UNDERRUN 25 +#define O_INTREG__DISCARDPACKET 24 +#define O_INTREG__ASYNCFIFOFULL 23 +#define O_INTREG__TAGFULL 22 +#define O_INTREG__CLASS3FULL 21 +#define O_INTREG__C3EARLYFULL 20 +#define O_INTREG__CLASS2FULL 19 +#define O_INTREG__C2EARLYFULL 18 +#define O_INTREG__CLASS1FULL 17 +#define O_INTREG__C1EARLYFULL 16 +#define O_INTREG__CLASS0FULL 15 +#define O_INTREG__C0EARLYFULL 14 +#define O_INTREG__RXDATAFULL 13 +#define O_INTREG__RXEARLYFULL 12 +#define O_INTREG__RFREEEMPTY 9 +#define O_INTREG__RFEARLYEMPTY 8 +#define O_INTREG__P2PSPILLECC 7 +#define O_INTREG__FREEDESCFULL 5 +#define O_INTREG__FREEEARLYFULL 4 +#define O_INTREG__TXFETCHERROR 3 +#define O_INTREG__STATCARRY 2 +#define O_INTREG__MDINT 1 +#define O_INTREG__TXILLEGAL 0 #define R_TXRETRY 0x0A7 -#define O_TXRETRY__CollisionRetry 6 -#define O_TXRETRY__BusErrorRetry 5 -#define O_TXRETRY__UnderRunRetry 4 -#define O_TXRETRY__Retries 0 -#define W_TXRETRY__Retries 4 +#define O_TXRETRY__COLLISIONRETRY 6 +#define O_TXRETRY__BUSERRORRETRY 5 +#define O_TXRETRY__UNDERRUNRETRY 4 +#define O_TXRETRY__RETRIES 0 +#define W_TXRETRY__RETRIES 4 #define R_CORECONTROL 0x0A8 -#define O_CORECONTROL__ErrorThread 4 -#define W_CORECONTROL__ErrorThread 7 -#define O_CORECONTROL__Shutdown 2 -#define O_CORECONTROL__Speed 0 -#define W_CORECONTROL__Speed 2 +#define O_CORECONTROL__ERRORTHREAD 4 +#define W_CORECONTROL__ERRORTHREAD 7 +#define O_CORECONTROL__SHUTDOWN 2 +#define O_CORECONTROL__SPEED 0 +#define W_CORECONTROL__SPEED 2 #define R_BYTEOFFSET0 0x0A9 #define R_BYTEOFFSET1 0x0AA #define R_L2TYPE_0 0x0F0 -#define 
O_L2TYPE__ExtraHdrProtoSize 26 -#define W_L2TYPE__ExtraHdrProtoSize 5 -#define O_L2TYPE__ExtraHdrProtoOffset 20 -#define W_L2TYPE__ExtraHdrProtoOffset 6 -#define O_L2TYPE__ExtraHeaderSize 14 -#define W_L2TYPE__ExtraHeaderSize 6 -#define O_L2TYPE__ProtoOffset 8 -#define W_L2TYPE__ProtoOffset 6 -#define O_L2TYPE__L2HdrOffset 2 -#define W_L2TYPE__L2HdrOffset 6 -#define O_L2TYPE__L2Proto 0 -#define W_L2TYPE__L2Proto 2 +#define O_L2TYPE__EXTRAHDRPROTOSIZE 26 +#define W_L2TYPE__EXTRAHDRPROTOSIZE 5 +#define O_L2TYPE__EXTRAHDRPROTOOFFSET 20 +#define W_L2TYPE__EXTRAHDRPROTOOFFSET 6 +#define O_L2TYPE__EXTRAHEADERSIZE 14 +#define W_L2TYPE__EXTRAHEADERSIZE 6 +#define O_L2TYPE__PROTOOFFSET 8 +#define W_L2TYPE__PROTOOFFSET 6 +#define O_L2TYPE__L2HDROFFSET 2 +#define W_L2TYPE__L2HDROFFSET 6 +#define O_L2TYPE__L2PROTO 0 +#define W_L2TYPE__L2PROTO 2 #define R_L2TYPE_1 0xF0 #define R_L2TYPE_2 0xF0 #define R_L2TYPE_3 0xF0 #define R_PARSERCONFIGREG 0x100 -#define O_PARSERCONFIGREG__CRCHashPoly 8 -#define W_PARSERCONFIGREG__CRCHashPoly 7 -#define O_PARSERCONFIGREG__PrePadOffset 4 -#define W_PARSERCONFIGREG__PrePadOffset 4 -#define O_PARSERCONFIGREG__UseCAM 2 -#define O_PARSERCONFIGREG__UseHASH 1 -#define O_PARSERCONFIGREG__UseProto 0 +#define O_PARSERCONFIGREG__CRCHASHPOLY 8 +#define W_PARSERCONFIGREG__CRCHASHPOLY 7 +#define O_PARSERCONFIGREG__PREPADOFFSET 4 +#define W_PARSERCONFIGREG__PREPADOFFSET 4 +#define O_PARSERCONFIGREG__USECAM 2 +#define O_PARSERCONFIGREG__USEHASH 1 +#define O_PARSERCONFIGREG__USEPROTO 0 #define R_L3CTABLE 0x140 -#define O_L3CTABLE__Offset0 25 -#define W_L3CTABLE__Offset0 7 -#define O_L3CTABLE__Len0 21 -#define W_L3CTABLE__Len0 4 -#define O_L3CTABLE__Offset1 14 -#define W_L3CTABLE__Offset1 7 -#define O_L3CTABLE__Len1 10 -#define W_L3CTABLE__Len1 4 -#define O_L3CTABLE__Offset2 4 -#define W_L3CTABLE__Offset2 6 -#define O_L3CTABLE__Len2 0 -#define W_L3CTABLE__Len2 4 -#define O_L3CTABLE__L3HdrOffset 26 -#define W_L3CTABLE__L3HdrOffset 6 -#define O_L3CTABLE__L4ProtoOffset 20 -#define W_L3CTABLE__L4ProtoOffset 6 -#define O_L3CTABLE__IPChksumCompute 19 -#define O_L3CTABLE__L4Classify 18 -#define O_L3CTABLE__L2Proto 16 -#define W_L3CTABLE__L2Proto 2 -#define O_L3CTABLE__L3ProtoKey 0 -#define W_L3CTABLE__L3ProtoKey 16 +#define O_L3CTABLE__OFFSET0 25 +#define W_L3CTABLE__OFFSET0 7 +#define O_L3CTABLE__LEN0 21 +#define W_L3CTABLE__LEN0 4 +#define O_L3CTABLE__OFFSET1 14 +#define W_L3CTABLE__OFFSET1 7 +#define O_L3CTABLE__LEN1 10 +#define W_L3CTABLE__LEN1 4 +#define O_L3CTABLE__OFFSET2 4 +#define W_L3CTABLE__OFFSET2 6 +#define O_L3CTABLE__LEN2 0 +#define W_L3CTABLE__LEN2 4 +#define O_L3CTABLE__L3HDROFFSET 26 +#define W_L3CTABLE__L3HDROFFSET 6 +#define O_L3CTABLE__L4PROTOOFFSET 20 +#define W_L3CTABLE__L4PROTOOFFSET 6 +#define O_L3CTABLE__IPCHKSUMCOMPUTE 19 +#define O_L3CTABLE__L4CLASSIFY 18 +#define O_L3CTABLE__L2PROTO 16 +#define W_L3CTABLE__L2PROTO 2 +#define O_L3CTABLE__L3PROTOKEY 0 +#define W_L3CTABLE__L3PROTOKEY 16 #define R_L4CTABLE 0x160 -#define O_L4CTABLE__Offset0 21 -#define W_L4CTABLE__Offset0 6 -#define O_L4CTABLE__Len0 17 -#define W_L4CTABLE__Len0 4 -#define O_L4CTABLE__Offset1 11 -#define W_L4CTABLE__Offset1 6 -#define O_L4CTABLE__Len1 7 -#define W_L4CTABLE__Len1 4 -#define O_L4CTABLE__TCPChksumEnable 0 +#define O_L4CTABLE__OFFSET0 21 +#define W_L4CTABLE__OFFSET0 6 +#define O_L4CTABLE__LEN0 17 +#define W_L4CTABLE__LEN0 4 +#define O_L4CTABLE__OFFSET1 11 +#define W_L4CTABLE__OFFSET1 6 +#define O_L4CTABLE__LEN1 7 +#define W_L4CTABLE__LEN1 4 +#define O_L4CTABLE__TCPCHKSUMENABLE 0 #define 
R_CAM4X128TABLE 0x172 -#define O_CAM4X128TABLE__ClassId 7 -#define W_CAM4X128TABLE__ClassId 2 -#define O_CAM4X128TABLE__BucketId 1 -#define W_CAM4X128TABLE__BucketId 6 -#define O_CAM4X128TABLE__UseBucket 0 +#define O_CAM4X128TABLE__CLASSID 7 +#define W_CAM4X128TABLE__CLASSID 2 +#define O_CAM4X128TABLE__BUCKETID 1 +#define W_CAM4X128TABLE__BUCKETID 6 +#define O_CAM4X128TABLE__USEBUCKET 0 #define R_CAM4X128KEY 0x180 #define R_TRANSLATETABLE 0x1A0 #define R_DMACR0 0x200 -#define O_DMACR0__Data0WrMaxCr 27 -#define W_DMACR0__Data0WrMaxCr 3 -#define O_DMACR0__Data0RdMaxCr 24 -#define W_DMACR0__Data0RdMaxCr 3 -#define O_DMACR0__Data1WrMaxCr 21 -#define W_DMACR0__Data1WrMaxCr 3 -#define O_DMACR0__Data1RdMaxCr 18 -#define W_DMACR0__Data1RdMaxCr 3 -#define O_DMACR0__Data2WrMaxCr 15 -#define W_DMACR0__Data2WrMaxCr 3 -#define O_DMACR0__Data2RdMaxCr 12 -#define W_DMACR0__Data2RdMaxCr 3 -#define O_DMACR0__Data3WrMaxCr 9 -#define W_DMACR0__Data3WrMaxCr 3 -#define O_DMACR0__Data3RdMaxCr 6 -#define W_DMACR0__Data3RdMaxCr 3 -#define O_DMACR0__Data4WrMaxCr 3 -#define W_DMACR0__Data4WrMaxCr 3 -#define O_DMACR0__Data4RdMaxCr 0 -#define W_DMACR0__Data4RdMaxCr 3 +#define O_DMACR0__DATA0WRMAXCR 27 +#define W_DMACR0__DATA0WRMAXCR 3 +#define O_DMACR0__DATA0RDMAXCR 24 +#define W_DMACR0__DATA0RDMAXCR 3 +#define O_DMACR0__DATA1WRMAXCR 21 +#define W_DMACR0__DATA1WRMAXCR 3 +#define O_DMACR0__DATA1RDMAXCR 18 +#define W_DMACR0__DATA1RDMAXCR 3 +#define O_DMACR0__DATA2WRMAXCR 15 +#define W_DMACR0__DATA2WRMAXCR 3 +#define O_DMACR0__DATA2RDMAXCR 12 +#define W_DMACR0__DATA2RDMAXCR 3 +#define O_DMACR0__DATA3WRMAXCR 9 +#define W_DMACR0__DATA3WRMAXCR 3 +#define O_DMACR0__DATA3RDMAXCR 6 +#define W_DMACR0__DATA3RDMAXCR 3 +#define O_DMACR0__DATA4WRMAXCR 3 +#define W_DMACR0__DATA4WRMAXCR 3 +#define O_DMACR0__DATA4RDMAXCR 0 +#define W_DMACR0__DATA4RDMAXCR 3 #define R_DMACR1 0x201 -#define O_DMACR1__Data5WrMaxCr 27 -#define W_DMACR1__Data5WrMaxCr 3 -#define O_DMACR1__Data5RdMaxCr 24 -#define W_DMACR1__Data5RdMaxCr 3 -#define O_DMACR1__Data6WrMaxCr 21 -#define W_DMACR1__Data6WrMaxCr 3 -#define O_DMACR1__Data6RdMaxCr 18 -#define W_DMACR1__Data6RdMaxCr 3 -#define O_DMACR1__Data7WrMaxCr 15 -#define W_DMACR1__Data7WrMaxCr 3 -#define O_DMACR1__Data7RdMaxCr 12 -#define W_DMACR1__Data7RdMaxCr 3 -#define O_DMACR1__Data8WrMaxCr 9 -#define W_DMACR1__Data8WrMaxCr 3 -#define O_DMACR1__Data8RdMaxCr 6 -#define W_DMACR1__Data8RdMaxCr 3 -#define O_DMACR1__Data9WrMaxCr 3 -#define W_DMACR1__Data9WrMaxCr 3 -#define O_DMACR1__Data9RdMaxCr 0 -#define W_DMACR1__Data9RdMaxCr 3 +#define O_DMACR1__DATA5WRMAXCR 27 +#define W_DMACR1__DATA5WRMAXCR 3 +#define O_DMACR1__DATA5RDMAXCR 24 +#define W_DMACR1__DATA5RDMAXCR 3 +#define O_DMACR1__DATA6WRMAXCR 21 +#define W_DMACR1__DATA6WRMAXCR 3 +#define O_DMACR1__DATA6RDMAXCR 18 +#define W_DMACR1__DATA6RDMAXCR 3 +#define O_DMACR1__DATA7WRMAXCR 15 +#define W_DMACR1__DATA7WRMAXCR 3 +#define O_DMACR1__DATA7RDMAXCR 12 +#define W_DMACR1__DATA7RDMAXCR 3 +#define O_DMACR1__DATA8WRMAXCR 9 +#define W_DMACR1__DATA8WRMAXCR 3 +#define O_DMACR1__DATA8RDMAXCR 6 +#define W_DMACR1__DATA8RDMAXCR 3 +#define O_DMACR1__DATA9WRMAXCR 3 +#define W_DMACR1__DATA9WRMAXCR 3 +#define O_DMACR1__DATA9RDMAXCR 0 +#define W_DMACR1__DATA9RDMAXCR 3 #define R_DMACR2 0x202 -#define O_DMACR2__Data10WrMaxCr 27 -#define W_DMACR2__Data10WrMaxCr 3 -#define O_DMACR2__Data10RdMaxCr 24 -#define W_DMACR2__Data10RdMaxCr 3 -#define O_DMACR2__Data11WrMaxCr 21 -#define W_DMACR2__Data11WrMaxCr 3 -#define O_DMACR2__Data11RdMaxCr 18 -#define W_DMACR2__Data11RdMaxCr 3 -#define 
O_DMACR2__Data12WrMaxCr 15 -#define W_DMACR2__Data12WrMaxCr 3 -#define O_DMACR2__Data12RdMaxCr 12 -#define W_DMACR2__Data12RdMaxCr 3 -#define O_DMACR2__Data13WrMaxCr 9 -#define W_DMACR2__Data13WrMaxCr 3 -#define O_DMACR2__Data13RdMaxCr 6 -#define W_DMACR2__Data13RdMaxCr 3 -#define O_DMACR2__Data14WrMaxCr 3 -#define W_DMACR2__Data14WrMaxCr 3 -#define O_DMACR2__Data14RdMaxCr 0 -#define W_DMACR2__Data14RdMaxCr 3 +#define O_DMACR2__DATA10WRMAXCR 27 +#define W_DMACR2__DATA10WRMAXCR 3 +#define O_DMACR2__DATA10RDMAXCR 24 +#define W_DMACR2__DATA10RDMAXCR 3 +#define O_DMACR2__DATA11WRMAXCR 21 +#define W_DMACR2__DATA11WRMAXCR 3 +#define O_DMACR2__DATA11RDMAXCR 18 +#define W_DMACR2__DATA11RDMAXCR 3 +#define O_DMACR2__DATA12WRMAXCR 15 +#define W_DMACR2__DATA12WRMAXCR 3 +#define O_DMACR2__DATA12RDMAXCR 12 +#define W_DMACR2__DATA12RDMAXCR 3 +#define O_DMACR2__DATA13WRMAXCR 9 +#define W_DMACR2__DATA13WRMAXCR 3 +#define O_DMACR2__DATA13RDMAXCR 6 +#define W_DMACR2__DATA13RDMAXCR 3 +#define O_DMACR2__DATA14WRMAXCR 3 +#define W_DMACR2__DATA14WRMAXCR 3 +#define O_DMACR2__DATA14RDMAXCR 0 +#define W_DMACR2__DATA14RDMAXCR 3 #define R_DMACR3 0x203 -#define O_DMACR3__Data15WrMaxCr 27 -#define W_DMACR3__Data15WrMaxCr 3 -#define O_DMACR3__Data15RdMaxCr 24 -#define W_DMACR3__Data15RdMaxCr 3 -#define O_DMACR3__SpClassWrMaxCr 21 -#define W_DMACR3__SpClassWrMaxCr 3 -#define O_DMACR3__SpClassRdMaxCr 18 -#define W_DMACR3__SpClassRdMaxCr 3 -#define O_DMACR3__JumFrInWrMaxCr 15 -#define W_DMACR3__JumFrInWrMaxCr 3 -#define O_DMACR3__JumFrInRdMaxCr 12 -#define W_DMACR3__JumFrInRdMaxCr 3 -#define O_DMACR3__RegFrInWrMaxCr 9 -#define W_DMACR3__RegFrInWrMaxCr 3 -#define O_DMACR3__RegFrInRdMaxCr 6 -#define W_DMACR3__RegFrInRdMaxCr 3 -#define O_DMACR3__FrOutWrMaxCr 3 -#define W_DMACR3__FrOutWrMaxCr 3 -#define O_DMACR3__FrOutRdMaxCr 0 -#define W_DMACR3__FrOutRdMaxCr 3 +#define O_DMACR3__DATA15WRMAXCR 27 +#define W_DMACR3__DATA15WRMAXCR 3 +#define O_DMACR3__DATA15RDMAXCR 24 +#define W_DMACR3__DATA15RDMAXCR 3 +#define O_DMACR3__SPCLASSWRMAXCR 21 +#define W_DMACR3__SPCLASSWRMAXCR 3 +#define O_DMACR3__SPCLASSRDMAXCR 18 +#define W_DMACR3__SPCLASSRDMAXCR 3 +#define O_DMACR3__JUMFRINWRMAXCR 15 +#define W_DMACR3__JUMFRINWRMAXCR 3 +#define O_DMACR3__JUMFRINRDMAXCR 12 +#define W_DMACR3__JUMFRINRDMAXCR 3 +#define O_DMACR3__REGFRINWRMAXCR 9 +#define W_DMACR3__REGFRINWRMAXCR 3 +#define O_DMACR3__REGFRINRDMAXCR 6 +#define W_DMACR3__REGFRINRDMAXCR 3 +#define O_DMACR3__FROUTWRMAXCR 3 +#define W_DMACR3__FROUTWRMAXCR 3 +#define O_DMACR3__FROUTRDMAXCR 0 +#define W_DMACR3__FROUTRDMAXCR 3 #define R_REG_FRIN_SPILL_MEM_START_0 0x204 -#define O_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 0 -#define W_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 32 +#define O_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 0 +#define W_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 32 #define R_REG_FRIN_SPILL_MEM_START_1 0x205 -#define O_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 0 -#define W_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 3 +#define O_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 0 +#define W_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 3 #define R_REG_FRIN_SPILL_MEM_SIZE 0x206 -#define O_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 0 -#define W_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 32 +#define O_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 0 +#define W_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 32 #define R_FROUT_SPILL_MEM_START_0 0x207 -#define O_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 0 -#define 
W_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 32 +#define O_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 0 +#define W_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 32 #define R_FROUT_SPILL_MEM_START_1 0x208 -#define O_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 0 -#define W_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 3 +#define O_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 0 +#define W_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 3 #define R_FROUT_SPILL_MEM_SIZE 0x209 -#define O_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 0 -#define W_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 32 +#define O_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 0 +#define W_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 32 #define R_CLASS0_SPILL_MEM_START_0 0x20A -#define O_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 0 -#define W_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 32 +#define O_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 0 +#define W_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 32 #define R_CLASS0_SPILL_MEM_START_1 0x20B -#define O_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 0 -#define W_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 3 +#define O_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 0 +#define W_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 3 #define R_CLASS0_SPILL_MEM_SIZE 0x20C -#define O_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 0 -#define W_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 32 +#define O_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 0 +#define W_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 32 #define R_JUMFRIN_SPILL_MEM_START_0 0x20D -#define O_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 0 -#define W_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 32 +#define O_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 0 +#define W_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 32 #define R_JUMFRIN_SPILL_MEM_START_1 0x20E -#define O_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 0 -#define W_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 3 +#define O_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 0 +#define W_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 3 #define R_JUMFRIN_SPILL_MEM_SIZE 0x20F -#define O_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 0 -#define W_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 32 +#define O_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 0 +#define W_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 32 #define R_CLASS1_SPILL_MEM_START_0 0x210 -#define O_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 0 -#define W_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 32 +#define O_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 0 +#define W_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 32 #define R_CLASS1_SPILL_MEM_START_1 0x211 -#define O_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 0 -#define W_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 3 +#define O_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 0 +#define W_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 3 #define R_CLASS1_SPILL_MEM_SIZE 0x212 -#define O_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 0 -#define W_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 32 +#define O_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 0 +#define W_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 32 #define R_CLASS2_SPILL_MEM_START_0 0x213 -#define O_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 0 -#define W_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 32 +#define O_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 0 +#define W_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 32 #define R_CLASS2_SPILL_MEM_START_1 0x214 -#define 
O_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 0 -#define W_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 3 +#define O_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 0 +#define W_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 3 #define R_CLASS2_SPILL_MEM_SIZE 0x215 -#define O_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 0 -#define W_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 32 +#define O_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 0 +#define W_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 32 #define R_CLASS3_SPILL_MEM_START_0 0x216 -#define O_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 0 -#define W_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 32 +#define O_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 0 +#define W_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 32 #define R_CLASS3_SPILL_MEM_START_1 0x217 -#define O_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 0 -#define W_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 3 +#define O_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 0 +#define W_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 3 #define R_CLASS3_SPILL_MEM_SIZE 0x218 -#define O_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 0 -#define W_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 32 +#define O_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 0 +#define W_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 32 #define R_REG_FRIN1_SPILL_MEM_START_0 0x219 #define R_REG_FRIN1_SPILL_MEM_START_1 0x21a #define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b @@ -679,244 +679,244 @@ #define O_SPISTRV3__EG_STRV_THRESH_15 0 #define W_SPISTRV3__EG_STRV_THRESH_15 7 #define R_TXDATAFIFO0 0x221 -#define O_TXDATAFIFO0__Tx0DataFifoStart 24 -#define W_TXDATAFIFO0__Tx0DataFifoStart 7 -#define O_TXDATAFIFO0__Tx0DataFifoSize 16 -#define W_TXDATAFIFO0__Tx0DataFifoSize 7 -#define O_TXDATAFIFO0__Tx1DataFifoStart 8 -#define W_TXDATAFIFO0__Tx1DataFifoStart 7 -#define O_TXDATAFIFO0__Tx1DataFifoSize 0 -#define W_TXDATAFIFO0__Tx1DataFifoSize 7 +#define O_TXDATAFIFO0__TX0DATAFIFOSTART 24 +#define W_TXDATAFIFO0__TX0DATAFIFOSTART 7 +#define O_TXDATAFIFO0__TX0DATAFIFOSIZE 16 +#define W_TXDATAFIFO0__TX0DATAFIFOSIZE 7 +#define O_TXDATAFIFO0__TX1DATAFIFOSTART 8 +#define W_TXDATAFIFO0__TX1DATAFIFOSTART 7 +#define O_TXDATAFIFO0__TX1DATAFIFOSIZE 0 +#define W_TXDATAFIFO0__TX1DATAFIFOSIZE 7 #define R_TXDATAFIFO1 0x222 -#define O_TXDATAFIFO1__Tx2DataFifoStart 24 -#define W_TXDATAFIFO1__Tx2DataFifoStart 7 -#define O_TXDATAFIFO1__Tx2DataFifoSize 16 -#define W_TXDATAFIFO1__Tx2DataFifoSize 7 -#define O_TXDATAFIFO1__Tx3DataFifoStart 8 -#define W_TXDATAFIFO1__Tx3DataFifoStart 7 -#define O_TXDATAFIFO1__Tx3DataFifoSize 0 -#define W_TXDATAFIFO1__Tx3DataFifoSize 7 +#define O_TXDATAFIFO1__TX2DATAFIFOSTART 24 +#define W_TXDATAFIFO1__TX2DATAFIFOSTART 7 +#define O_TXDATAFIFO1__TX2DATAFIFOSIZE 16 +#define W_TXDATAFIFO1__TX2DATAFIFOSIZE 7 +#define O_TXDATAFIFO1__TX3DATAFIFOSTART 8 +#define W_TXDATAFIFO1__TX3DATAFIFOSTART 7 +#define O_TXDATAFIFO1__TX3DATAFIFOSIZE 0 +#define W_TXDATAFIFO1__TX3DATAFIFOSIZE 7 #define R_TXDATAFIFO2 0x223 -#define O_TXDATAFIFO2__Tx4DataFifoStart 24 -#define W_TXDATAFIFO2__Tx4DataFifoStart 7 -#define O_TXDATAFIFO2__Tx4DataFifoSize 16 -#define W_TXDATAFIFO2__Tx4DataFifoSize 7 -#define O_TXDATAFIFO2__Tx5DataFifoStart 8 -#define W_TXDATAFIFO2__Tx5DataFifoStart 7 -#define O_TXDATAFIFO2__Tx5DataFifoSize 0 -#define W_TXDATAFIFO2__Tx5DataFifoSize 7 +#define O_TXDATAFIFO2__TX4DATAFIFOSTART 24 +#define W_TXDATAFIFO2__TX4DATAFIFOSTART 7 +#define O_TXDATAFIFO2__TX4DATAFIFOSIZE 16 +#define W_TXDATAFIFO2__TX4DATAFIFOSIZE 7 +#define O_TXDATAFIFO2__TX5DATAFIFOSTART 
8 +#define W_TXDATAFIFO2__TX5DATAFIFOSTART 7 +#define O_TXDATAFIFO2__TX5DATAFIFOSIZE 0 +#define W_TXDATAFIFO2__TX5DATAFIFOSIZE 7 #define R_TXDATAFIFO3 0x224 -#define O_TXDATAFIFO3__Tx6DataFifoStart 24 -#define W_TXDATAFIFO3__Tx6DataFifoStart 7 -#define O_TXDATAFIFO3__Tx6DataFifoSize 16 -#define W_TXDATAFIFO3__Tx6DataFifoSize 7 -#define O_TXDATAFIFO3__Tx7DataFifoStart 8 -#define W_TXDATAFIFO3__Tx7DataFifoStart 7 -#define O_TXDATAFIFO3__Tx7DataFifoSize 0 -#define W_TXDATAFIFO3__Tx7DataFifoSize 7 +#define O_TXDATAFIFO3__TX6DATAFIFOSTART 24 +#define W_TXDATAFIFO3__TX6DATAFIFOSTART 7 +#define O_TXDATAFIFO3__TX6DATAFIFOSIZE 16 +#define W_TXDATAFIFO3__TX6DATAFIFOSIZE 7 +#define O_TXDATAFIFO3__TX7DATAFIFOSTART 8 +#define W_TXDATAFIFO3__TX7DATAFIFOSTART 7 +#define O_TXDATAFIFO3__TX7DATAFIFOSIZE 0 +#define W_TXDATAFIFO3__TX7DATAFIFOSIZE 7 #define R_TXDATAFIFO4 0x225 -#define O_TXDATAFIFO4__Tx8DataFifoStart 24 -#define W_TXDATAFIFO4__Tx8DataFifoStart 7 -#define O_TXDATAFIFO4__Tx8DataFifoSize 16 -#define W_TXDATAFIFO4__Tx8DataFifoSize 7 -#define O_TXDATAFIFO4__Tx9DataFifoStart 8 -#define W_TXDATAFIFO4__Tx9DataFifoStart 7 -#define O_TXDATAFIFO4__Tx9DataFifoSize 0 -#define W_TXDATAFIFO4__Tx9DataFifoSize 7 +#define O_TXDATAFIFO4__TX8DATAFIFOSTART 24 +#define W_TXDATAFIFO4__TX8DATAFIFOSTART 7 +#define O_TXDATAFIFO4__TX8DATAFIFOSIZE 16 +#define W_TXDATAFIFO4__TX8DATAFIFOSIZE 7 +#define O_TXDATAFIFO4__TX9DATAFIFOSTART 8 +#define W_TXDATAFIFO4__TX9DATAFIFOSTART 7 +#define O_TXDATAFIFO4__TX9DATAFIFOSIZE 0 +#define W_TXDATAFIFO4__TX9DATAFIFOSIZE 7 #define R_TXDATAFIFO5 0x226 -#define O_TXDATAFIFO5__Tx10DataFifoStart 24 -#define W_TXDATAFIFO5__Tx10DataFifoStart 7 -#define O_TXDATAFIFO5__Tx10DataFifoSize 16 -#define W_TXDATAFIFO5__Tx10DataFifoSize 7 -#define O_TXDATAFIFO5__Tx11DataFifoStart 8 -#define W_TXDATAFIFO5__Tx11DataFifoStart 7 -#define O_TXDATAFIFO5__Tx11DataFifoSize 0 -#define W_TXDATAFIFO5__Tx11DataFifoSize 7 +#define O_TXDATAFIFO5__TX10DATAFIFOSTART 24 +#define W_TXDATAFIFO5__TX10DATAFIFOSTART 7 +#define O_TXDATAFIFO5__TX10DATAFIFOSIZE 16 +#define W_TXDATAFIFO5__TX10DATAFIFOSIZE 7 +#define O_TXDATAFIFO5__TX11DATAFIFOSTART 8 +#define W_TXDATAFIFO5__TX11DATAFIFOSTART 7 +#define O_TXDATAFIFO5__TX11DATAFIFOSIZE 0 +#define W_TXDATAFIFO5__TX11DATAFIFOSIZE 7 #define R_TXDATAFIFO6 0x227 -#define O_TXDATAFIFO6__Tx12DataFifoStart 24 -#define W_TXDATAFIFO6__Tx12DataFifoStart 7 -#define O_TXDATAFIFO6__Tx12DataFifoSize 16 -#define W_TXDATAFIFO6__Tx12DataFifoSize 7 -#define O_TXDATAFIFO6__Tx13DataFifoStart 8 -#define W_TXDATAFIFO6__Tx13DataFifoStart 7 -#define O_TXDATAFIFO6__Tx13DataFifoSize 0 -#define W_TXDATAFIFO6__Tx13DataFifoSize 7 +#define O_TXDATAFIFO6__TX12DATAFIFOSTART 24 +#define W_TXDATAFIFO6__TX12DATAFIFOSTART 7 +#define O_TXDATAFIFO6__TX12DATAFIFOSIZE 16 +#define W_TXDATAFIFO6__TX12DATAFIFOSIZE 7 +#define O_TXDATAFIFO6__TX13DATAFIFOSTART 8 +#define W_TXDATAFIFO6__TX13DATAFIFOSTART 7 +#define O_TXDATAFIFO6__TX13DATAFIFOSIZE 0 +#define W_TXDATAFIFO6__TX13DATAFIFOSIZE 7 #define R_TXDATAFIFO7 0x228 -#define O_TXDATAFIFO7__Tx14DataFifoStart 24 -#define W_TXDATAFIFO7__Tx14DataFifoStart 7 -#define O_TXDATAFIFO7__Tx14DataFifoSize 16 -#define W_TXDATAFIFO7__Tx14DataFifoSize 7 -#define O_TXDATAFIFO7__Tx15DataFifoStart 8 -#define W_TXDATAFIFO7__Tx15DataFifoStart 7 -#define O_TXDATAFIFO7__Tx15DataFifoSize 0 -#define W_TXDATAFIFO7__Tx15DataFifoSize 7 +#define O_TXDATAFIFO7__TX14DATAFIFOSTART 24 +#define W_TXDATAFIFO7__TX14DATAFIFOSTART 7 +#define O_TXDATAFIFO7__TX14DATAFIFOSIZE 16 +#define 
W_TXDATAFIFO7__TX14DATAFIFOSIZE 7 +#define O_TXDATAFIFO7__TX15DATAFIFOSTART 8 +#define W_TXDATAFIFO7__TX15DATAFIFOSTART 7 +#define O_TXDATAFIFO7__TX15DATAFIFOSIZE 0 +#define W_TXDATAFIFO7__TX15DATAFIFOSIZE 7 #define R_RXDATAFIFO0 0x229 -#define O_RXDATAFIFO0__Rx0DataFifoStart 24 -#define W_RXDATAFIFO0__Rx0DataFifoStart 7 -#define O_RXDATAFIFO0__Rx0DataFifoSize 16 -#define W_RXDATAFIFO0__Rx0DataFifoSize 7 -#define O_RXDATAFIFO0__Rx1DataFifoStart 8 -#define W_RXDATAFIFO0__Rx1DataFifoStart 7 -#define O_RXDATAFIFO0__Rx1DataFifoSize 0 -#define W_RXDATAFIFO0__Rx1DataFifoSize 7 +#define O_RXDATAFIFO0__RX0DATAFIFOSTART 24 +#define W_RXDATAFIFO0__RX0DATAFIFOSTART 7 +#define O_RXDATAFIFO0__RX0DATAFIFOSIZE 16 +#define W_RXDATAFIFO0__RX0DATAFIFOSIZE 7 +#define O_RXDATAFIFO0__RX1DATAFIFOSTART 8 +#define W_RXDATAFIFO0__RX1DATAFIFOSTART 7 +#define O_RXDATAFIFO0__RX1DATAFIFOSIZE 0 +#define W_RXDATAFIFO0__RX1DATAFIFOSIZE 7 #define R_RXDATAFIFO1 0x22A -#define O_RXDATAFIFO1__Rx2DataFifoStart 24 -#define W_RXDATAFIFO1__Rx2DataFifoStart 7 -#define O_RXDATAFIFO1__Rx2DataFifoSize 16 -#define W_RXDATAFIFO1__Rx2DataFifoSize 7 -#define O_RXDATAFIFO1__Rx3DataFifoStart 8 -#define W_RXDATAFIFO1__Rx3DataFifoStart 7 -#define O_RXDATAFIFO1__Rx3DataFifoSize 0 -#define W_RXDATAFIFO1__Rx3DataFifoSize 7 +#define O_RXDATAFIFO1__RX2DATAFIFOSTART 24 +#define W_RXDATAFIFO1__RX2DATAFIFOSTART 7 +#define O_RXDATAFIFO1__RX2DATAFIFOSIZE 16 +#define W_RXDATAFIFO1__RX2DATAFIFOSIZE 7 +#define O_RXDATAFIFO1__RX3DATAFIFOSTART 8 +#define W_RXDATAFIFO1__RX3DATAFIFOSTART 7 +#define O_RXDATAFIFO1__RX3DATAFIFOSIZE 0 +#define W_RXDATAFIFO1__RX3DATAFIFOSIZE 7 #define R_RXDATAFIFO2 0x22B -#define O_RXDATAFIFO2__Rx4DataFifoStart 24 -#define W_RXDATAFIFO2__Rx4DataFifoStart 7 -#define O_RXDATAFIFO2__Rx4DataFifoSize 16 -#define W_RXDATAFIFO2__Rx4DataFifoSize 7 -#define O_RXDATAFIFO2__Rx5DataFifoStart 8 -#define W_RXDATAFIFO2__Rx5DataFifoStart 7 -#define O_RXDATAFIFO2__Rx5DataFifoSize 0 -#define W_RXDATAFIFO2__Rx5DataFifoSize 7 +#define O_RXDATAFIFO2__RX4DATAFIFOSTART 24 +#define W_RXDATAFIFO2__RX4DATAFIFOSTART 7 +#define O_RXDATAFIFO2__RX4DATAFIFOSIZE 16 +#define W_RXDATAFIFO2__RX4DATAFIFOSIZE 7 +#define O_RXDATAFIFO2__RX5DATAFIFOSTART 8 +#define W_RXDATAFIFO2__RX5DATAFIFOSTART 7 +#define O_RXDATAFIFO2__RX5DATAFIFOSIZE 0 +#define W_RXDATAFIFO2__RX5DATAFIFOSIZE 7 #define R_RXDATAFIFO3 0x22C -#define O_RXDATAFIFO3__Rx6DataFifoStart 24 -#define W_RXDATAFIFO3__Rx6DataFifoStart 7 -#define O_RXDATAFIFO3__Rx6DataFifoSize 16 -#define W_RXDATAFIFO3__Rx6DataFifoSize 7 -#define O_RXDATAFIFO3__Rx7DataFifoStart 8 -#define W_RXDATAFIFO3__Rx7DataFifoStart 7 -#define O_RXDATAFIFO3__Rx7DataFifoSize 0 -#define W_RXDATAFIFO3__Rx7DataFifoSize 7 +#define O_RXDATAFIFO3__RX6DATAFIFOSTART 24 +#define W_RXDATAFIFO3__RX6DATAFIFOSTART 7 +#define O_RXDATAFIFO3__RX6DATAFIFOSIZE 16 +#define W_RXDATAFIFO3__RX6DATAFIFOSIZE 7 +#define O_RXDATAFIFO3__RX7DATAFIFOSTART 8 +#define W_RXDATAFIFO3__RX7DATAFIFOSTART 7 +#define O_RXDATAFIFO3__RX7DATAFIFOSIZE 0 +#define W_RXDATAFIFO3__RX7DATAFIFOSIZE 7 #define R_RXDATAFIFO4 0x22D -#define O_RXDATAFIFO4__Rx8DataFifoStart 24 -#define W_RXDATAFIFO4__Rx8DataFifoStart 7 -#define O_RXDATAFIFO4__Rx8DataFifoSize 16 -#define W_RXDATAFIFO4__Rx8DataFifoSize 7 -#define O_RXDATAFIFO4__Rx9DataFifoStart 8 -#define W_RXDATAFIFO4__Rx9DataFifoStart 7 -#define O_RXDATAFIFO4__Rx9DataFifoSize 0 -#define W_RXDATAFIFO4__Rx9DataFifoSize 7 +#define O_RXDATAFIFO4__RX8DATAFIFOSTART 24 +#define W_RXDATAFIFO4__RX8DATAFIFOSTART 7 +#define O_RXDATAFIFO4__RX8DATAFIFOSIZE 
16 +#define W_RXDATAFIFO4__RX8DATAFIFOSIZE 7 +#define O_RXDATAFIFO4__RX9DATAFIFOSTART 8 +#define W_RXDATAFIFO4__RX9DATAFIFOSTART 7 +#define O_RXDATAFIFO4__RX9DATAFIFOSIZE 0 +#define W_RXDATAFIFO4__RX9DATAFIFOSIZE 7 #define R_RXDATAFIFO5 0x22E -#define O_RXDATAFIFO5__Rx10DataFifoStart 24 -#define W_RXDATAFIFO5__Rx10DataFifoStart 7 -#define O_RXDATAFIFO5__Rx10DataFifoSize 16 -#define W_RXDATAFIFO5__Rx10DataFifoSize 7 -#define O_RXDATAFIFO5__Rx11DataFifoStart 8 -#define W_RXDATAFIFO5__Rx11DataFifoStart 7 -#define O_RXDATAFIFO5__Rx11DataFifoSize 0 -#define W_RXDATAFIFO5__Rx11DataFifoSize 7 +#define O_RXDATAFIFO5__RX10DATAFIFOSTART 24 +#define W_RXDATAFIFO5__RX10DATAFIFOSTART 7 +#define O_RXDATAFIFO5__RX10DATAFIFOSIZE 16 +#define W_RXDATAFIFO5__RX10DATAFIFOSIZE 7 +#define O_RXDATAFIFO5__RX11DATAFIFOSTART 8 +#define W_RXDATAFIFO5__RX11DATAFIFOSTART 7 +#define O_RXDATAFIFO5__RX11DATAFIFOSIZE 0 +#define W_RXDATAFIFO5__RX11DATAFIFOSIZE 7 #define R_RXDATAFIFO6 0x22F -#define O_RXDATAFIFO6__Rx12DataFifoStart 24 -#define W_RXDATAFIFO6__Rx12DataFifoStart 7 -#define O_RXDATAFIFO6__Rx12DataFifoSize 16 -#define W_RXDATAFIFO6__Rx12DataFifoSize 7 -#define O_RXDATAFIFO6__Rx13DataFifoStart 8 -#define W_RXDATAFIFO6__Rx13DataFifoStart 7 -#define O_RXDATAFIFO6__Rx13DataFifoSize 0 -#define W_RXDATAFIFO6__Rx13DataFifoSize 7 +#define O_RXDATAFIFO6__RX12DATAFIFOSTART 24 +#define W_RXDATAFIFO6__RX12DATAFIFOSTART 7 +#define O_RXDATAFIFO6__RX12DATAFIFOSIZE 16 +#define W_RXDATAFIFO6__RX12DATAFIFOSIZE 7 +#define O_RXDATAFIFO6__RX13DATAFIFOSTART 8 +#define W_RXDATAFIFO6__RX13DATAFIFOSTART 7 +#define O_RXDATAFIFO6__RX13DATAFIFOSIZE 0 +#define W_RXDATAFIFO6__RX13DATAFIFOSIZE 7 #define R_RXDATAFIFO7 0x230 -#define O_RXDATAFIFO7__Rx14DataFifoStart 24 -#define W_RXDATAFIFO7__Rx14DataFifoStart 7 -#define O_RXDATAFIFO7__Rx14DataFifoSize 16 -#define W_RXDATAFIFO7__Rx14DataFifoSize 7 -#define O_RXDATAFIFO7__Rx15DataFifoStart 8 -#define W_RXDATAFIFO7__Rx15DataFifoStart 7 -#define O_RXDATAFIFO7__Rx15DataFifoSize 0 -#define W_RXDATAFIFO7__Rx15DataFifoSize 7 +#define O_RXDATAFIFO7__RX14DATAFIFOSTART 24 +#define W_RXDATAFIFO7__RX14DATAFIFOSTART 7 +#define O_RXDATAFIFO7__RX14DATAFIFOSIZE 16 +#define W_RXDATAFIFO7__RX14DATAFIFOSIZE 7 +#define O_RXDATAFIFO7__RX15DATAFIFOSTART 8 +#define W_RXDATAFIFO7__RX15DATAFIFOSTART 7 +#define O_RXDATAFIFO7__RX15DATAFIFOSIZE 0 +#define W_RXDATAFIFO7__RX15DATAFIFOSIZE 7 #define R_XGMACPADCALIBRATION 0x231 #define R_FREEQCARVE 0x233 #define R_SPI4STATICDELAY0 0x240 -#define O_SPI4STATICDELAY0__DataLine7 28 -#define W_SPI4STATICDELAY0__DataLine7 4 -#define O_SPI4STATICDELAY0__DataLine6 24 -#define W_SPI4STATICDELAY0__DataLine6 4 -#define O_SPI4STATICDELAY0__DataLine5 20 -#define W_SPI4STATICDELAY0__DataLine5 4 -#define O_SPI4STATICDELAY0__DataLine4 16 -#define W_SPI4STATICDELAY0__DataLine4 4 -#define O_SPI4STATICDELAY0__DataLine3 12 -#define W_SPI4STATICDELAY0__DataLine3 4 -#define O_SPI4STATICDELAY0__DataLine2 8 -#define W_SPI4STATICDELAY0__DataLine2 4 -#define O_SPI4STATICDELAY0__DataLine1 4 -#define W_SPI4STATICDELAY0__DataLine1 4 -#define O_SPI4STATICDELAY0__DataLine0 0 -#define W_SPI4STATICDELAY0__DataLine0 4 +#define O_SPI4STATICDELAY0__DATALINE7 28 +#define W_SPI4STATICDELAY0__DATALINE7 4 +#define O_SPI4STATICDELAY0__DATALINE6 24 +#define W_SPI4STATICDELAY0__DATALINE6 4 +#define O_SPI4STATICDELAY0__DATALINE5 20 +#define W_SPI4STATICDELAY0__DATALINE5 4 +#define O_SPI4STATICDELAY0__DATALINE4 16 +#define W_SPI4STATICDELAY0__DATALINE4 4 +#define O_SPI4STATICDELAY0__DATALINE3 12 +#define 
W_SPI4STATICDELAY0__DATALINE3 4 +#define O_SPI4STATICDELAY0__DATALINE2 8 +#define W_SPI4STATICDELAY0__DATALINE2 4 +#define O_SPI4STATICDELAY0__DATALINE1 4 +#define W_SPI4STATICDELAY0__DATALINE1 4 +#define O_SPI4STATICDELAY0__DATALINE0 0 +#define W_SPI4STATICDELAY0__DATALINE0 4 #define R_SPI4STATICDELAY1 0x241 -#define O_SPI4STATICDELAY1__DataLine15 28 -#define W_SPI4STATICDELAY1__DataLine15 4 -#define O_SPI4STATICDELAY1__DataLine14 24 -#define W_SPI4STATICDELAY1__DataLine14 4 -#define O_SPI4STATICDELAY1__DataLine13 20 -#define W_SPI4STATICDELAY1__DataLine13 4 -#define O_SPI4STATICDELAY1__DataLine12 16 -#define W_SPI4STATICDELAY1__DataLine12 4 -#define O_SPI4STATICDELAY1__DataLine11 12 -#define W_SPI4STATICDELAY1__DataLine11 4 -#define O_SPI4STATICDELAY1__DataLine10 8 -#define W_SPI4STATICDELAY1__DataLine10 4 -#define O_SPI4STATICDELAY1__DataLine9 4 -#define W_SPI4STATICDELAY1__DataLine9 4 -#define O_SPI4STATICDELAY1__DataLine8 0 -#define W_SPI4STATICDELAY1__DataLine8 4 +#define O_SPI4STATICDELAY1__DATALINE15 28 +#define W_SPI4STATICDELAY1__DATALINE15 4 +#define O_SPI4STATICDELAY1__DATALINE14 24 +#define W_SPI4STATICDELAY1__DATALINE14 4 +#define O_SPI4STATICDELAY1__DATALINE13 20 +#define W_SPI4STATICDELAY1__DATALINE13 4 +#define O_SPI4STATICDELAY1__DATALINE12 16 +#define W_SPI4STATICDELAY1__DATALINE12 4 +#define O_SPI4STATICDELAY1__DATALINE11 12 +#define W_SPI4STATICDELAY1__DATALINE11 4 +#define O_SPI4STATICDELAY1__DATALINE10 8 +#define W_SPI4STATICDELAY1__DATALINE10 4 +#define O_SPI4STATICDELAY1__DATALINE9 4 +#define W_SPI4STATICDELAY1__DATALINE9 4 +#define O_SPI4STATICDELAY1__DATALINE8 0 +#define W_SPI4STATICDELAY1__DATALINE8 4 #define R_SPI4STATICDELAY2 0x242 -#define O_SPI4STATICDELAY0__TxStat1 8 -#define W_SPI4STATICDELAY0__TxStat1 4 -#define O_SPI4STATICDELAY0__TxStat0 4 -#define W_SPI4STATICDELAY0__TxStat0 4 -#define O_SPI4STATICDELAY0__RxControl 0 -#define W_SPI4STATICDELAY0__RxControl 4 +#define O_SPI4STATICDELAY0__TXSTAT1 8 +#define W_SPI4STATICDELAY0__TXSTAT1 4 +#define O_SPI4STATICDELAY0__TXSTAT0 4 +#define W_SPI4STATICDELAY0__TXSTAT0 4 +#define O_SPI4STATICDELAY0__RXCONTROL 0 +#define W_SPI4STATICDELAY0__RXCONTROL 4 #define R_SPI4CONTROL 0x243 -#define O_SPI4CONTROL__StaticDelay 2 +#define O_SPI4CONTROL__STATICDELAY 2 #define O_SPI4CONTROL__LVDS_LVTTL 1 -#define O_SPI4CONTROL__SPI4Enable 0 +#define O_SPI4CONTROL__SPI4ENABLE 0 #define R_CLASSWATERMARKS 0x244 -#define O_CLASSWATERMARKS__Class0Watermark 24 -#define W_CLASSWATERMARKS__Class0Watermark 5 -#define O_CLASSWATERMARKS__Class1Watermark 16 -#define W_CLASSWATERMARKS__Class1Watermark 5 -#define O_CLASSWATERMARKS__Class3Watermark 0 -#define W_CLASSWATERMARKS__Class3Watermark 5 +#define O_CLASSWATERMARKS__CLASS0WATERMARK 24 +#define W_CLASSWATERMARKS__CLASS0WATERMARK 5 +#define O_CLASSWATERMARKS__CLASS1WATERMARK 16 +#define W_CLASSWATERMARKS__CLASS1WATERMARK 5 +#define O_CLASSWATERMARKS__CLASS3WATERMARK 0 +#define W_CLASSWATERMARKS__CLASS3WATERMARK 5 #define R_RXWATERMARKS1 0x245 -#define O_RXWATERMARKS__Rx0DataWatermark 24 -#define W_RXWATERMARKS__Rx0DataWatermark 7 -#define O_RXWATERMARKS__Rx1DataWatermark 16 -#define W_RXWATERMARKS__Rx1DataWatermark 7 -#define O_RXWATERMARKS__Rx3DataWatermark 0 -#define W_RXWATERMARKS__Rx3DataWatermark 7 +#define O_RXWATERMARKS__RX0DATAWATERMARK 24 +#define W_RXWATERMARKS__RX0DATAWATERMARK 7 +#define O_RXWATERMARKS__RX1DATAWATERMARK 16 +#define W_RXWATERMARKS__RX1DATAWATERMARK 7 +#define O_RXWATERMARKS__RX3DATAWATERMARK 0 +#define W_RXWATERMARKS__RX3DATAWATERMARK 7 #define R_RXWATERMARKS2 
0x246 -#define O_RXWATERMARKS__Rx4DataWatermark 24 -#define W_RXWATERMARKS__Rx4DataWatermark 7 -#define O_RXWATERMARKS__Rx5DataWatermark 16 -#define W_RXWATERMARKS__Rx5DataWatermark 7 -#define O_RXWATERMARKS__Rx6DataWatermark 8 -#define W_RXWATERMARKS__Rx6DataWatermark 7 -#define O_RXWATERMARKS__Rx7DataWatermark 0 -#define W_RXWATERMARKS__Rx7DataWatermark 7 +#define O_RXWATERMARKS__RX4DATAWATERMARK 24 +#define W_RXWATERMARKS__RX4DATAWATERMARK 7 +#define O_RXWATERMARKS__RX5DATAWATERMARK 16 +#define W_RXWATERMARKS__RX5DATAWATERMARK 7 +#define O_RXWATERMARKS__RX6DATAWATERMARK 8 +#define W_RXWATERMARKS__RX6DATAWATERMARK 7 +#define O_RXWATERMARKS__RX7DATAWATERMARK 0 +#define W_RXWATERMARKS__RX7DATAWATERMARK 7 #define R_RXWATERMARKS3 0x247 -#define O_RXWATERMARKS__Rx8DataWatermark 24 -#define W_RXWATERMARKS__Rx8DataWatermark 7 -#define O_RXWATERMARKS__Rx9DataWatermark 16 -#define W_RXWATERMARKS__Rx9DataWatermark 7 -#define O_RXWATERMARKS__Rx10DataWatermark 8 -#define W_RXWATERMARKS__Rx10DataWatermark 7 -#define O_RXWATERMARKS__Rx11DataWatermark 0 -#define W_RXWATERMARKS__Rx11DataWatermark 7 +#define O_RXWATERMARKS__RX8DATAWATERMARK 24 +#define W_RXWATERMARKS__RX8DATAWATERMARK 7 +#define O_RXWATERMARKS__RX9DATAWATERMARK 16 +#define W_RXWATERMARKS__RX9DATAWATERMARK 7 +#define O_RXWATERMARKS__RX10DATAWATERMARK 8 +#define W_RXWATERMARKS__RX10DATAWATERMARK 7 +#define O_RXWATERMARKS__RX11DATAWATERMARK 0 +#define W_RXWATERMARKS__RX11DATAWATERMARK 7 #define R_RXWATERMARKS4 0x248 -#define O_RXWATERMARKS__Rx12DataWatermark 24 -#define W_RXWATERMARKS__Rx12DataWatermark 7 -#define O_RXWATERMARKS__Rx13DataWatermark 16 -#define W_RXWATERMARKS__Rx13DataWatermark 7 -#define O_RXWATERMARKS__Rx14DataWatermark 8 -#define W_RXWATERMARKS__Rx14DataWatermark 7 -#define O_RXWATERMARKS__Rx15DataWatermark 0 -#define W_RXWATERMARKS__Rx15DataWatermark 7 +#define O_RXWATERMARKS__RX12DATAWATERMARK 24 +#define W_RXWATERMARKS__RX12DATAWATERMARK 7 +#define O_RXWATERMARKS__RX13DATAWATERMARK 16 +#define W_RXWATERMARKS__RX13DATAWATERMARK 7 +#define O_RXWATERMARKS__RX14DATAWATERMARK 8 +#define W_RXWATERMARKS__RX14DATAWATERMARK 7 +#define O_RXWATERMARKS__RX15DATAWATERMARK 0 +#define W_RXWATERMARKS__RX15DATAWATERMARK 7 #define R_FREEWATERMARKS 0x249 -#define O_FREEWATERMARKS__FreeOutWatermark 16 -#define W_FREEWATERMARKS__FreeOutWatermark 16 -#define O_FREEWATERMARKS__JumFrWatermark 8 -#define W_FREEWATERMARKS__JumFrWatermark 7 -#define O_FREEWATERMARKS__RegFrWatermark 0 -#define W_FREEWATERMARKS__RegFrWatermark 7 +#define O_FREEWATERMARKS__FREEOUTWATERMARK 16 +#define W_FREEWATERMARKS__FREEOUTWATERMARK 16 +#define O_FREEWATERMARKS__JUMFRWATERMARK 8 +#define W_FREEWATERMARKS__JUMFRWATERMARK 7 +#define O_FREEWATERMARKS__REGFRWATERMARK 0 +#define W_FREEWATERMARKS__REGFRWATERMARK 7 #define R_EGRESSFIFOCARVINGSLOTS 0x24a #define CTRL_RES0 0 diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO index e5ae42a0b44a..e4d85d9b4681 100644 --- a/drivers/staging/nvec/TODO +++ b/drivers/staging/nvec/TODO @@ -3,6 +3,4 @@ ToDo list (incomplete, unordered) - move half of the nvec init stuff to i2c-tegra.c - move event handling to nvec_events - finish suspend/resume support - - modifiy the sync_write method to return the received - message in a variable (and return the error code). 
- add support for more device implementations diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index 4ae44a5168f9..9fda136b8e05 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c @@ -14,8 +14,6 @@ * */ -/* #define DEBUG */ - #include <linux/kernel.h> #include <linux/module.h> #include <linux/atomic.h> @@ -40,18 +38,18 @@ #include "nvec.h" #define I2C_CNFG 0x00 -#define I2C_CNFG_PACKET_MODE_EN (1 << 10) -#define I2C_CNFG_NEW_MASTER_SFM (1 << 11) +#define I2C_CNFG_PACKET_MODE_EN BIT(10) +#define I2C_CNFG_NEW_MASTER_SFM BIT(11) #define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12 #define I2C_SL_CNFG 0x20 -#define I2C_SL_NEWSL (1 << 2) -#define I2C_SL_NACK (1 << 1) -#define I2C_SL_RESP (1 << 0) -#define I2C_SL_IRQ (1 << 3) -#define END_TRANS (1 << 4) -#define RCVD (1 << 2) -#define RNW (1 << 1) +#define I2C_SL_NEWSL BIT(2) +#define I2C_SL_NACK BIT(1) +#define I2C_SL_RESP BIT(0) +#define I2C_SL_IRQ BIT(3) +#define END_TRANS BIT(4) +#define RCVD BIT(2) +#define RNW BIT(1) #define I2C_SL_RCVD 0x24 #define I2C_SL_STATUS 0x28 @@ -143,14 +141,14 @@ static int nvec_status_notifier(struct notifier_block *nb, { struct nvec_chip *nvec = container_of(nb, struct nvec_chip, nvec_status_notifier); - unsigned char *msg = (unsigned char *)data; + unsigned char *msg = data; if (event_type != NVEC_CNTL) return NOTIFY_DONE; dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type); print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1, - msg, msg[1] + 2, true); + msg, msg[1] + 2, true); return NOTIFY_OK; } @@ -259,7 +257,7 @@ static void nvec_gpio_set_value(struct nvec_chip *nvec, int value) * occurred, the nvec driver may print an error. */ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data, - short size) + short size) { struct nvec_msg *msg; unsigned long flags; @@ -288,46 +286,49 @@ EXPORT_SYMBOL(nvec_write_async); * @nvec: An &struct nvec_chip * @data: The data to write * @size: The size of @data + * @msg: The response message received * * This is similar to nvec_write_async(), but waits for the * request to be answered before returning. This function * uses a mutex and can thus not be called from e.g. * interrupt handlers. * - * Returns: A pointer to the response message on success, - * %NULL on failure. Free with nvec_msg_free() once no longer - * used. + * Returns: 0 on success, a negative error code on failure. + * The response message is returned in @msg. Shall be freed with + * with nvec_msg_free() once no longer used. 
+ * */ -struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec, - const unsigned char *data, short size) +int nvec_write_sync(struct nvec_chip *nvec, + const unsigned char *data, short size, + struct nvec_msg **msg) { - struct nvec_msg *msg; - mutex_lock(&nvec->sync_write_mutex); + *msg = NULL; nvec->sync_write_pending = (data[1] << 8) + data[0]; if (nvec_write_async(nvec, data, size) < 0) { mutex_unlock(&nvec->sync_write_mutex); - return NULL; + return -ENOMEM; } dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n", - nvec->sync_write_pending); + nvec->sync_write_pending); if (!(wait_for_completion_timeout(&nvec->sync_write, - msecs_to_jiffies(2000)))) { - dev_warn(nvec->dev, "timeout waiting for sync write to complete\n"); + msecs_to_jiffies(2000)))) { + dev_warn(nvec->dev, + "timeout waiting for sync write to complete\n"); mutex_unlock(&nvec->sync_write_mutex); - return NULL; + return -ETIMEDOUT; } dev_dbg(nvec->dev, "nvec_sync_write: pong!\n"); - msg = nvec->last_sync_msg; + *msg = nvec->last_sync_msg; mutex_unlock(&nvec->sync_write_mutex); - return msg; + return 0; } EXPORT_SYMBOL(nvec_write_sync); @@ -422,8 +423,8 @@ static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg) if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5) print_hex_dump(KERN_WARNING, "ec system event ", - DUMP_PREFIX_NONE, 16, 1, msg->data, - msg->data[1] + 2, true); + DUMP_PREFIX_NONE, 16, 1, msg->data, + msg->data[1] + 2, true); atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f, msg->data); @@ -493,8 +494,8 @@ static void nvec_rx_completed(struct nvec_chip *nvec) { if (nvec->rx->pos != nvec_msg_size(nvec->rx)) { dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n", - (uint) nvec_msg_size(nvec->rx), - (uint) nvec->rx->pos); + (uint)nvec_msg_size(nvec->rx), + (uint)nvec->rx->pos); nvec_msg_free(nvec, nvec->rx); nvec->state = 0; @@ -508,8 +509,10 @@ static void nvec_rx_completed(struct nvec_chip *nvec) spin_lock(&nvec->rx_lock); - /* add the received data to the work list - and move the ring buffer pointer to the next entry */ + /* + * Add the received data to the work list and move the ring buffer + * pointer to the next entry. 
+ */ list_add_tail(&nvec->rx->node, &nvec->rx_data); spin_unlock(&nvec->rx_lock); @@ -638,11 +641,9 @@ static irqreturn_t nvec_interrupt(int irq, void *dev) nvec_msg_free(nvec, nvec->rx); nvec->state = 3; nvec_tx_set(nvec); - BUG_ON(nvec->tx->size < 1); to_send = nvec->tx->data[0]; nvec->tx->pos = 1; } else if (status == (I2C_SL_IRQ)) { - BUG_ON(nvec->rx == NULL); nvec->rx->data[1] = received; nvec->rx->pos = 2; nvec->state = 4; @@ -686,8 +687,8 @@ static irqreturn_t nvec_interrupt(int irq, void *dev) if ((status & (RCVD | RNW)) == RCVD) { if (received != nvec->i2c_addr) dev_err(nvec->dev, - "received address 0x%02x, expected 0x%02x\n", - received, nvec->i2c_addr); + "received address 0x%02x, expected 0x%02x\n", + received, nvec->i2c_addr); nvec->state = 1; } @@ -776,7 +777,7 @@ static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec) } if (of_property_read_u32(nvec->dev->of_node, "slave-addr", - &nvec->i2c_addr)) { + &nvec->i2c_addr)) { dev_err(nvec->dev, "no i2c address specified"); return -ENODEV; } @@ -852,14 +853,14 @@ static int tegra_nvec_probe(struct platform_device *pdev) INIT_WORK(&nvec->tx_work, nvec_request_master); err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH, - "nvec gpio"); + "nvec gpio"); if (err < 0) { dev_err(nvec->dev, "couldn't request gpio\n"); return -ENODEV; } err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0, - "nvec", nvec); + "nvec", nvec); if (err) { dev_err(nvec->dev, "couldn't request irq\n"); return -ENODEV; @@ -878,11 +879,13 @@ static int tegra_nvec_probe(struct platform_device *pdev) pm_power_off = nvec_power_off; /* Get Firmware Version */ - msg = nvec_write_sync(nvec, get_firmware_version, 2); + err = nvec_write_sync(nvec, get_firmware_version, 2, &msg); - if (msg) { - dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n", - msg->data[4], msg->data[5], msg->data[6], msg->data[7]); + if (!err) { + dev_warn(nvec->dev, + "ec firmware version %02x.%02x.%02x / %02x\n", + msg->data[4], msg->data[5], + msg->data[6], msg->data[7]); nvec_msg_free(nvec, msg); } @@ -924,6 +927,7 @@ static int tegra_nvec_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int nvec_suspend(struct device *dev) { + int err; struct platform_device *pdev = to_platform_device(dev); struct nvec_chip *nvec = platform_get_drvdata(pdev); struct nvec_msg *msg; @@ -934,8 +938,9 @@ static int nvec_suspend(struct device *dev) /* keep these sync or you'll break suspend */ nvec_toggle_global_events(nvec, false); - msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend)); - nvec_msg_free(nvec, msg); + err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg); + if (!err) + nvec_msg_free(nvec, msg); nvec_disable_i2c_slave(nvec); diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h index 2ec9de906ca3..c03ca8d9572a 100644 --- a/drivers/staging/nvec/nvec.h +++ b/drivers/staging/nvec/nvec.h @@ -168,8 +168,9 @@ struct nvec_chip { int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data, short size); -struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec, - const unsigned char *data, short size); +int nvec_write_sync(struct nvec_chip *nvec, + const unsigned char *data, short size, + struct nvec_msg **msg); int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, diff --git a/drivers/staging/nvec/nvec_paz00.c b/drivers/staging/nvec/nvec_paz00.c index 68146bfee2b3..51dbeeb3320e 100644 --- a/drivers/staging/nvec/nvec_paz00.c +++ b/drivers/staging/nvec/nvec_paz00.c @@ -41,7 
+41,6 @@ static void nvec_led_brightness_set(struct led_classdev *led_cdev, nvec_write_async(led->nvec, buf, sizeof(buf)); led->cdev.brightness = value; - } static int nvec_paz00_probe(struct platform_device *pdev) @@ -63,7 +62,7 @@ static int nvec_paz00_probe(struct platform_device *pdev) platform_set_drvdata(pdev, led); - ret = led_classdev_register(&pdev->dev, &led->cdev); + ret = devm_led_classdev_register(&pdev->dev, &led->cdev); if (ret < 0) return ret; @@ -73,18 +72,8 @@ static int nvec_paz00_probe(struct platform_device *pdev) return 0; } -static int nvec_paz00_remove(struct platform_device *pdev) -{ - struct nvec_led *led = platform_get_drvdata(pdev); - - led_classdev_unregister(&led->cdev); - - return 0; -} - static struct platform_driver nvec_paz00_driver = { .probe = nvec_paz00_probe, - .remove = nvec_paz00_remove, .driver = { .name = "nvec-paz00", }, diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c index 04a7402ae2df..b4a0545e8806 100644 --- a/drivers/staging/nvec/nvec_power.c +++ b/drivers/staging/nvec/nvec_power.c @@ -207,8 +207,10 @@ static int nvec_power_bat_notifier(struct notifier_block *nb, case TYPE: memcpy(power->bat_type, &res->plc, res->length - 2); power->bat_type[res->length - 2] = '\0'; - /* this differs a little from the spec - fill in more if you find some */ + /* + * This differs a little from the spec fill in more if you find + * some. + */ if (!strncmp(power->bat_type, "Li", 30)) power->bat_type_enum = POWER_SUPPLY_TECHNOLOGY_LION; else @@ -356,12 +358,14 @@ static void nvec_power_poll(struct work_struct *work) if (counter >= ARRAY_SIZE(bat_iter)) counter = 0; -/* AC status via sys req */ + /* AC status via sys req */ nvec_write_async(power->nvec, buf, 2); msleep(100); -/* select a battery request function via round robin - doing it all at once seems to overload the power supply */ + /* + * Select a battery request function via round robin doing it all at + * once seems to overload the power supply. + */ buf[0] = NVEC_BAT; buf[1] = bat_iter[counter++]; nvec_write_async(power->nvec, buf, 2); diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c index 0922dd3a08d3..a324322ee0ad 100644 --- a/drivers/staging/nvec/nvec_ps2.c +++ b/drivers/staging/nvec/nvec_ps2.c @@ -78,7 +78,7 @@ static int nvec_ps2_notifier(struct notifier_block *nb, unsigned long event_type, void *data) { int i; - unsigned char *msg = (unsigned char *)data; + unsigned char *msg = data; switch (event_type) { case NVEC_PS2_EVT: diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO index cc58a7e88baf..2b29acca5caa 100644 --- a/drivers/staging/octeon-usb/TODO +++ b/drivers/staging/octeon-usb/TODO @@ -1,11 +1,8 @@ -This driver is functional and has been tested on EdgeRouter Lite with -USB mass storage. +This driver is functional and has been tested on EdgeRouter Lite, +D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage. TODO: - kernel coding style - checkpatch warnings - - dead code elimination - - device tree bindings - - possibly eliminate the extra "hardware abstraction layer" Contact: Aaro Koskinen <aaro.koskinen@iki.fi> diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c index 6f2871784ba5..17442b3ed849 100644 --- a/drivers/staging/octeon-usb/octeon-hcd.c +++ b/drivers/staging/octeon-usb/octeon-hcd.c @@ -43,29 +43,15 @@ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 
*/ -#include <linux/kernel.h> + +#include <linux/usb.h> +#include <linux/slab.h> #include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> +#include <linux/usb/hcd.h> #include <linux/prefetch.h> -#include <linux/interrupt.h> #include <linux/platform_device.h> -#include <linux/usb.h> - -#include <linux/time.h> -#include <linux/delay.h> - -#include <asm/octeon/cvmx.h> -#include <asm/octeon/cvmx-iob-defs.h> - -#include <linux/usb/hcd.h> - -#include <linux/err.h> #include <asm/octeon/octeon.h> -#include <asm/octeon/cvmx-helper.h> -#include <asm/octeon/cvmx-sysinfo.h> -#include <asm/octeon/cvmx-helper-board.h> #include "octeon-hcd.h" @@ -113,35 +99,35 @@ enum cvmx_usb_direction { }; /** - * enum cvmx_usb_complete - possible callback function status codes + * enum cvmx_usb_status - possible callback function status codes * - * @CVMX_USB_COMPLETE_SUCCESS: The transaction / operation finished without + * @CVMX_USB_STATUS_OK: The transaction / operation finished without * any errors - * @CVMX_USB_COMPLETE_SHORT: FIXME: This is currently not implemented - * @CVMX_USB_COMPLETE_CANCEL: The transaction was canceled while in flight + * @CVMX_USB_STATUS_SHORT: FIXME: This is currently not implemented + * @CVMX_USB_STATUS_CANCEL: The transaction was canceled while in flight * by a user call to cvmx_usb_cancel - * @CVMX_USB_COMPLETE_ERROR: The transaction aborted with an unexpected + * @CVMX_USB_STATUS_ERROR: The transaction aborted with an unexpected * error status - * @CVMX_USB_COMPLETE_STALL: The transaction received a USB STALL response + * @CVMX_USB_STATUS_STALL: The transaction received a USB STALL response * from the device - * @CVMX_USB_COMPLETE_XACTERR: The transaction failed with an error from the + * @CVMX_USB_STATUS_XACTERR: The transaction failed with an error from the * device even after a number of retries - * @CVMX_USB_COMPLETE_DATATGLERR: The transaction failed with a data toggle + * @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle * error even after a number of retries - * @CVMX_USB_COMPLETE_BABBLEERR: The transaction failed with a babble error - * @CVMX_USB_COMPLETE_FRAMEERR: The transaction failed with a frame error + * @CVMX_USB_STATUS_BABBLEERR: The transaction failed with a babble error + * @CVMX_USB_STATUS_FRAMEERR: The transaction failed with a frame error * even after a number of retries */ -enum cvmx_usb_complete { - CVMX_USB_COMPLETE_SUCCESS, - CVMX_USB_COMPLETE_SHORT, - CVMX_USB_COMPLETE_CANCEL, - CVMX_USB_COMPLETE_ERROR, - CVMX_USB_COMPLETE_STALL, - CVMX_USB_COMPLETE_XACTERR, - CVMX_USB_COMPLETE_DATATGLERR, - CVMX_USB_COMPLETE_BABBLEERR, - CVMX_USB_COMPLETE_FRAMEERR, +enum cvmx_usb_status { + CVMX_USB_STATUS_OK, + CVMX_USB_STATUS_SHORT, + CVMX_USB_STATUS_CANCEL, + CVMX_USB_STATUS_ERROR, + CVMX_USB_STATUS_STALL, + CVMX_USB_STATUS_XACTERR, + CVMX_USB_STATUS_DATATGLERR, + CVMX_USB_STATUS_BABBLEERR, + CVMX_USB_STATUS_FRAMEERR, }; /** @@ -160,13 +146,13 @@ enum cvmx_usb_complete { * status call. 
*/ struct cvmx_usb_port_status { - uint32_t reserved : 25; - uint32_t port_enabled : 1; - uint32_t port_over_current : 1; - uint32_t port_powered : 1; + u32 reserved : 25; + u32 port_enabled : 1; + u32 port_over_current : 1; + u32 port_powered : 1; enum cvmx_usb_speed port_speed : 2; - uint32_t connected : 1; - uint32_t connect_change : 1; + u32 connected : 1; + u32 connect_change : 1; }; /** @@ -180,7 +166,7 @@ struct cvmx_usb_port_status { struct cvmx_usb_iso_packet { int offset; int length; - enum cvmx_usb_complete status; + enum cvmx_usb_status status; }; /** @@ -234,13 +220,13 @@ enum cvmx_usb_pipe_flags { * The low level hardware can transfer a maximum of this number of bytes in each * transfer. The field is 19 bits wide */ -#define MAX_TRANSFER_BYTES ((1<<19)-1) +#define MAX_TRANSFER_BYTES ((1 << 19) - 1) /* * The low level hardware can transfer a maximum of this number of packets in * each transfer. The field is 10 bits wide */ -#define MAX_TRANSFER_PACKETS ((1<<10)-1) +#define MAX_TRANSFER_PACKETS ((1 << 10) - 1) /** * Logical transactions may take numerous low level @@ -284,9 +270,9 @@ enum cvmx_usb_stage { struct cvmx_usb_transaction { struct list_head node; enum cvmx_usb_transfer type; - uint64_t buffer; + u64 buffer; int buffer_length; - uint64_t control_header; + u64 control_header; int iso_start_frame; int iso_number_packets; struct cvmx_usb_iso_packet *iso_packets; @@ -328,36 +314,37 @@ struct cvmx_usb_transaction { struct cvmx_usb_pipe { struct list_head node; struct list_head transactions; - uint64_t interval; - uint64_t next_tx_frame; + u64 interval; + u64 next_tx_frame; enum cvmx_usb_pipe_flags flags; enum cvmx_usb_speed device_speed; enum cvmx_usb_transfer transfer_type; enum cvmx_usb_direction transfer_dir; int multi_count; - uint16_t max_packet; - uint8_t device_addr; - uint8_t endpoint_num; - uint8_t hub_device_addr; - uint8_t hub_port; - uint8_t pid_toggle; - uint8_t channel; - int8_t split_sc_frame; + u16 max_packet; + u8 device_addr; + u8 endpoint_num; + u8 hub_device_addr; + u8 hub_port; + u8 pid_toggle; + u8 channel; + s8 split_sc_frame; }; struct cvmx_usb_tx_fifo { struct { int channel; int size; - uint64_t address; - } entry[MAX_CHANNELS+1]; + u64 address; + } entry[MAX_CHANNELS + 1]; int head; int tail; }; /** - * struct cvmx_usb_state - the state of the USB block + * struct octeon_hcd - the state of the USB block * + * lock: Serialization lock. * init_flags: Flags passed to initialize. * index: Which USB block this is for. * idle_hardware_channels: Bit set for every idle hardware channel. @@ -372,7 +359,8 @@ struct cvmx_usb_tx_fifo { * frame_number: Increments every SOF interrupt for time keeping. * active_split: Points to the current active split, or NULL. */ -struct cvmx_usb_state { +struct octeon_hcd { + spinlock_t lock; /* serialization lock */ int init_flags; int index; int idle_hardware_channels; @@ -382,23 +370,18 @@ struct cvmx_usb_state { struct cvmx_usb_port_status port_status; struct list_head idle_pipes; struct list_head active_pipes[4]; - uint64_t frame_number; + u64 frame_number; struct cvmx_usb_transaction *active_split; struct cvmx_usb_tx_fifo periodic; struct cvmx_usb_tx_fifo nonperiodic; }; -struct octeon_hcd { - spinlock_t lock; - struct cvmx_usb_state usb; -}; - /* This macro spins on a register waiting for it to reach a condition. 
*/ #define CVMX_WAIT_FOR_FIELD32(address, _union, cond, timeout_usec) \ ({int result; \ do { \ - uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \ - octeon_get_clock_rate() / 1000000; \ + u64 done = cvmx_get_cycle() + (u64)timeout_usec * \ + octeon_get_clock_rate() / 1000000; \ union _union c; \ \ while (1) { \ @@ -431,7 +414,7 @@ struct octeon_hcd { /* Returns the IO address to push/pop stuff data from the FIFOs */ #define USB_FIFO_ADDRESS(channel, usb_index) \ - (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000) + (CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000) /** * struct octeon_temp_buffer - a bounce buffer for USB transfers @@ -447,11 +430,6 @@ struct octeon_temp_buffer { u8 data[0]; }; -static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p) -{ - return container_of(p, struct octeon_hcd, usb); -} - static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p) { return container_of((void *)p, struct usb_hcd, hcd_priv); @@ -562,14 +540,12 @@ static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) * * Returns: Result of the read */ -static inline uint32_t cvmx_usb_read_csr32(struct cvmx_usb_state *usb, - uint64_t address) +static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address) { - uint32_t result = cvmx_read64_uint32(address ^ 4); + u32 result = cvmx_read64_uint32(address ^ 4); return result; } - /** * Write a USB 32bit CSR. It performs the necessary address * swizzle for 32bit CSRs and logs the value in a readable format @@ -579,8 +555,8 @@ static inline uint32_t cvmx_usb_read_csr32(struct cvmx_usb_state *usb, * @address: 64bit address to write * @value: Value to write */ -static inline void cvmx_usb_write_csr32(struct cvmx_usb_state *usb, - uint64_t address, uint32_t value) +static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb, + u64 address, u32 value) { cvmx_write64_uint32(address ^ 4, value); cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index)); @@ -595,14 +571,13 @@ static inline void cvmx_usb_write_csr32(struct cvmx_usb_state *usb, * * Returns: Non zero if we need to do split transactions */ -static inline int cvmx_usb_pipe_needs_split(struct cvmx_usb_state *usb, +static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe) { return pipe->device_speed != CVMX_USB_SPEED_HIGH && usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH; } - /** * Trivial utility function to return the correct PID for a pipe * @@ -617,7 +592,7 @@ static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe) return 0; /* Data0 */ } -static void cvmx_fifo_setup(struct cvmx_usb_state *usb) +static void cvmx_fifo_setup(struct octeon_hcd *usb) { union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3; union cvmx_usbcx_gnptxfsiz npsiz; @@ -675,7 +650,7 @@ static void cvmx_fifo_setup(struct cvmx_usb_state *usb) * * Returns: 0 or a negative error code. */ -static int cvmx_usb_shutdown(struct cvmx_usb_state *usb) +static int cvmx_usb_shutdown(struct octeon_hcd *usb) { union cvmx_usbnx_clk_ctl usbn_clk_ctl; @@ -704,12 +679,12 @@ static int cvmx_usb_shutdown(struct cvmx_usb_state *usb) * off in the disabled state. * * @dev: Pointer to struct device for logging purposes. - * @usb: Pointer to struct cvmx_usb_state. + * @usb: Pointer to struct octeon_hcd. * * Returns: 0 or a negative error code. 
*/ static int cvmx_usb_initialize(struct device *dev, - struct cvmx_usb_state *usb) + struct octeon_hcd *usb) { int channel; int divisor; @@ -975,7 +950,7 @@ retry: * * @usb: USB device state populated by cvmx_usb_initialize(). */ -static void cvmx_usb_reset_port(struct cvmx_usb_state *usb) +static void cvmx_usb_reset_port(struct octeon_hcd *usb) { usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index)); @@ -1002,7 +977,6 @@ static void cvmx_usb_reset_port(struct cvmx_usb_state *usb) CVMX_USBCX_HPRT(usb->index)); } - /** * Disable a USB port. After this call the USB port will not * generate data transfers and will not generate events. @@ -1013,7 +987,7 @@ static void cvmx_usb_reset_port(struct cvmx_usb_state *usb) * * Returns: 0 or a negative error code. */ -static int cvmx_usb_disable(struct cvmx_usb_state *usb) +static int cvmx_usb_disable(struct octeon_hcd *usb) { /* Disable the port */ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, @@ -1021,7 +995,6 @@ static int cvmx_usb_disable(struct cvmx_usb_state *usb) return 0; } - /** * Get the current state of the USB port. Use this call to * determine if the usb port has anything connected, is enabled, @@ -1033,8 +1006,7 @@ static int cvmx_usb_disable(struct cvmx_usb_state *usb) * * Returns: Port status information */ -static struct cvmx_usb_port_status cvmx_usb_get_status( - struct cvmx_usb_state *usb) +static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb) { union cvmx_usbcx_hprt usbc_hprt; struct cvmx_usb_port_status result; @@ -1048,7 +1020,7 @@ static struct cvmx_usb_port_status cvmx_usb_get_status( result.port_speed = usbc_hprt.s.prtspd; result.connected = usbc_hprt.s.prtconnsts; result.connect_change = - (result.connected != usb->port_status.connected); + result.connected != usb->port_status.connected; return result; } @@ -1105,7 +1077,7 @@ static struct cvmx_usb_port_status cvmx_usb_get_status( * * Returns: A non-NULL value is a pipe. NULL means an error. */ -static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb, +static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb, int device_addr, int endpoint_num, enum cvmx_usb_speed @@ -1125,8 +1097,8 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb, if (!pipe) return NULL; if ((device_speed == CVMX_USB_SPEED_HIGH) && - (transfer_dir == CVMX_USB_DIRECTION_OUT) && - (transfer_type == CVMX_USB_TRANSFER_BULK)) + (transfer_dir == CVMX_USB_DIRECTION_OUT) && + (transfer_type == CVMX_USB_TRANSFER_BULK)) pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; pipe->device_addr = device_addr; pipe->endpoint_num = endpoint_num; @@ -1143,9 +1115,9 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb, if (!interval) interval = 1; if (cvmx_usb_pipe_needs_split(usb, pipe)) { - pipe->interval = interval*8; + pipe->interval = interval * 8; /* Force start splits to be schedule on uFrame 0 */ - pipe->next_tx_frame = ((usb->frame_number+7)&~7) + + pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) + pipe->interval; } else { pipe->interval = interval; @@ -1166,7 +1138,6 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb, return pipe; } - /** * Poll the RX FIFOs and remove data as needed. This function is only used * in non DMA mode. It is very important that this function be called quickly @@ -1174,13 +1145,13 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb, * * @usb: USB device state populated by cvmx_usb_initialize(). 
*/ -static void cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb) +static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb) { union cvmx_usbcx_grxstsph rx_status; int channel; int bytes; - uint64_t address; - uint32_t *ptr; + u64 address; + u32 *ptr; rx_status.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index)); @@ -1213,7 +1184,6 @@ static void cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb) CVMX_SYNCW; } - /** * Fill the TX hardware fifo with data out of the software * fifos @@ -1225,7 +1195,7 @@ static void cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb) * Returns: Non zero if the hardware fifo was too small and needs * to be serviced again. */ -static int cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb, +static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb, struct cvmx_usb_tx_fifo *fifo, int available) { /* @@ -1234,9 +1204,9 @@ static int cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb, */ while (available && (fifo->head != fifo->tail)) { int i = fifo->tail; - const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address); - uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, - usb->index) ^ 4; + const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address); + u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, + usb->index) ^ 4; int words = available; /* Limit the amount of data to what the SW fifo has */ @@ -1275,13 +1245,12 @@ static int cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb, return fifo->head != fifo->tail; } - /** * Check the hardware FIFOs and fill them as needed * * @usb: USB device state populated by cvmx_usb_initialize(). */ -static void cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb) +static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb) { if (usb->periodic.head != usb->periodic.tail) { union cvmx_usbcx_hptxsts tx_status; @@ -1312,14 +1281,13 @@ static void cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb) } } - /** * Fill the TX FIFO with an outgoing packet * * @usb: USB device state populated by cvmx_usb_initialize(). 
* @channel: Channel number to get packet from */ -static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel) +static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel) { union cvmx_usbcx_hccharx hcchar; union cvmx_usbcx_hcspltx usbc_hcsplt; @@ -1348,7 +1316,7 @@ static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel) return; if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) || - (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS)) + (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS)) fifo = &usb->periodic; else fifo = &usb->nonperiodic; @@ -1357,7 +1325,7 @@ static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel) fifo->entry[fifo->head].address = cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel * 8); - fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2; + fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2; fifo->head++; if (fifo->head > MAX_CHANNELS) fifo->head = 0; @@ -1373,12 +1341,11 @@ static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel) * @channel: Channel to setup * @pipe: Pipe for control transaction */ -static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb, +static void cvmx_usb_start_channel_control(struct octeon_hcd *usb, int channel, struct cvmx_usb_pipe *pipe) { - struct octeon_hcd *priv = cvmx_usb_to_octeon(usb); - struct usb_hcd *hcd = octeon_to_hcd(priv); + struct usb_hcd *hcd = octeon_to_hcd(usb); struct device *dev = hcd->self.controller; struct cvmx_usb_transaction *transaction = list_first_entry(&pipe->transactions, typeof(*transaction), @@ -1488,9 +1455,9 @@ static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb, */ packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet); - if (packets_to_transfer == 0) + if (packets_to_transfer == 0) { packets_to_transfer = 1; - else if ((packets_to_transfer > 1) && + } else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { /* * Limit to one packet when not using DMA. 
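Two round-up idioms recur in these hunks: DIV_ROUND_UP() turns a byte count into a packet count (the driver then clamps a zero result to one packet), and (size + 3) >> 2 turns a byte count into a count of 32-bit FIFO words when queuing TX entries. A small self-contained sketch using the kernel's usual DIV_ROUND_UP definition; the helper names are illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Number of max_packet-sized USB packets needed for a transfer. */
static int packets_for(int bytes, int max_packet)
{
	return DIV_ROUND_UP(bytes, max_packet);
}

/* Number of 32-bit words needed to hold a byte count. */
static int words_for(int bytes)
{
	return (bytes + 3) >> 2;
}

int main(void)
{
	/* prints "2 packets, 250 words" */
	printf("%d packets, %d words\n",
	       packets_for(1000, 512), words_for(1000));
	return 0;
}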
Channels must be @@ -1515,7 +1482,6 @@ static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb, usbc_hctsiz.u32); } - /** * Start a channel to perform the pipe's head transaction * @@ -1523,7 +1489,7 @@ static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb, * @channel: Channel to setup * @pipe: Pipe to start */ -static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, +static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel, struct cvmx_usb_pipe *pipe) { struct cvmx_usb_transaction *transaction = @@ -1539,7 +1505,7 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED; /* Mark this channel as in use */ - usb->idle_hardware_channels &= ~(1<<channel); + usb->idle_hardware_channels &= ~(1 << channel); /* Enable the channel interrupt bits */ { @@ -1579,22 +1545,22 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, usbc_hcintmsk.s.xfercomplmsk = 1; } cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCINTMSKX(channel, usb->index), - usbc_hcintmsk.u32); + CVMX_USBCX_HCINTMSKX(channel, usb->index), + usbc_hcintmsk.u32); /* Enable the channel interrupt to propagate */ usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index)); - usbc_haintmsk.s.haintmsk |= 1<<channel; + usbc_haintmsk.s.haintmsk |= 1 << channel; cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), usbc_haintmsk.u32); } /* Setup the location the DMA engine uses. */ { - uint64_t reg; - uint64_t dma_address = transaction->buffer + - transaction->actual_bytes; + u64 reg; + u64 dma_address = transaction->buffer + + transaction->actual_bytes; if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS) dma_address = transaction->buffer + @@ -1636,15 +1602,16 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, * We only store the lower two bits since the time ahead * can only be two frames */ - if ((transaction->stage&1) == 0) { + if ((transaction->stage & 1) == 0) { if (transaction->type == CVMX_USB_TRANSFER_BULK) pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f; else pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f; - } else + } else { pipe->split_sc_frame = -1; + } usbc_hcsplt.s.spltena = 1; usbc_hcsplt.s.hubaddr = pipe->hub_device_addr; @@ -1666,10 +1633,9 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, * begin/middle/end of the data or all */ if (!usbc_hcsplt.s.compsplt && - (pipe->transfer_dir == - CVMX_USB_DIRECTION_OUT) && - (pipe->transfer_type == - CVMX_USB_TRANSFER_ISOCHRONOUS)) { + (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) && + (pipe->transfer_type == + CVMX_USB_TRANSFER_ISOCHRONOUS)) { /* * Clear the split complete frame number as * there isn't going to be a split complete @@ -1732,11 +1698,11 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, */ packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet); - if (packets_to_transfer == 0) + if (packets_to_transfer == 0) { packets_to_transfer = 1; - else if ((packets_to_transfer > 1) && - (usb->init_flags & - CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { + } else if ((packets_to_transfer > 1) && + (usb->init_flags & + CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) { /* * Limit to one packet when not using DMA. Channels must * be restarted between every packet for IN @@ -1783,7 +1749,7 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, * Set the startframe odd/even properly. 
This is only used for * periodic */ - usbc_hcchar.s.oddfrm = usb->frame_number&1; + usbc_hcchar.s.oddfrm = usb->frame_number & 1; /* * Set the number of back to back packets allowed by this @@ -1843,9 +1809,11 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, break; } { - union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = + union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HCTSIZX(channel, usb->index))}; + CVMX_USBCX_HCTSIZX(channel, + usb->index)) + }; transaction->xfersize = usbc_hctsiz.s.xfersize; transaction->pktcnt = usbc_hctsiz.s.pktcnt; } @@ -1858,21 +1826,19 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel, cvmx_usb_fill_tx_fifo(usb, channel); } - /** * Find a pipe that is ready to be scheduled to hardware. * @usb: USB device state populated by cvmx_usb_initialize(). - * @list: Pipe list to search - * @current_frame: - * Frame counter to use as a time reference. + * @xfer_type: Transfer type * * Returns: Pipe or NULL if none are ready */ static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe( - struct cvmx_usb_state *usb, - struct list_head *list, - uint64_t current_frame) + struct octeon_hcd *usb, + enum cvmx_usb_transfer xfer_type) { + struct list_head *list = usb->active_pipes + xfer_type; + u64 current_frame = usb->frame_number; struct cvmx_usb_pipe *pipe; list_for_each_entry(pipe, list, node) { @@ -1880,11 +1846,11 @@ static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe( list_first_entry(&pipe->transactions, typeof(*t), node); if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t && - (pipe->next_tx_frame <= current_frame) && - ((pipe->split_sc_frame == -1) || - ((((int)current_frame - (int)pipe->split_sc_frame) - & 0x7f) < 0x40)) && - (!usb->active_split || (usb->active_split == t))) { + (pipe->next_tx_frame <= current_frame) && + ((pipe->split_sc_frame == -1) || + ((((int)current_frame - pipe->split_sc_frame) & 0x7f) < + 0x40)) && + (!usb->active_split || (usb->active_split == t))) { prefetch(t); return pipe; } @@ -1892,6 +1858,32 @@ static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe( return NULL; } +static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb, + int is_sof) +{ + struct cvmx_usb_pipe *pipe; + + /* Find a pipe needing service. */ + if (is_sof) { + /* + * Only process periodic pipes on SOF interrupts. This way we + * are sure that the periodic data is sent in the beginning of + * the frame. + */ + pipe = cvmx_usb_find_ready_pipe(usb, + CVMX_USB_TRANSFER_ISOCHRONOUS); + if (pipe) + return pipe; + pipe = cvmx_usb_find_ready_pipe(usb, + CVMX_USB_TRANSFER_INTERRUPT); + if (pipe) + return pipe; + } + pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL); + if (pipe) + return pipe; + return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK); +} /** * Called whenever a pipe might need to be scheduled to the @@ -1900,7 +1892,7 @@ static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe( * @usb: USB device state populated by cvmx_usb_initialize(). * @is_sof: True if this schedule was called on a SOF interrupt. 
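The new cvmx_usb_next_pipe() helper above encodes a fixed service order: on SOF interrupts periodic traffic is tried first (isochronous, then interrupt), while control and bulk pipes are always tried last. A compact standalone sketch of that priority scan with a stubbed-out lookup; all names below are illustrative:

#include <stdio.h>

enum xfer_type { XFER_ISOC, XFER_INTR, XFER_CTRL, XFER_BULK, XFER_TYPES };

/* Stand-in for cvmx_usb_find_ready_pipe(): nonzero if a pipe of the
 * given type is ready.  Here it just consults a table.
 */
static int find_ready(const int ready[XFER_TYPES], enum xfer_type t)
{
	return ready[t];
}

/* Pick the next transfer type to service, mirroring the SOF-first
 * ordering of the helper in the patch.
 */
static int next_type(const int ready[XFER_TYPES], int is_sof)
{
	if (is_sof) {
		if (find_ready(ready, XFER_ISOC))
			return XFER_ISOC;
		if (find_ready(ready, XFER_INTR))
			return XFER_INTR;
	}
	if (find_ready(ready, XFER_CTRL))
		return XFER_CTRL;
	if (find_ready(ready, XFER_BULK))
		return XFER_BULK;
	return -1;
}

int main(void)
{
	int ready[XFER_TYPES] = { 1, 0, 1, 1 };	/* isoc, ctrl, bulk ready */

	/* prints "sof=1 -> 0, sof=0 -> 2" (isochronous first, then control) */
	printf("sof=1 -> %d, sof=0 -> %d\n",
	       next_type(ready, 1), next_type(ready, 0));
	return 0;
}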
*/ -static void cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof) +static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof) { int channel; struct cvmx_usb_pipe *pipe; @@ -1922,7 +1914,7 @@ static void cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof) CVMX_USBCX_HFIR(usb->index)) }; - if (hfnum.s.frrem < hfir.s.frint/4) + if (hfnum.s.frrem < hfir.s.frint / 4) goto done; } @@ -1932,35 +1924,7 @@ static void cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof) if (unlikely(channel > 7)) break; - /* Find a pipe needing service */ - pipe = NULL; - if (is_sof) { - /* - * Only process periodic pipes on SOF interrupts. This - * way we are sure that the periodic data is sent in the - * beginning of the frame - */ - pipe = cvmx_usb_find_ready_pipe(usb, - usb->active_pipes + - CVMX_USB_TRANSFER_ISOCHRONOUS, - usb->frame_number); - if (likely(!pipe)) - pipe = cvmx_usb_find_ready_pipe(usb, - usb->active_pipes + - CVMX_USB_TRANSFER_INTERRUPT, - usb->frame_number); - } - if (likely(!pipe)) { - pipe = cvmx_usb_find_ready_pipe(usb, - usb->active_pipes + - CVMX_USB_TRANSFER_CONTROL, - usb->frame_number); - if (likely(!pipe)) - pipe = cvmx_usb_find_ready_pipe(usb, - usb->active_pipes + - CVMX_USB_TRANSFER_BULK, - usb->frame_number); - } + pipe = cvmx_usb_next_pipe(usb, is_sof); if (!pipe) break; @@ -1974,7 +1938,7 @@ done: */ need_sof = 0; for (ttype = CVMX_USB_TRANSFER_CONTROL; - ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) { + ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) { list_for_each_entry(pipe, &usb->active_pipes[ttype], node) { if (pipe->next_tx_frame > usb->frame_number) { need_sof = 1; @@ -1986,19 +1950,18 @@ done: cvmx_usbcx_gintmsk, sofmsk, need_sof); } -static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb, - enum cvmx_usb_complete status, +static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb, + enum cvmx_usb_status status, struct cvmx_usb_pipe *pipe, struct cvmx_usb_transaction *transaction, int bytes_transferred, struct urb *urb) { - struct octeon_hcd *priv = cvmx_usb_to_octeon(usb); - struct usb_hcd *hcd = octeon_to_hcd(priv); + struct usb_hcd *hcd = octeon_to_hcd(usb); struct device *dev = hcd->self.controller; - if (likely(status == CVMX_USB_COMPLETE_SUCCESS)) + if (likely(status == CVMX_USB_STATUS_OK)) urb->actual_length = bytes_transferred; else urb->actual_length = 0; @@ -2006,7 +1969,8 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb, urb->hcpriv = NULL; /* For Isochronous transactions we need to update the URB packet status - list from data in our private copy */ + * list from data in our private copy + */ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { int i; /* @@ -2014,12 +1978,11 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb, * field. 
*/ struct cvmx_usb_iso_packet *iso_packet = - (struct cvmx_usb_iso_packet *) urb->setup_packet; + (struct cvmx_usb_iso_packet *)urb->setup_packet; /* Recalculate the transfer size by adding up each packet */ urb->actual_length = 0; for (i = 0; i < urb->number_of_packets; i++) { - if (iso_packet[i].status == - CVMX_USB_COMPLETE_SUCCESS) { + if (iso_packet[i].status == CVMX_USB_STATUS_OK) { urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = iso_packet[i].length; @@ -2039,41 +2002,41 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb, } switch (status) { - case CVMX_USB_COMPLETE_SUCCESS: + case CVMX_USB_STATUS_OK: urb->status = 0; break; - case CVMX_USB_COMPLETE_CANCEL: + case CVMX_USB_STATUS_CANCEL: if (urb->status == 0) urb->status = -ENOENT; break; - case CVMX_USB_COMPLETE_STALL: + case CVMX_USB_STATUS_STALL: dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n", pipe, transaction, bytes_transferred); urb->status = -EPIPE; break; - case CVMX_USB_COMPLETE_BABBLEERR: + case CVMX_USB_STATUS_BABBLEERR: dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n", pipe, transaction, bytes_transferred); urb->status = -EPIPE; break; - case CVMX_USB_COMPLETE_SHORT: + case CVMX_USB_STATUS_SHORT: dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n", pipe, transaction, bytes_transferred); urb->status = -EREMOTEIO; break; - case CVMX_USB_COMPLETE_ERROR: - case CVMX_USB_COMPLETE_XACTERR: - case CVMX_USB_COMPLETE_DATATGLERR: - case CVMX_USB_COMPLETE_FRAMEERR: + case CVMX_USB_STATUS_ERROR: + case CVMX_USB_STATUS_XACTERR: + case CVMX_USB_STATUS_DATATGLERR: + case CVMX_USB_STATUS_FRAMEERR: dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n", status, pipe, transaction, bytes_transferred); urb->status = -EPROTO; break; } - usb_hcd_unlink_urb_from_ep(octeon_to_hcd(priv), urb); - spin_unlock(&priv->lock); - usb_hcd_giveback_urb(octeon_to_hcd(priv), urb, urb->status); - spin_lock(&priv->lock); + usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb); + spin_unlock(&usb->lock); + usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status); + spin_lock(&usb->lock); } /** @@ -2087,10 +2050,10 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb, * @complete_code: * Completion code */ -static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - enum cvmx_usb_complete complete_code) +static void cvmx_usb_complete(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + enum cvmx_usb_status complete_code) { /* If this was a split then clear our split in progress marker */ if (usb->active_split == transaction) @@ -2110,7 +2073,7 @@ static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb, * next one */ if ((transaction->iso_number_packets > 1) && - (complete_code == CVMX_USB_COMPLETE_SUCCESS)) { + (complete_code == CVMX_USB_STATUS_OK)) { /* No bytes transferred for this packet as of yet */ transaction->actual_bytes = 0; /* One less ISO waiting to transfer */ @@ -2133,7 +2096,6 @@ static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb, kfree(transaction); } - /** * Submit a usb transaction to a pipe. Called for all types * of transactions. @@ -2157,12 +2119,12 @@ static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb, * Returns: Transaction or NULL on failure. 
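The completion callback above maps the renamed CVMX_USB_STATUS_* codes onto standard URB error numbers: stall and babble both become -EPIPE, a short transfer becomes -EREMOTEIO, and transaction, toggle, and frame errors become -EPROTO (the driver only overwrites a still-zero status for cancels). A minimal userspace sketch of that mapping; the local enum and its numeric values are illustrative, while the errno constants are the standard Linux ones:

#include <errno.h>
#include <stdio.h>

/* Completion codes mirroring enum cvmx_usb_status in the hunk above. */
enum usb_status {
	STATUS_OK, STATUS_CANCEL, STATUS_STALL, STATUS_BABBLEERR,
	STATUS_SHORT, STATUS_ERROR, STATUS_XACTERR, STATUS_DATATGLERR,
	STATUS_FRAMEERR,
};

/* Map a completion code to the errno stored in urb->status. */
static int status_to_errno(enum usb_status status)
{
	switch (status) {
	case STATUS_OK:		return 0;
	case STATUS_CANCEL:	return -ENOENT;
	case STATUS_STALL:
	case STATUS_BABBLEERR:	return -EPIPE;
	case STATUS_SHORT:	return -EREMOTEIO;
	default:		return -EPROTO;	/* ERROR, XACTERR, ... */
	}
}

int main(void)
{
	printf("stall -> %d, short -> %d\n",
	       status_to_errno(STATUS_STALL), status_to_errno(STATUS_SHORT));
	return 0;
}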
*/ static struct cvmx_usb_transaction *cvmx_usb_submit_transaction( - struct cvmx_usb_state *usb, + struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe, enum cvmx_usb_transfer type, - uint64_t buffer, + u64 buffer, int buffer_length, - uint64_t control_header, + u64 control_header, int iso_start_frame, int iso_number_packets, struct cvmx_usb_iso_packet *iso_packets, @@ -2208,7 +2170,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_transaction( return transaction; } - /** * Call to submit a USB Bulk transfer to a pipe. * @@ -2219,7 +2180,7 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_transaction( * Returns: A submitted transaction or NULL on failure. */ static struct cvmx_usb_transaction *cvmx_usb_submit_bulk( - struct cvmx_usb_state *usb, + struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe, struct urb *urb) { @@ -2233,7 +2194,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_bulk( urb); } - /** * Call to submit a USB Interrupt transfer to a pipe. * @@ -2244,7 +2204,7 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_bulk( * Returns: A submitted transaction or NULL on failure. */ static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt( - struct cvmx_usb_state *usb, + struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe, struct urb *urb) { @@ -2259,7 +2219,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt( urb); } - /** * Call to submit a USB Control transfer to a pipe. * @@ -2270,12 +2229,12 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt( * Returns: A submitted transaction or NULL on failure. */ static struct cvmx_usb_transaction *cvmx_usb_submit_control( - struct cvmx_usb_state *usb, + struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe, struct urb *urb) { int buffer_length = urb->transfer_buffer_length; - uint64_t control_header = urb->setup_dma; + u64 control_header = urb->setup_dma; struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header); if ((header->bRequestType & USB_DIR_IN) == 0) @@ -2291,7 +2250,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_control( urb); } - /** * Call to submit a USB Isochronous transfer to a pipe. * @@ -2302,13 +2260,13 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_control( * Returns: A submitted transaction or NULL on failure. */ static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous( - struct cvmx_usb_state *usb, + struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe, struct urb *urb) { struct cvmx_usb_iso_packet *packets; - packets = (struct cvmx_usb_iso_packet *) urb->setup_packet; + packets = (struct cvmx_usb_iso_packet *)urb->setup_packet; return cvmx_usb_submit_transaction(usb, pipe, CVMX_USB_TRANSFER_ISOCHRONOUS, urb->transfer_dma, @@ -2319,7 +2277,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous( packets, urb); } - /** * Cancel one outstanding request in a pipe. Canceling a request * can fail if the transaction has already completed before cancel @@ -2333,7 +2290,7 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous( * * Returns: 0 or a negative error code. 
*/ -static int cvmx_usb_cancel(struct cvmx_usb_state *usb, +static int cvmx_usb_cancel(struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe, struct cvmx_usb_transaction *transaction) { @@ -2359,17 +2316,15 @@ static int cvmx_usb_cancel(struct cvmx_usb_state *usb, if (usbc_hcchar.s.chena) { usbc_hcchar.s.chdis = 1; cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCCHARX(pipe->channel, - usb->index), - usbc_hcchar.u32); + CVMX_USBCX_HCCHARX(pipe->channel, + usb->index), + usbc_hcchar.u32); } } - cvmx_usb_perform_complete(usb, pipe, transaction, - CVMX_USB_COMPLETE_CANCEL); + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL); return 0; } - /** * Cancel all outstanding requests in a pipe. Logically all this * does is call cvmx_usb_cancel() in a loop. @@ -2379,7 +2334,7 @@ static int cvmx_usb_cancel(struct cvmx_usb_state *usb, * * Returns: 0 or a negative error code. */ -static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb, +static int cvmx_usb_cancel_all(struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe) { struct cvmx_usb_transaction *transaction, *next; @@ -2394,7 +2349,6 @@ static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb, return 0; } - /** * Close a pipe created with cvmx_usb_open_pipe(). * @@ -2404,7 +2358,7 @@ static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb, * Returns: 0 or a negative error code. EBUSY is returned if the pipe has * outstanding transfers. */ -static int cvmx_usb_close_pipe(struct cvmx_usb_state *usb, +static int cvmx_usb_close_pipe(struct octeon_hcd *usb, struct cvmx_usb_pipe *pipe) { /* Fail if the pipe has pending transactions */ @@ -2425,7 +2379,7 @@ static int cvmx_usb_close_pipe(struct cvmx_usb_state *usb, * * Returns: USB frame number */ -static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb) +static int cvmx_usb_get_frame_number(struct octeon_hcd *usb) { int frame_number; union cvmx_usbcx_hfnum usbc_hfnum; @@ -2436,6 +2390,197 @@ static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb) return frame_number; } +static void cvmx_usb_transfer_control(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + union cvmx_usbcx_hccharx usbc_hcchar, + int buffer_space_left, + int bytes_in_last_packet) +{ + switch (transaction->stage) { + case CVMX_USB_STAGE_NON_CONTROL: + case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE: + /* This should be impossible */ + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_ERROR); + break; + case CVMX_USB_STAGE_SETUP: + pipe->pid_toggle = 1; + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + transaction->stage = + CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE; + } else { + struct usb_ctrlrequest *header = + cvmx_phys_to_ptr(transaction->control_header); + if (header->wLength) + transaction->stage = CVMX_USB_STAGE_DATA; + else + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE: + { + struct usb_ctrlrequest *header = + cvmx_phys_to_ptr(transaction->control_header); + if (header->wLength) + transaction->stage = CVMX_USB_STAGE_DATA; + else + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_DATA: + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE; + /* + * For setup OUT data that are splits, + * the hardware doesn't appear to count + * transferred data. 
Here we manually + * update the data transferred + */ + if (!usbc_hcchar.s.epdir) { + if (buffer_space_left < pipe->max_packet) + transaction->actual_bytes += + buffer_space_left; + else + transaction->actual_bytes += + pipe->max_packet; + } + } else if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->pid_toggle = 1; + transaction->stage = CVMX_USB_STAGE_STATUS; + } + break; + case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE: + if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->pid_toggle = 1; + transaction->stage = CVMX_USB_STAGE_STATUS; + } else { + transaction->stage = CVMX_USB_STAGE_DATA; + } + break; + case CVMX_USB_STAGE_STATUS: + if (cvmx_usb_pipe_needs_split(usb, pipe)) + transaction->stage = + CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE; + else + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + break; + case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE: + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); + break; + } +} + +static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + union cvmx_usbcx_hcintx usbc_hcint, + int buffer_space_left, + int bytes_in_last_packet) +{ + /* + * The only time a bulk transfer isn't complete when it finishes with + * an ACK is during a split transaction. For splits we need to continue + * the transfer if more data is needed. + */ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) + transaction->stage = + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; + else if (buffer_space_left && + (bytes_in_last_packet == pipe->max_packet)) + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + else + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } else { + if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) && + (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) && + (usbc_hcint.s.nak)) + pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; + if (!buffer_space_left || + (bytes_in_last_packet < pipe->max_packet)) + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } +} + +static void cvmx_usb_transfer_intr(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + int buffer_space_left, + int bytes_in_last_packet) +{ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) { + transaction->stage = + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; + } else if (buffer_space_left && + (bytes_in_last_packet == pipe->max_packet)) { + transaction->stage = CVMX_USB_STAGE_NON_CONTROL; + } else { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } + } else if (!buffer_space_left || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); + } +} + +static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb, + struct cvmx_usb_pipe *pipe, + struct cvmx_usb_transaction *transaction, + int buffer_space_left, + int bytes_in_last_packet, + int bytes_this_transfer) +{ + if (cvmx_usb_pipe_needs_split(usb, pipe)) { + /* + * ISOCHRONOUS OUT splits don't require a complete split stage. + * Instead they use a sequence of begin OUT splits to transfer + * the data 188 bytes at a time. Once the transfer is complete, + * the pipe sleeps until the next schedule interval. 
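The factored-out cvmx_usb_transfer_control() helper above steps a control transfer through the usual SETUP -> DATA -> STATUS sequence, inserting a *_SPLIT_COMPLETE phase after each stage when the pipe sits behind a transaction translator and skipping DATA entirely when the setup packet's wLength is zero. A compact sketch of the non-split progression; stage names are shortened and the logic is reduced to the happy path:

#include <stdio.h>

enum stage { STAGE_SETUP, STAGE_DATA, STAGE_STATUS, STAGE_DONE };

/* Advance a non-split control transfer by one completed stage.
 * data_len is the setup packet's wLength; data_finished means the data
 * stage drained the buffer or ended on a short packet.
 */
static enum stage next_stage(enum stage cur, int data_len, int data_finished)
{
	switch (cur) {
	case STAGE_SETUP:
		return data_len ? STAGE_DATA : STAGE_STATUS;
	case STAGE_DATA:
		return data_finished ? STAGE_STATUS : STAGE_DATA;
	case STAGE_STATUS:
	default:
		return STAGE_DONE;
	}
}

int main(void)
{
	enum stage s = STAGE_SETUP;

	s = next_stage(s, 8, 0);	/* SETUP done, wLength=8 -> DATA   */
	s = next_stage(s, 8, 1);	/* buffer drained        -> STATUS */
	s = next_stage(s, 8, 1);	/* status stage done     -> DONE   */
	printf("final stage %d (expect %d)\n", s, STAGE_DONE);
	return 0;
}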
+ */ + if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) { + /* + * If no space left or this wasn't a max size packet + * then this transfer is complete. Otherwise start it + * again to send the next 188 bytes + */ + if (!buffer_space_left || (bytes_this_transfer < 188)) { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } + return; + } + if (transaction->stage == + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) { + /* + * We are in the incoming data phase. Keep getting data + * until we run out of space or get a small packet + */ + if ((buffer_space_left == 0) || + (bytes_in_last_packet < pipe->max_packet)) { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_OK); + } + } else { + transaction->stage = + CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; + } + } else { + pipe->next_tx_frame += pipe->interval; + cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK); + } +} /** * Poll a channel for status @@ -2445,10 +2590,9 @@ static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb) * * Returns: Zero on success */ -static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) +static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel) { - struct octeon_hcd *priv = cvmx_usb_to_octeon(usb); - struct usb_hcd *hcd = octeon_to_hcd(priv); + struct usb_hcd *hcd = octeon_to_hcd(usb); struct device *dev = hcd->self.controller; union cvmx_usbcx_hcintx usbc_hcint; union cvmx_usbcx_hctsizx usbc_hctsiz; @@ -2475,9 +2619,9 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) * write of HCCHARX without changing things */ cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCCHARX(channel, - usb->index), - usbc_hcchar.u32); + CVMX_USBCX_HCCHARX(channel, + usb->index), + usbc_hcchar.u32); return 0; } @@ -2492,14 +2636,12 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) hcintmsk.u32 = 0; hcintmsk.s.chhltdmsk = 1; cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCINTMSKX(channel, - usb->index), - hcintmsk.u32); + CVMX_USBCX_HCINTMSKX(channel, usb->index), + hcintmsk.u32); usbc_hcchar.s.chdis = 1; cvmx_usb_write_csr32(usb, - CVMX_USBCX_HCCHARX(channel, - usb->index), - usbc_hcchar.u32); + CVMX_USBCX_HCCHARX(channel, usb->index), + usbc_hcchar.u32); return 0; } else if (usbc_hcint.s.xfercompl) { /* @@ -2523,7 +2665,7 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) /* Disable the channel interrupts now that it is done */ cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0); - usb->idle_hardware_channels |= (1<<channel); + usb->idle_hardware_channels |= (1 << channel); /* Make sure this channel is tied to a valid pipe */ pipe = usb->pipe_for_channel[channel]; @@ -2593,7 +2735,7 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) * transferred */ if ((transaction->stage == CVMX_USB_STAGE_SETUP) || - (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE)) + (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE)) bytes_this_transfer = 0; /* @@ -2621,8 +2763,8 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) * will clear this flag */ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) && - (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) && - (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)) + (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) && + (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT)) pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING; if 
(unlikely(WARN_ON_ONCE(bytes_this_transfer < 0))) { @@ -2631,8 +2773,8 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) * keeps substracting same byte count over and over again. In * such case we just need to fail every transaction. */ - cvmx_usb_perform_complete(usb, pipe, transaction, - CVMX_USB_COMPLETE_ERROR); + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_ERROR); return 0; } @@ -2644,24 +2786,24 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) * the actual bytes transferred */ pipe->pid_toggle = 0; - cvmx_usb_perform_complete(usb, pipe, transaction, - CVMX_USB_COMPLETE_STALL); + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_STALL); } else if (usbc_hcint.s.xacterr) { /* * XactErr as a response means the device signaled * something wrong with the transfer. For example, PID * toggle errors cause these. */ - cvmx_usb_perform_complete(usb, pipe, transaction, - CVMX_USB_COMPLETE_XACTERR); + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_XACTERR); } else if (usbc_hcint.s.bblerr) { /* Babble Error (BblErr) */ - cvmx_usb_perform_complete(usb, pipe, transaction, - CVMX_USB_COMPLETE_BABBLEERR); + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_BABBLEERR); } else if (usbc_hcint.s.datatglerr) { /* Data toggle error */ - cvmx_usb_perform_complete(usb, pipe, transaction, - CVMX_USB_COMPLETE_DATATGLERR); + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_DATATGLERR); } else if (usbc_hcint.s.nyet) { /* * NYET as a response is only allowed in three cases: as a @@ -2676,10 +2818,10 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) * again. Otherwise this transaction is complete */ if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) - cvmx_usb_perform_complete(usb, pipe, - transaction, - CVMX_USB_COMPLETE_SUCCESS); + (bytes_in_last_packet < pipe->max_packet)) + cvmx_usb_complete(usb, pipe, + transaction, + CVMX_USB_STATUS_OK); } else { /* * Split transactions retry the split complete 4 times @@ -2713,205 +2855,26 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) switch (transaction->type) { case CVMX_USB_TRANSFER_CONTROL: - switch (transaction->stage) { - case CVMX_USB_STAGE_NON_CONTROL: - case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE: - /* This should be impossible */ - cvmx_usb_perform_complete(usb, pipe, - transaction, CVMX_USB_COMPLETE_ERROR); - break; - case CVMX_USB_STAGE_SETUP: - pipe->pid_toggle = 1; - if (cvmx_usb_pipe_needs_split(usb, pipe)) - transaction->stage = - CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE; - else { - struct usb_ctrlrequest *header = - cvmx_phys_to_ptr(transaction->control_header); - if (header->wLength) - transaction->stage = - CVMX_USB_STAGE_DATA; - else - transaction->stage = - CVMX_USB_STAGE_STATUS; - } - break; - case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE: - { - struct usb_ctrlrequest *header = - cvmx_phys_to_ptr(transaction->control_header); - if (header->wLength) - transaction->stage = - CVMX_USB_STAGE_DATA; - else - transaction->stage = - CVMX_USB_STAGE_STATUS; - } - break; - case CVMX_USB_STAGE_DATA: - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - transaction->stage = - CVMX_USB_STAGE_DATA_SPLIT_COMPLETE; - /* - * For setup OUT data that are splits, - * the hardware doesn't appear to count - * transferred data. 
Here we manually - * update the data transferred - */ - if (!usbc_hcchar.s.epdir) { - if (buffer_space_left < pipe->max_packet) - transaction->actual_bytes += - buffer_space_left; - else - transaction->actual_bytes += - pipe->max_packet; - } - } else if ((buffer_space_left == 0) || - (bytes_in_last_packet < - pipe->max_packet)) { - pipe->pid_toggle = 1; - transaction->stage = - CVMX_USB_STAGE_STATUS; - } - break; - case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE: - if ((buffer_space_left == 0) || - (bytes_in_last_packet < - pipe->max_packet)) { - pipe->pid_toggle = 1; - transaction->stage = - CVMX_USB_STAGE_STATUS; - } else { - transaction->stage = - CVMX_USB_STAGE_DATA; - } - break; - case CVMX_USB_STAGE_STATUS: - if (cvmx_usb_pipe_needs_split(usb, pipe)) - transaction->stage = - CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE; - else - cvmx_usb_perform_complete(usb, pipe, - transaction, - CVMX_USB_COMPLETE_SUCCESS); - break; - case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE: - cvmx_usb_perform_complete(usb, pipe, - transaction, - CVMX_USB_COMPLETE_SUCCESS); - break; - } + cvmx_usb_transfer_control(usb, pipe, transaction, + usbc_hcchar, + buffer_space_left, + bytes_in_last_packet); break; case CVMX_USB_TRANSFER_BULK: + cvmx_usb_transfer_bulk(usb, pipe, transaction, + usbc_hcint, buffer_space_left, + bytes_in_last_packet); + break; case CVMX_USB_TRANSFER_INTERRUPT: - /* - * The only time a bulk transfer isn't complete when it - * finishes with an ACK is during a split transaction. - * For splits we need to continue the transfer if more - * data is needed - */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - if (transaction->stage == - CVMX_USB_STAGE_NON_CONTROL) - transaction->stage = - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; - else { - if (buffer_space_left && - (bytes_in_last_packet == - pipe->max_packet)) - transaction->stage = - CVMX_USB_STAGE_NON_CONTROL; - else { - if (transaction->type == - CVMX_USB_TRANSFER_INTERRUPT) - pipe->next_tx_frame += - pipe->interval; - cvmx_usb_perform_complete( - usb, - pipe, - transaction, - CVMX_USB_COMPLETE_SUCCESS); - } - } - } else { - if ((pipe->device_speed == - CVMX_USB_SPEED_HIGH) && - (pipe->transfer_type == - CVMX_USB_TRANSFER_BULK) && - (pipe->transfer_dir == - CVMX_USB_DIRECTION_OUT) && - (usbc_hcint.s.nak)) - pipe->flags |= - CVMX_USB_PIPE_FLAGS_NEED_PING; - if (!buffer_space_left || - (bytes_in_last_packet < - pipe->max_packet)) { - if (transaction->type == - CVMX_USB_TRANSFER_INTERRUPT) - pipe->next_tx_frame += - pipe->interval; - cvmx_usb_perform_complete(usb, pipe, - transaction, - CVMX_USB_COMPLETE_SUCCESS); - } - } + cvmx_usb_transfer_intr(usb, pipe, transaction, + buffer_space_left, + bytes_in_last_packet); break; case CVMX_USB_TRANSFER_ISOCHRONOUS: - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * ISOCHRONOUS OUT splits don't require a - * complete split stage. Instead they use a - * sequence of begin OUT splits to transfer the - * data 188 bytes at a time. Once the transfer - * is complete, the pipe sleeps until the next - * schedule interval - */ - if (pipe->transfer_dir == - CVMX_USB_DIRECTION_OUT) { - /* - * If no space left or this wasn't a max - * size packet then this transfer is - * complete. 
Otherwise start it again to - * send the next 188 bytes - */ - if (!buffer_space_left || - (bytes_this_transfer < 188)) { - pipe->next_tx_frame += - pipe->interval; - cvmx_usb_perform_complete(usb, - pipe, transaction, - CVMX_USB_COMPLETE_SUCCESS); - } - } else { - if (transaction->stage == - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) { - /* - * We are in the incoming data - * phase. Keep getting data - * until we run out of space or - * get a small packet - */ - if ((buffer_space_left == 0) || - (bytes_in_last_packet < - pipe->max_packet)) { - pipe->next_tx_frame += - pipe->interval; - cvmx_usb_perform_complete( - usb, - pipe, - transaction, - CVMX_USB_COMPLETE_SUCCESS); - } - } else - transaction->stage = - CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE; - } - } else { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_perform_complete(usb, pipe, - transaction, - CVMX_USB_COMPLETE_SUCCESS); - } + cvmx_usb_transfer_isoc(usb, pipe, transaction, + buffer_space_left, + bytes_in_last_packet, + bytes_this_transfer); break; } } else if (usbc_hcint.s.nak) { @@ -2946,20 +2909,18 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel) * We get channel halted interrupts with no result bits * sets when the cable is unplugged */ - cvmx_usb_perform_complete(usb, pipe, transaction, - CVMX_USB_COMPLETE_ERROR); + cvmx_usb_complete(usb, pipe, transaction, + CVMX_USB_STATUS_ERROR); } } return 0; } -static void octeon_usb_port_callback(struct cvmx_usb_state *usb) +static void octeon_usb_port_callback(struct octeon_hcd *usb) { - struct octeon_hcd *priv = cvmx_usb_to_octeon(usb); - - spin_unlock(&priv->lock); - usb_hcd_poll_rh_status(octeon_to_hcd(priv)); - spin_lock(&priv->lock); + spin_unlock(&usb->lock); + usb_hcd_poll_rh_status(octeon_to_hcd(usb)); + spin_lock(&usb->lock); } /** @@ -2972,7 +2933,7 @@ static void octeon_usb_port_callback(struct cvmx_usb_state *usb) * * Returns: 0 or a negative error code. 
*/ -static int cvmx_usb_poll(struct cvmx_usb_state *usb) +static int cvmx_usb_poll(struct octeon_hcd *usb) { union cvmx_usbcx_hfnum usbc_hfnum; union cvmx_usbcx_gintsts usbc_gintsts; @@ -2981,7 +2942,7 @@ static int cvmx_usb_poll(struct cvmx_usb_state *usb) /* Update the frame counter */ usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index)); - if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum) + if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum) usb->frame_number += 0x4000; usb->frame_number &= ~0x3fffull; usb->frame_number |= usbc_hfnum.s.frnum; @@ -3028,8 +2989,8 @@ static int cvmx_usb_poll(struct cvmx_usb_state *usb) */ octeon_usb_port_callback(usb); /* Clear the port change bits */ - usbc_hprt.u32 = cvmx_usb_read_csr32(usb, - CVMX_USBCX_HPRT(usb->index)); + usbc_hprt.u32 = + cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index)); usbc_hprt.s.prtena = 0; cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32); @@ -3056,7 +3017,7 @@ static int cvmx_usb_poll(struct cvmx_usb_state *usb) channel = __fls(usbc_haint.u32); cvmx_usb_poll_channel(usb, channel); - usbc_haint.u32 ^= 1<<channel; + usbc_haint.u32 ^= 1 << channel; } } @@ -3073,12 +3034,12 @@ static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd) static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd) { - struct octeon_hcd *priv = hcd_to_octeon(hcd); + struct octeon_hcd *usb = hcd_to_octeon(hcd); unsigned long flags; - spin_lock_irqsave(&priv->lock, flags); - cvmx_usb_poll(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_poll(usb); + spin_unlock_irqrestore(&usb->lock, flags); return IRQ_HANDLED; } @@ -3095,16 +3056,16 @@ static void octeon_usb_stop(struct usb_hcd *hcd) static int octeon_usb_get_frame_number(struct usb_hcd *hcd) { - struct octeon_hcd *priv = hcd_to_octeon(hcd); + struct octeon_hcd *usb = hcd_to_octeon(hcd); - return cvmx_usb_get_frame_number(&priv->usb); + return cvmx_usb_get_frame_number(usb); } static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { - struct octeon_hcd *priv = hcd_to_octeon(hcd); + struct octeon_hcd *usb = hcd_to_octeon(hcd); struct device *dev = hcd->self.controller; struct cvmx_usb_transaction *transaction = NULL; struct cvmx_usb_pipe *pipe; @@ -3114,11 +3075,11 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, int rc; urb->status = 0; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); rc = usb_hcd_link_urb_to_ep(hcd, urb); if (rc) { - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&usb->lock, flags); return rc; } @@ -3184,7 +3145,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, dev = dev->parent; } } - pipe = cvmx_usb_open_pipe(&priv->usb, usb_pipedevice(urb->pipe), + pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), speed, le16_to_cpu(ep->desc.wMaxPacketSize) & 0x7ff, @@ -3198,7 +3159,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, split_device, split_port); if (!pipe) { usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&usb->lock, flags); dev_dbg(dev, "Failed to create pipe\n"); return -ENOMEM; } @@ -3227,8 +3188,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, urb->iso_frame_desc[i].offset; iso_packet[i].length = urb->iso_frame_desc[i].length; - iso_packet[i].status = - CVMX_USB_COMPLETE_ERROR; + iso_packet[i].status = CVMX_USB_STATUS_ERROR; } /* * Store a pointer 
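cvmx_usb_poll() above widens the controller's 14-bit HFNUM.frnum counter into the driver's 64-bit frame_number by detecting wraparound: whenever the saved low bits exceed the freshly read hardware value, a full 0x4000 period has elapsed. A standalone sketch of that widening step; the names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Fold a newly read 14-bit hardware frame count (hw14) into a running
 * 64-bit software counter, assuming at most one wrap between polls.
 */
static uint64_t extend_frame_number(uint64_t soft, unsigned int hw14)
{
	if ((soft & 0x3fff) > hw14)	/* hardware counter wrapped */
		soft += 0x4000;
	soft &= ~0x3fffull;
	soft |= hw14;
	return soft;
}

int main(void)
{
	uint64_t frame = 0x3ffe;

	frame = extend_frame_number(frame, 0x3fff);	/* no wrap */
	frame = extend_frame_number(frame, 0x0002);	/* wrapped */
	printf("0x%llx\n", (unsigned long long)frame);	/* 0x4002  */
	return 0;
}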
to the list in the URB setup_packet @@ -3236,7 +3196,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, * this saves us a bunch of logic. */ urb->setup_packet = (char *)iso_packet; - transaction = cvmx_usb_submit_isochronous(&priv->usb, + transaction = cvmx_usb_submit_isochronous(usb, pipe, urb); /* * If submit failed we need to free our private packet @@ -3252,29 +3212,29 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, dev_dbg(dev, "Submit interrupt to %d.%d\n", usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_interrupt(&priv->usb, pipe, urb); + transaction = cvmx_usb_submit_interrupt(usb, pipe, urb); break; case PIPE_CONTROL: dev_dbg(dev, "Submit control to %d.%d\n", usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_control(&priv->usb, pipe, urb); + transaction = cvmx_usb_submit_control(usb, pipe, urb); break; case PIPE_BULK: dev_dbg(dev, "Submit bulk to %d.%d\n", usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_bulk(&priv->usb, pipe, urb); + transaction = cvmx_usb_submit_bulk(usb, pipe, urb); break; } if (!transaction) { usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&usb->lock, flags); dev_dbg(dev, "Failed to submit\n"); return -ENOMEM; } urb->hcpriv = transaction; - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&usb->lock, flags); return 0; } @@ -3282,24 +3242,24 @@ static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { - struct octeon_hcd *priv = hcd_to_octeon(hcd); + struct octeon_hcd *usb = hcd_to_octeon(hcd); unsigned long flags; int rc; if (!urb->dev) return -EINVAL; - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); rc = usb_hcd_check_unlink_urb(hcd, urb, status); if (rc) goto out; urb->status = status; - cvmx_usb_cancel(&priv->usb, urb->ep->hcpriv, urb->hcpriv); + cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv); out: - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&usb->lock, flags); return rc; } @@ -3310,28 +3270,28 @@ static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, struct device *dev = hcd->self.controller; if (ep->hcpriv) { - struct octeon_hcd *priv = hcd_to_octeon(hcd); + struct octeon_hcd *usb = hcd_to_octeon(hcd); struct cvmx_usb_pipe *pipe = ep->hcpriv; unsigned long flags; - spin_lock_irqsave(&priv->lock, flags); - cvmx_usb_cancel_all(&priv->usb, pipe); - if (cvmx_usb_close_pipe(&priv->usb, pipe)) + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_cancel_all(usb, pipe); + if (cvmx_usb_close_pipe(usb, pipe)) dev_dbg(dev, "Closing pipe %p failed\n", pipe); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&usb->lock, flags); ep->hcpriv = NULL; } } static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf) { - struct octeon_hcd *priv = hcd_to_octeon(hcd); + struct octeon_hcd *usb = hcd_to_octeon(hcd); struct cvmx_usb_port_status port_status; unsigned long flags; - spin_lock_irqsave(&priv->lock, flags); - port_status = cvmx_usb_get_status(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); buf[0] = 0; buf[0] = port_status.connect_change << 1; @@ -3339,12 +3299,11 @@ static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf) } static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 
typeReq, u16 wValue, - u16 wIndex, char *buf, u16 wLength) + u16 wIndex, char *buf, u16 wLength) { - struct octeon_hcd *priv = hcd_to_octeon(hcd); + struct octeon_hcd *usb = hcd_to_octeon(hcd); struct device *dev = hcd->self.controller; struct cvmx_usb_port_status usb_port_status; - struct cvmx_usb_state *usb = &priv->usb; int port_status; struct usb_hub_descriptor *desc; unsigned long flags; @@ -3371,9 +3330,9 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, switch (wValue) { case USB_PORT_FEAT_ENABLE: dev_dbg(dev, " ENABLE\n"); - spin_lock_irqsave(&priv->lock, flags); - cvmx_usb_disable(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_disable(usb); + spin_unlock_irqrestore(&usb->lock, flags); break; case USB_PORT_FEAT_SUSPEND: dev_dbg(dev, " SUSPEND\n"); @@ -3390,20 +3349,18 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_C_CONNECTION: dev_dbg(dev, " C_CONNECTION\n"); /* Clears drivers internal connect status change flag */ - spin_lock_irqsave(&priv->lock, flags); - priv->usb.port_status = - cvmx_usb_get_status(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + usb->port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); break; case USB_PORT_FEAT_C_RESET: dev_dbg(dev, " C_RESET\n"); /* * Clears the driver's internal Port Reset Change flag. */ - spin_lock_irqsave(&priv->lock, flags); - priv->usb.port_status = - cvmx_usb_get_status(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + usb->port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); break; case USB_PORT_FEAT_C_ENABLE: dev_dbg(dev, " C_ENABLE\n"); @@ -3411,10 +3368,9 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, * Clears the driver's internal Port Enable/Disable * Change flag. 
*/ - spin_lock_irqsave(&priv->lock, flags); - priv->usb.port_status = - cvmx_usb_get_status(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + usb->port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); break; case USB_PORT_FEAT_C_SUSPEND: dev_dbg(dev, " C_SUSPEND\n"); @@ -3427,10 +3383,9 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, case USB_PORT_FEAT_C_OVER_CURRENT: dev_dbg(dev, " C_OVER_CURRENT\n"); /* Clears the driver's overcurrent Change flag */ - spin_lock_irqsave(&priv->lock, flags); - priv->usb.port_status = - cvmx_usb_get_status(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + usb->port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); break; default: dev_dbg(dev, " UNKNOWN\n"); @@ -3451,7 +3406,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, break; case GetHubStatus: dev_dbg(dev, "GetHubStatus\n"); - *(__le32 *) buf = 0; + *(__le32 *)buf = 0; break; case GetPortStatus: dev_dbg(dev, "GetPortStatus\n"); @@ -3460,9 +3415,9 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, return -EINVAL; } - spin_lock_irqsave(&priv->lock, flags); - usb_port_status = cvmx_usb_get_status(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + usb_port_status = cvmx_usb_get_status(usb); + spin_unlock_irqrestore(&usb->lock, flags); port_status = 0; if (usb_port_status.connect_change) { @@ -3503,7 +3458,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, dev_dbg(dev, " LOWSPEED\n"); } - *((__le32 *) buf) = cpu_to_le32(port_status); + *((__le32 *)buf) = cpu_to_le32(port_status); break; case SetHubFeature: dev_dbg(dev, "SetHubFeature\n"); @@ -3525,16 +3480,16 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, /* * Program the port power bit to drive VBUS on the USB. 
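In the GetPortStatus handler above, the 32-bit reply packs the standard hub wPortStatus bits into the low half and the corresponding wPortChange bits into the high half before the cpu_to_le32() conversion. A small sketch of that packing using the USB 2.0 hub-class bit positions; the field selection here is illustrative rather than the driver's full set:

#include <stdint.h>
#include <stdio.h>

/* USB 2.0 hub class: wPortStatus bits occupy the low 16 bits of the
 * GetPortStatus reply, wPortChange bits the high 16 bits.
 */
#define PORT_STAT_CONNECTION	0x0001
#define PORT_STAT_ENABLE	0x0002
#define PORT_STAT_POWER		0x0100
#define PORT_STAT_C_CONNECTION	0x0001	/* change bit, goes in high half */

static uint32_t pack_port_status(int connected, int enabled, int powered,
				 int connect_change)
{
	uint32_t status = 0;

	if (connected)
		status |= PORT_STAT_CONNECTION;
	if (enabled)
		status |= PORT_STAT_ENABLE;
	if (powered)
		status |= PORT_STAT_POWER;
	if (connect_change)
		status |= (uint32_t)PORT_STAT_C_CONNECTION << 16;
	return status;	/* the driver then stores cpu_to_le32() of this */
}

int main(void)
{
	printf("0x%08x\n", pack_port_status(1, 1, 1, 1));	/* 0x00010103 */
	return 0;
}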
*/ - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt, prtpwr, 1); - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&usb->lock, flags); return 0; case USB_PORT_FEAT_RESET: dev_dbg(dev, " RESET\n"); - spin_lock_irqsave(&priv->lock, flags); - cvmx_usb_reset_port(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + cvmx_usb_reset_port(usb); + spin_unlock_irqrestore(&usb->lock, flags); return 0; case USB_PORT_FEAT_INDICATOR: dev_dbg(dev, " INDICATOR\n"); @@ -3579,23 +3534,26 @@ static int octeon_usb_probe(struct platform_device *pdev) struct device_node *usbn_node; int irq = platform_get_irq(pdev, 0); struct device *dev = &pdev->dev; - struct octeon_hcd *priv; + struct octeon_hcd *usb; struct usb_hcd *hcd; u32 clock_rate = 48000000; bool is_crystal_clock = false; const char *clock_type; int i; - if (dev->of_node == NULL) { + if (!dev->of_node) { dev_err(dev, "Error: empty of_node\n"); return -ENXIO; } usbn_node = dev->of_node->parent; i = of_property_read_u32(usbn_node, - "refclk-frequency", &clock_rate); + "clock-frequency", &clock_rate); + if (i) + i = of_property_read_u32(usbn_node, + "refclk-frequency", &clock_rate); if (i) { - dev_err(dev, "No USBN \"refclk-frequency\"\n"); + dev_err(dev, "No USBN \"clock-frequency\"\n"); return -ENXIO; } switch (clock_rate) { @@ -3609,14 +3567,16 @@ static int octeon_usb_probe(struct platform_device *pdev) initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ; break; default: - dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", - clock_rate); + dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n", + clock_rate); return -ENXIO; - } i = of_property_read_string(usbn_node, - "refclk-type", &clock_type); + "cavium,refclk-type", &clock_type); + if (i) + i = of_property_read_string(usbn_node, + "refclk-type", &clock_type); if (!i && strcmp("crystal", clock_type) == 0) is_crystal_clock = true; @@ -3627,7 +3587,7 @@ static int octeon_usb_probe(struct platform_device *pdev) initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res_mem == NULL) { + if (!res_mem) { dev_err(dev, "found no memory resource\n"); return -ENXIO; } @@ -3673,31 +3633,31 @@ static int octeon_usb_probe(struct platform_device *pdev) return -1; } hcd->uses_new_polling = 1; - priv = (struct octeon_hcd *)hcd->hcd_priv; + usb = (struct octeon_hcd *)hcd->hcd_priv; - spin_lock_init(&priv->lock); + spin_lock_init(&usb->lock); - priv->usb.init_flags = initialize_flags; + usb->init_flags = initialize_flags; /* Initialize the USB state structure */ - priv->usb.index = usb_num; - INIT_LIST_HEAD(&priv->usb.idle_pipes); - for (i = 0; i < ARRAY_SIZE(priv->usb.active_pipes); i++) - INIT_LIST_HEAD(&priv->usb.active_pipes[i]); + usb->index = usb_num; + INIT_LIST_HEAD(&usb->idle_pipes); + for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++) + INIT_LIST_HEAD(&usb->active_pipes[i]); /* Due to an errata, CN31XX doesn't support DMA */ if (OCTEON_IS_MODEL(OCTEON_CN31XX)) { - priv->usb.init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA; + usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA; /* Only use one channel with non DMA */ - priv->usb.idle_hardware_channels = 0x1; + usb->idle_hardware_channels = 0x1; } else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { /* CN5XXX have an errata with channel 3 */ - priv->usb.idle_hardware_channels = 0xf7; + usb->idle_hardware_channels = 0xf7; } else { - 
priv->usb.idle_hardware_channels = 0xff; + usb->idle_hardware_channels = 0xff; } - status = cvmx_usb_initialize(dev, &priv->usb); + status = cvmx_usb_initialize(dev, usb); if (status) { dev_dbg(dev, "USB initialization failed with %d\n", status); kfree(hcd); @@ -3722,13 +3682,13 @@ static int octeon_usb_remove(struct platform_device *pdev) int status; struct device *dev = &pdev->dev; struct usb_hcd *hcd = dev_get_drvdata(dev); - struct octeon_hcd *priv = hcd_to_octeon(hcd); + struct octeon_hcd *usb = hcd_to_octeon(hcd); unsigned long flags; usb_remove_hcd(hcd); - spin_lock_irqsave(&priv->lock, flags); - status = cvmx_usb_shutdown(&priv->usb); - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&usb->lock, flags); + status = cvmx_usb_shutdown(usb); + spin_unlock_irqrestore(&usb->lock, flags); if (status) dev_dbg(dev, "USB shutdown failed with %d\n", status); @@ -3747,7 +3707,7 @@ MODULE_DEVICE_TABLE(of, octeon_usb_match); static struct platform_driver octeon_usb_driver = { .driver = { - .name = "OcteonUSB", + .name = "octeon-hcd", .of_match_table = octeon_usb_match, }, .probe = octeon_usb_probe, diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h index 70e7fa5e37d9..3353aefe662e 100644 --- a/drivers/staging/octeon-usb/octeon-hcd.h +++ b/drivers/staging/octeon-usb/octeon-hcd.h @@ -110,7 +110,7 @@ * initialization. Do not change this register after the initial programming. */ union cvmx_usbcx_gahbcfg { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_gahbcfg_s * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl) @@ -145,13 +145,13 @@ union cvmx_usbcx_gahbcfg { * * 1'b1: Unmask the interrupt assertion to the application. */ struct cvmx_usbcx_gahbcfg_s { - __BITFIELD_FIELD(uint32_t reserved_9_31 : 23, - __BITFIELD_FIELD(uint32_t ptxfemplvl : 1, - __BITFIELD_FIELD(uint32_t nptxfemplvl : 1, - __BITFIELD_FIELD(uint32_t reserved_6_6 : 1, - __BITFIELD_FIELD(uint32_t dmaen : 1, - __BITFIELD_FIELD(uint32_t hbstlen : 4, - __BITFIELD_FIELD(uint32_t glblintrmsk : 1, + __BITFIELD_FIELD(u32 reserved_9_31 : 23, + __BITFIELD_FIELD(u32 ptxfemplvl : 1, + __BITFIELD_FIELD(u32 nptxfemplvl : 1, + __BITFIELD_FIELD(u32 reserved_6_6 : 1, + __BITFIELD_FIELD(u32 dmaen : 1, + __BITFIELD_FIELD(u32 hbstlen : 4, + __BITFIELD_FIELD(u32 glblintrmsk : 1, ;))))))) } s; }; @@ -164,7 +164,7 @@ union cvmx_usbcx_gahbcfg { * This register contains the configuration options of the O2P USB core. 
*/ union cvmx_usbcx_ghwcfg3 { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_ghwcfg3_s * @dfifodepth: DFIFO Depth (DfifoDepth) @@ -212,16 +212,16 @@ union cvmx_usbcx_ghwcfg3 { * * Others: Reserved */ struct cvmx_usbcx_ghwcfg3_s { - __BITFIELD_FIELD(uint32_t dfifodepth : 16, - __BITFIELD_FIELD(uint32_t reserved_13_15 : 3, - __BITFIELD_FIELD(uint32_t ahbphysync : 1, - __BITFIELD_FIELD(uint32_t rsttype : 1, - __BITFIELD_FIELD(uint32_t optfeature : 1, - __BITFIELD_FIELD(uint32_t vendor_control_interface_support : 1, - __BITFIELD_FIELD(uint32_t i2c_selection : 1, - __BITFIELD_FIELD(uint32_t otgen : 1, - __BITFIELD_FIELD(uint32_t pktsizewidth : 3, - __BITFIELD_FIELD(uint32_t xfersizewidth : 4, + __BITFIELD_FIELD(u32 dfifodepth : 16, + __BITFIELD_FIELD(u32 reserved_13_15 : 3, + __BITFIELD_FIELD(u32 ahbphysync : 1, + __BITFIELD_FIELD(u32 rsttype : 1, + __BITFIELD_FIELD(u32 optfeature : 1, + __BITFIELD_FIELD(u32 vendor_control_interface_support : 1, + __BITFIELD_FIELD(u32 i2c_selection : 1, + __BITFIELD_FIELD(u32 otgen : 1, + __BITFIELD_FIELD(u32 pktsizewidth : 3, + __BITFIELD_FIELD(u32 xfersizewidth : 4, ;)))))))))) } s; }; @@ -238,7 +238,7 @@ union cvmx_usbcx_ghwcfg3 { * Mask interrupt: 1'b0, Unmask interrupt: 1'b1 */ union cvmx_usbcx_gintmsk { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_gintmsk_s * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask @@ -279,38 +279,38 @@ union cvmx_usbcx_gintmsk { * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk) */ struct cvmx_usbcx_gintmsk_s { - __BITFIELD_FIELD(uint32_t wkupintmsk : 1, - __BITFIELD_FIELD(uint32_t sessreqintmsk : 1, - __BITFIELD_FIELD(uint32_t disconnintmsk : 1, - __BITFIELD_FIELD(uint32_t conidstschngmsk : 1, - __BITFIELD_FIELD(uint32_t reserved_27_27 : 1, - __BITFIELD_FIELD(uint32_t ptxfempmsk : 1, - __BITFIELD_FIELD(uint32_t hchintmsk : 1, - __BITFIELD_FIELD(uint32_t prtintmsk : 1, - __BITFIELD_FIELD(uint32_t reserved_23_23 : 1, - __BITFIELD_FIELD(uint32_t fetsuspmsk : 1, - __BITFIELD_FIELD(uint32_t incomplpmsk : 1, - __BITFIELD_FIELD(uint32_t incompisoinmsk : 1, - __BITFIELD_FIELD(uint32_t oepintmsk : 1, - __BITFIELD_FIELD(uint32_t inepintmsk : 1, - __BITFIELD_FIELD(uint32_t epmismsk : 1, - __BITFIELD_FIELD(uint32_t reserved_16_16 : 1, - __BITFIELD_FIELD(uint32_t eopfmsk : 1, - __BITFIELD_FIELD(uint32_t isooutdropmsk : 1, - __BITFIELD_FIELD(uint32_t enumdonemsk : 1, - __BITFIELD_FIELD(uint32_t usbrstmsk : 1, - __BITFIELD_FIELD(uint32_t usbsuspmsk : 1, - __BITFIELD_FIELD(uint32_t erlysuspmsk : 1, - __BITFIELD_FIELD(uint32_t i2cint : 1, - __BITFIELD_FIELD(uint32_t ulpickintmsk : 1, - __BITFIELD_FIELD(uint32_t goutnakeffmsk : 1, - __BITFIELD_FIELD(uint32_t ginnakeffmsk : 1, - __BITFIELD_FIELD(uint32_t nptxfempmsk : 1, - __BITFIELD_FIELD(uint32_t rxflvlmsk : 1, - __BITFIELD_FIELD(uint32_t sofmsk : 1, - __BITFIELD_FIELD(uint32_t otgintmsk : 1, - __BITFIELD_FIELD(uint32_t modemismsk : 1, - __BITFIELD_FIELD(uint32_t reserved_0_0 : 1, + __BITFIELD_FIELD(u32 wkupintmsk : 1, + __BITFIELD_FIELD(u32 sessreqintmsk : 1, + __BITFIELD_FIELD(u32 disconnintmsk : 1, + __BITFIELD_FIELD(u32 conidstschngmsk : 1, + __BITFIELD_FIELD(u32 reserved_27_27 : 1, + __BITFIELD_FIELD(u32 ptxfempmsk : 1, + __BITFIELD_FIELD(u32 hchintmsk : 1, + __BITFIELD_FIELD(u32 prtintmsk : 1, + __BITFIELD_FIELD(u32 reserved_23_23 : 1, + __BITFIELD_FIELD(u32 fetsuspmsk : 1, + __BITFIELD_FIELD(u32 incomplpmsk : 1, + __BITFIELD_FIELD(u32 incompisoinmsk : 1, + __BITFIELD_FIELD(u32 oepintmsk : 1, + __BITFIELD_FIELD(u32 inepintmsk : 1, + __BITFIELD_FIELD(u32 epmismsk : 
1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + __BITFIELD_FIELD(u32 eopfmsk : 1, + __BITFIELD_FIELD(u32 isooutdropmsk : 1, + __BITFIELD_FIELD(u32 enumdonemsk : 1, + __BITFIELD_FIELD(u32 usbrstmsk : 1, + __BITFIELD_FIELD(u32 usbsuspmsk : 1, + __BITFIELD_FIELD(u32 erlysuspmsk : 1, + __BITFIELD_FIELD(u32 i2cint : 1, + __BITFIELD_FIELD(u32 ulpickintmsk : 1, + __BITFIELD_FIELD(u32 goutnakeffmsk : 1, + __BITFIELD_FIELD(u32 ginnakeffmsk : 1, + __BITFIELD_FIELD(u32 nptxfempmsk : 1, + __BITFIELD_FIELD(u32 rxflvlmsk : 1, + __BITFIELD_FIELD(u32 sofmsk : 1, + __BITFIELD_FIELD(u32 otgintmsk : 1, + __BITFIELD_FIELD(u32 modemismsk : 1, + __BITFIELD_FIELD(u32 reserved_0_0 : 1, ;)))))))))))))))))))))))))))))))) } s; }; @@ -331,7 +331,7 @@ union cvmx_usbcx_gintmsk { * automatically. */ union cvmx_usbcx_gintsts { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_gintsts_s * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt) @@ -509,38 +509,38 @@ union cvmx_usbcx_gintsts { * * 1'b1: Host mode */ struct cvmx_usbcx_gintsts_s { - __BITFIELD_FIELD(uint32_t wkupint : 1, - __BITFIELD_FIELD(uint32_t sessreqint : 1, - __BITFIELD_FIELD(uint32_t disconnint : 1, - __BITFIELD_FIELD(uint32_t conidstschng : 1, - __BITFIELD_FIELD(uint32_t reserved_27_27 : 1, - __BITFIELD_FIELD(uint32_t ptxfemp : 1, - __BITFIELD_FIELD(uint32_t hchint : 1, - __BITFIELD_FIELD(uint32_t prtint : 1, - __BITFIELD_FIELD(uint32_t reserved_23_23 : 1, - __BITFIELD_FIELD(uint32_t fetsusp : 1, - __BITFIELD_FIELD(uint32_t incomplp : 1, - __BITFIELD_FIELD(uint32_t incompisoin : 1, - __BITFIELD_FIELD(uint32_t oepint : 1, - __BITFIELD_FIELD(uint32_t iepint : 1, - __BITFIELD_FIELD(uint32_t epmis : 1, - __BITFIELD_FIELD(uint32_t reserved_16_16 : 1, - __BITFIELD_FIELD(uint32_t eopf : 1, - __BITFIELD_FIELD(uint32_t isooutdrop : 1, - __BITFIELD_FIELD(uint32_t enumdone : 1, - __BITFIELD_FIELD(uint32_t usbrst : 1, - __BITFIELD_FIELD(uint32_t usbsusp : 1, - __BITFIELD_FIELD(uint32_t erlysusp : 1, - __BITFIELD_FIELD(uint32_t i2cint : 1, - __BITFIELD_FIELD(uint32_t ulpickint : 1, - __BITFIELD_FIELD(uint32_t goutnakeff : 1, - __BITFIELD_FIELD(uint32_t ginnakeff : 1, - __BITFIELD_FIELD(uint32_t nptxfemp : 1, - __BITFIELD_FIELD(uint32_t rxflvl : 1, - __BITFIELD_FIELD(uint32_t sof : 1, - __BITFIELD_FIELD(uint32_t otgint : 1, - __BITFIELD_FIELD(uint32_t modemis : 1, - __BITFIELD_FIELD(uint32_t curmod : 1, + __BITFIELD_FIELD(u32 wkupint : 1, + __BITFIELD_FIELD(u32 sessreqint : 1, + __BITFIELD_FIELD(u32 disconnint : 1, + __BITFIELD_FIELD(u32 conidstschng : 1, + __BITFIELD_FIELD(u32 reserved_27_27 : 1, + __BITFIELD_FIELD(u32 ptxfemp : 1, + __BITFIELD_FIELD(u32 hchint : 1, + __BITFIELD_FIELD(u32 prtint : 1, + __BITFIELD_FIELD(u32 reserved_23_23 : 1, + __BITFIELD_FIELD(u32 fetsusp : 1, + __BITFIELD_FIELD(u32 incomplp : 1, + __BITFIELD_FIELD(u32 incompisoin : 1, + __BITFIELD_FIELD(u32 oepint : 1, + __BITFIELD_FIELD(u32 iepint : 1, + __BITFIELD_FIELD(u32 epmis : 1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + __BITFIELD_FIELD(u32 eopf : 1, + __BITFIELD_FIELD(u32 isooutdrop : 1, + __BITFIELD_FIELD(u32 enumdone : 1, + __BITFIELD_FIELD(u32 usbrst : 1, + __BITFIELD_FIELD(u32 usbsusp : 1, + __BITFIELD_FIELD(u32 erlysusp : 1, + __BITFIELD_FIELD(u32 i2cint : 1, + __BITFIELD_FIELD(u32 ulpickint : 1, + __BITFIELD_FIELD(u32 goutnakeff : 1, + __BITFIELD_FIELD(u32 ginnakeff : 1, + __BITFIELD_FIELD(u32 nptxfemp : 1, + __BITFIELD_FIELD(u32 rxflvl : 1, + __BITFIELD_FIELD(u32 sof : 1, + __BITFIELD_FIELD(u32 otgint : 1, + __BITFIELD_FIELD(u32 modemis : 1, + __BITFIELD_FIELD(u32 curmod 
: 1, ;)))))))))))))))))))))))))))))))) } s; }; @@ -554,7 +554,7 @@ union cvmx_usbcx_gintsts { * Non-Periodic TxFIFO. */ union cvmx_usbcx_gnptxfsiz { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_gnptxfsiz_s * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep) @@ -566,8 +566,8 @@ union cvmx_usbcx_gnptxfsiz { * Transmit FIFO RAM. */ struct cvmx_usbcx_gnptxfsiz_s { - __BITFIELD_FIELD(uint32_t nptxfdep : 16, - __BITFIELD_FIELD(uint32_t nptxfstaddr : 16, + __BITFIELD_FIELD(u32 nptxfdep : 16, + __BITFIELD_FIELD(u32 nptxfstaddr : 16, ;)) } s; }; @@ -581,7 +581,7 @@ union cvmx_usbcx_gnptxfsiz { * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue. */ union cvmx_usbcx_gnptxsts { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_gnptxsts_s * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop) @@ -617,10 +617,10 @@ union cvmx_usbcx_gnptxsts { * * Others: Reserved */ struct cvmx_usbcx_gnptxsts_s { - __BITFIELD_FIELD(uint32_t reserved_31_31 : 1, - __BITFIELD_FIELD(uint32_t nptxqtop : 7, - __BITFIELD_FIELD(uint32_t nptxqspcavail : 8, - __BITFIELD_FIELD(uint32_t nptxfspcavail : 16, + __BITFIELD_FIELD(u32 reserved_31_31 : 1, + __BITFIELD_FIELD(u32 nptxqtop : 7, + __BITFIELD_FIELD(u32 nptxqspcavail : 8, + __BITFIELD_FIELD(u32 nptxfspcavail : 16, ;)))) } s; }; @@ -634,7 +634,7 @@ union cvmx_usbcx_gnptxsts { * the core. */ union cvmx_usbcx_grstctl { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_grstctl_s * @ahbidle: AHB Master Idle (AHBIdle) @@ -739,16 +739,16 @@ union cvmx_usbcx_grstctl { * selected, the PHY domain has to be reset for proper operation. */ struct cvmx_usbcx_grstctl_s { - __BITFIELD_FIELD(uint32_t ahbidle : 1, - __BITFIELD_FIELD(uint32_t dmareq : 1, - __BITFIELD_FIELD(uint32_t reserved_11_29 : 19, - __BITFIELD_FIELD(uint32_t txfnum : 5, - __BITFIELD_FIELD(uint32_t txfflsh : 1, - __BITFIELD_FIELD(uint32_t rxfflsh : 1, - __BITFIELD_FIELD(uint32_t intknqflsh : 1, - __BITFIELD_FIELD(uint32_t frmcntrrst : 1, - __BITFIELD_FIELD(uint32_t hsftrst : 1, - __BITFIELD_FIELD(uint32_t csftrst : 1, + __BITFIELD_FIELD(u32 ahbidle : 1, + __BITFIELD_FIELD(u32 dmareq : 1, + __BITFIELD_FIELD(u32 reserved_11_29 : 19, + __BITFIELD_FIELD(u32 txfnum : 5, + __BITFIELD_FIELD(u32 txfflsh : 1, + __BITFIELD_FIELD(u32 rxfflsh : 1, + __BITFIELD_FIELD(u32 intknqflsh : 1, + __BITFIELD_FIELD(u32 frmcntrrst : 1, + __BITFIELD_FIELD(u32 hsftrst : 1, + __BITFIELD_FIELD(u32 csftrst : 1, ;)))))))))) } s; }; @@ -762,7 +762,7 @@ union cvmx_usbcx_grstctl { * RxFIFO. */ union cvmx_usbcx_grxfsiz { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_grxfsiz_s * @rxfdep: RxFIFO Depth (RxFDep) @@ -771,8 +771,8 @@ union cvmx_usbcx_grxfsiz { * * Maximum value is 32768 */ struct cvmx_usbcx_grxfsiz_s { - __BITFIELD_FIELD(uint32_t reserved_16_31 : 16, - __BITFIELD_FIELD(uint32_t rxfdep : 16, + __BITFIELD_FIELD(u32 reserved_16_31 : 16, + __BITFIELD_FIELD(u32 rxfdep : 16, ;)) } s; }; @@ -792,7 +792,7 @@ union cvmx_usbcx_grxfsiz { * hardware. */ union cvmx_usbcx_grxstsph { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_grxstsph_s * @pktsts: Packet Status (PktSts) @@ -814,11 +814,11 @@ union cvmx_usbcx_grxstsph { * packet belongs. 
*/ struct cvmx_usbcx_grxstsph_s { - __BITFIELD_FIELD(uint32_t reserved_21_31 : 11, - __BITFIELD_FIELD(uint32_t pktsts : 4, - __BITFIELD_FIELD(uint32_t dpid : 2, - __BITFIELD_FIELD(uint32_t bcnt : 11, - __BITFIELD_FIELD(uint32_t chnum : 4, + __BITFIELD_FIELD(u32 reserved_21_31 : 11, + __BITFIELD_FIELD(u32 pktsts : 4, + __BITFIELD_FIELD(u32 dpid : 2, + __BITFIELD_FIELD(u32 bcnt : 11, + __BITFIELD_FIELD(u32 chnum : 4, ;))))) } s; }; @@ -835,7 +835,7 @@ union cvmx_usbcx_grxstsph { * to this register after the initial programming. */ union cvmx_usbcx_gusbcfg { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_gusbcfg_s * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel) @@ -895,19 +895,19 @@ union cvmx_usbcx_gusbcfg { * * One 48-MHz PHY clock = 0.25 bit times */ struct cvmx_usbcx_gusbcfg_s { - __BITFIELD_FIELD(uint32_t reserved_17_31 : 15, - __BITFIELD_FIELD(uint32_t otgi2csel : 1, - __BITFIELD_FIELD(uint32_t phylpwrclksel : 1, - __BITFIELD_FIELD(uint32_t reserved_14_14 : 1, - __BITFIELD_FIELD(uint32_t usbtrdtim : 4, - __BITFIELD_FIELD(uint32_t hnpcap : 1, - __BITFIELD_FIELD(uint32_t srpcap : 1, - __BITFIELD_FIELD(uint32_t ddrsel : 1, - __BITFIELD_FIELD(uint32_t physel : 1, - __BITFIELD_FIELD(uint32_t fsintf : 1, - __BITFIELD_FIELD(uint32_t ulpi_utmi_sel : 1, - __BITFIELD_FIELD(uint32_t phyif : 1, - __BITFIELD_FIELD(uint32_t toutcal : 3, + __BITFIELD_FIELD(u32 reserved_17_31 : 15, + __BITFIELD_FIELD(u32 otgi2csel : 1, + __BITFIELD_FIELD(u32 phylpwrclksel : 1, + __BITFIELD_FIELD(u32 reserved_14_14 : 1, + __BITFIELD_FIELD(u32 usbtrdtim : 4, + __BITFIELD_FIELD(u32 hnpcap : 1, + __BITFIELD_FIELD(u32 srpcap : 1, + __BITFIELD_FIELD(u32 ddrsel : 1, + __BITFIELD_FIELD(u32 physel : 1, + __BITFIELD_FIELD(u32 fsintf : 1, + __BITFIELD_FIELD(u32 ulpi_utmi_sel : 1, + __BITFIELD_FIELD(u32 phyif : 1, + __BITFIELD_FIELD(u32 toutcal : 3, ;))))))))))))) } s; }; @@ -925,15 +925,15 @@ union cvmx_usbcx_gusbcfg { * in the corresponding Host Channel-n Interrupt register. */ union cvmx_usbcx_haint { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_haint_s * @haint: Channel Interrupts (HAINT) * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15 */ struct cvmx_usbcx_haint_s { - __BITFIELD_FIELD(uint32_t reserved_16_31 : 16, - __BITFIELD_FIELD(uint32_t haint : 16, + __BITFIELD_FIELD(u32 reserved_16_31 : 16, + __BITFIELD_FIELD(u32 haint : 16, ;)) } s; }; @@ -950,15 +950,15 @@ union cvmx_usbcx_haint { * Mask interrupt: 1'b0 Unmask interrupt: 1'b1 */ union cvmx_usbcx_haintmsk { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_haintmsk_s * @haintmsk: Channel Interrupt Mask (HAINTMsk) * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15 */ struct cvmx_usbcx_haintmsk_s { - __BITFIELD_FIELD(uint32_t reserved_16_31 : 16, - __BITFIELD_FIELD(uint32_t haintmsk : 16, + __BITFIELD_FIELD(u32 reserved_16_31 : 16, + __BITFIELD_FIELD(u32 haintmsk : 16, ;)) } s; }; @@ -970,7 +970,7 @@ union cvmx_usbcx_haintmsk { * */ union cvmx_usbcx_hccharx { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hccharx_s * @chena: Channel Enable (ChEna) @@ -1028,17 +1028,17 @@ union cvmx_usbcx_hccharx { * Indicates the maximum packet size of the associated endpoint. 
*/ struct cvmx_usbcx_hccharx_s { - __BITFIELD_FIELD(uint32_t chena : 1, - __BITFIELD_FIELD(uint32_t chdis : 1, - __BITFIELD_FIELD(uint32_t oddfrm : 1, - __BITFIELD_FIELD(uint32_t devaddr : 7, - __BITFIELD_FIELD(uint32_t ec : 2, - __BITFIELD_FIELD(uint32_t eptype : 2, - __BITFIELD_FIELD(uint32_t lspddev : 1, - __BITFIELD_FIELD(uint32_t reserved_16_16 : 1, - __BITFIELD_FIELD(uint32_t epdir : 1, - __BITFIELD_FIELD(uint32_t epnum : 4, - __BITFIELD_FIELD(uint32_t mps : 11, + __BITFIELD_FIELD(u32 chena : 1, + __BITFIELD_FIELD(u32 chdis : 1, + __BITFIELD_FIELD(u32 oddfrm : 1, + __BITFIELD_FIELD(u32 devaddr : 7, + __BITFIELD_FIELD(u32 ec : 2, + __BITFIELD_FIELD(u32 eptype : 2, + __BITFIELD_FIELD(u32 lspddev : 1, + __BITFIELD_FIELD(u32 reserved_16_16 : 1, + __BITFIELD_FIELD(u32 epdir : 1, + __BITFIELD_FIELD(u32 epnum : 4, + __BITFIELD_FIELD(u32 mps : 11, ;))))))))))) } s; }; @@ -1052,7 +1052,7 @@ union cvmx_usbcx_hccharx { * register after initializing the host. */ union cvmx_usbcx_hcfg { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hcfg_s * @fslssupp: FS- and LS-Only Support (FSLSSupp) @@ -1084,9 +1084,9 @@ union cvmx_usbcx_hcfg { * * 2'b11: Reserved */ struct cvmx_usbcx_hcfg_s { - __BITFIELD_FIELD(uint32_t reserved_3_31 : 29, - __BITFIELD_FIELD(uint32_t fslssupp : 1, - __BITFIELD_FIELD(uint32_t fslspclksel : 2, + __BITFIELD_FIELD(u32 reserved_3_31 : 29, + __BITFIELD_FIELD(u32 fslssupp : 1, + __BITFIELD_FIELD(u32 fslspclksel : 2, ;))) } s; }; @@ -1106,7 +1106,7 @@ union cvmx_usbcx_hcfg { * HAINT and GINTSTS registers. */ union cvmx_usbcx_hcintx { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hcintx_s * @datatglerr: Data Toggle Error (DataTglErr) @@ -1126,18 +1126,18 @@ union cvmx_usbcx_hcintx { * Transfer completed normally without any errors. */ struct cvmx_usbcx_hcintx_s { - __BITFIELD_FIELD(uint32_t reserved_11_31 : 21, - __BITFIELD_FIELD(uint32_t datatglerr : 1, - __BITFIELD_FIELD(uint32_t frmovrun : 1, - __BITFIELD_FIELD(uint32_t bblerr : 1, - __BITFIELD_FIELD(uint32_t xacterr : 1, - __BITFIELD_FIELD(uint32_t nyet : 1, - __BITFIELD_FIELD(uint32_t ack : 1, - __BITFIELD_FIELD(uint32_t nak : 1, - __BITFIELD_FIELD(uint32_t stall : 1, - __BITFIELD_FIELD(uint32_t ahberr : 1, - __BITFIELD_FIELD(uint32_t chhltd : 1, - __BITFIELD_FIELD(uint32_t xfercompl : 1, + __BITFIELD_FIELD(u32 reserved_11_31 : 21, + __BITFIELD_FIELD(u32 datatglerr : 1, + __BITFIELD_FIELD(u32 frmovrun : 1, + __BITFIELD_FIELD(u32 bblerr : 1, + __BITFIELD_FIELD(u32 xacterr : 1, + __BITFIELD_FIELD(u32 nyet : 1, + __BITFIELD_FIELD(u32 ack : 1, + __BITFIELD_FIELD(u32 nak : 1, + __BITFIELD_FIELD(u32 stall : 1, + __BITFIELD_FIELD(u32 ahberr : 1, + __BITFIELD_FIELD(u32 chhltd : 1, + __BITFIELD_FIELD(u32 xfercompl : 1, ;)))))))))))) } s; }; @@ -1152,7 +1152,7 @@ union cvmx_usbcx_hcintx { * Mask interrupt: 1'b0 Unmask interrupt: 1'b1 */ union cvmx_usbcx_hcintmskx { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hcintmskx_s * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk) @@ -1168,18 +1168,18 @@ union cvmx_usbcx_hcintmskx { * @xfercomplmsk: Transfer Completed Mask (XferComplMsk) */ struct cvmx_usbcx_hcintmskx_s { - __BITFIELD_FIELD(uint32_t reserved_11_31 : 21, - __BITFIELD_FIELD(uint32_t datatglerrmsk : 1, - __BITFIELD_FIELD(uint32_t frmovrunmsk : 1, - __BITFIELD_FIELD(uint32_t bblerrmsk : 1, - __BITFIELD_FIELD(uint32_t xacterrmsk : 1, - __BITFIELD_FIELD(uint32_t nyetmsk : 1, - __BITFIELD_FIELD(uint32_t ackmsk : 1, - __BITFIELD_FIELD(uint32_t nakmsk : 1, - __BITFIELD_FIELD(uint32_t stallmsk : 1, - 
__BITFIELD_FIELD(uint32_t ahberrmsk : 1, - __BITFIELD_FIELD(uint32_t chhltdmsk : 1, - __BITFIELD_FIELD(uint32_t xfercomplmsk : 1, + __BITFIELD_FIELD(u32 reserved_11_31 : 21, + __BITFIELD_FIELD(u32 datatglerrmsk : 1, + __BITFIELD_FIELD(u32 frmovrunmsk : 1, + __BITFIELD_FIELD(u32 bblerrmsk : 1, + __BITFIELD_FIELD(u32 xacterrmsk : 1, + __BITFIELD_FIELD(u32 nyetmsk : 1, + __BITFIELD_FIELD(u32 ackmsk : 1, + __BITFIELD_FIELD(u32 nakmsk : 1, + __BITFIELD_FIELD(u32 stallmsk : 1, + __BITFIELD_FIELD(u32 ahberrmsk : 1, + __BITFIELD_FIELD(u32 chhltdmsk : 1, + __BITFIELD_FIELD(u32 xfercomplmsk : 1, ;)))))))))))) } s; }; @@ -1191,7 +1191,7 @@ union cvmx_usbcx_hcintmskx { * */ union cvmx_usbcx_hcspltx { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hcspltx_s * @spltena: Split Enable (SpltEna) @@ -1219,12 +1219,12 @@ union cvmx_usbcx_hcspltx { * translator. */ struct cvmx_usbcx_hcspltx_s { - __BITFIELD_FIELD(uint32_t spltena : 1, - __BITFIELD_FIELD(uint32_t reserved_17_30 : 14, - __BITFIELD_FIELD(uint32_t compsplt : 1, - __BITFIELD_FIELD(uint32_t xactpos : 2, - __BITFIELD_FIELD(uint32_t hubaddr : 7, - __BITFIELD_FIELD(uint32_t prtaddr : 7, + __BITFIELD_FIELD(u32 spltena : 1, + __BITFIELD_FIELD(u32 reserved_17_30 : 14, + __BITFIELD_FIELD(u32 compsplt : 1, + __BITFIELD_FIELD(u32 xactpos : 2, + __BITFIELD_FIELD(u32 hubaddr : 7, + __BITFIELD_FIELD(u32 prtaddr : 7, ;)))))) } s; }; @@ -1236,7 +1236,7 @@ union cvmx_usbcx_hcspltx { * */ union cvmx_usbcx_hctsizx { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hctsizx_s * @dopng: Do Ping (DoPng) @@ -1265,10 +1265,10 @@ union cvmx_usbcx_hctsizx { * size for IN transactions (periodic and non-periodic). */ struct cvmx_usbcx_hctsizx_s { - __BITFIELD_FIELD(uint32_t dopng : 1, - __BITFIELD_FIELD(uint32_t pid : 2, - __BITFIELD_FIELD(uint32_t pktcnt : 10, - __BITFIELD_FIELD(uint32_t xfersize : 19, + __BITFIELD_FIELD(u32 dopng : 1, + __BITFIELD_FIELD(u32 pid : 2, + __BITFIELD_FIELD(u32 pktcnt : 10, + __BITFIELD_FIELD(u32 xfersize : 19, ;)))) } s; }; @@ -1282,7 +1282,7 @@ union cvmx_usbcx_hctsizx { * which the O2P USB core has enumerated. */ union cvmx_usbcx_hfir { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hfir_s * @frint: Frame Interval (FrInt) @@ -1303,8 +1303,8 @@ union cvmx_usbcx_hfir { * * 1 ms (PHY clock frequency for FS/LS) */ struct cvmx_usbcx_hfir_s { - __BITFIELD_FIELD(uint32_t reserved_16_31 : 16, - __BITFIELD_FIELD(uint32_t frint : 16, + __BITFIELD_FIELD(u32 reserved_16_31 : 16, + __BITFIELD_FIELD(u32 frint : 16, ;)) } s; }; @@ -1319,7 +1319,7 @@ union cvmx_usbcx_hfir { * in the current (micro)frame. */ union cvmx_usbcx_hfnum { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hfnum_s * @frrem: Frame Time Remaining (FrRem) @@ -1333,8 +1333,8 @@ union cvmx_usbcx_hfnum { * USB, and is reset to 0 when it reaches 16'h3FFF. */ struct cvmx_usbcx_hfnum_s { - __BITFIELD_FIELD(uint32_t frrem : 16, - __BITFIELD_FIELD(uint32_t frnum : 16, + __BITFIELD_FIELD(u32 frrem : 16, + __BITFIELD_FIELD(u32 frnum : 16, ;)) } s; }; @@ -1355,7 +1355,7 @@ union cvmx_usbcx_hfnum { * the application must write a 1 to the bit to clear the interrupt. */ union cvmx_usbcx_hprt { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hprt_s * @prtspd: Port Speed (PrtSpd) @@ -1461,21 +1461,21 @@ union cvmx_usbcx_hprt { * * 1: A device is attached to the port. 
*/ struct cvmx_usbcx_hprt_s { - __BITFIELD_FIELD(uint32_t reserved_19_31 : 13, - __BITFIELD_FIELD(uint32_t prtspd : 2, - __BITFIELD_FIELD(uint32_t prttstctl : 4, - __BITFIELD_FIELD(uint32_t prtpwr : 1, - __BITFIELD_FIELD(uint32_t prtlnsts : 2, - __BITFIELD_FIELD(uint32_t reserved_9_9 : 1, - __BITFIELD_FIELD(uint32_t prtrst : 1, - __BITFIELD_FIELD(uint32_t prtsusp : 1, - __BITFIELD_FIELD(uint32_t prtres : 1, - __BITFIELD_FIELD(uint32_t prtovrcurrchng : 1, - __BITFIELD_FIELD(uint32_t prtovrcurract : 1, - __BITFIELD_FIELD(uint32_t prtenchng : 1, - __BITFIELD_FIELD(uint32_t prtena : 1, - __BITFIELD_FIELD(uint32_t prtconndet : 1, - __BITFIELD_FIELD(uint32_t prtconnsts : 1, + __BITFIELD_FIELD(u32 reserved_19_31 : 13, + __BITFIELD_FIELD(u32 prtspd : 2, + __BITFIELD_FIELD(u32 prttstctl : 4, + __BITFIELD_FIELD(u32 prtpwr : 1, + __BITFIELD_FIELD(u32 prtlnsts : 2, + __BITFIELD_FIELD(u32 reserved_9_9 : 1, + __BITFIELD_FIELD(u32 prtrst : 1, + __BITFIELD_FIELD(u32 prtsusp : 1, + __BITFIELD_FIELD(u32 prtres : 1, + __BITFIELD_FIELD(u32 prtovrcurrchng : 1, + __BITFIELD_FIELD(u32 prtovrcurract : 1, + __BITFIELD_FIELD(u32 prtenchng : 1, + __BITFIELD_FIELD(u32 prtena : 1, + __BITFIELD_FIELD(u32 prtconndet : 1, + __BITFIELD_FIELD(u32 prtconnsts : 1, ;))))))))))))))) } s; }; @@ -1489,7 +1489,7 @@ union cvmx_usbcx_hprt { * TxFIFO, as shown in Figures 310 and 311. */ union cvmx_usbcx_hptxfsiz { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hptxfsiz_s * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize) @@ -1499,8 +1499,8 @@ union cvmx_usbcx_hptxfsiz { * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr) */ struct cvmx_usbcx_hptxfsiz_s { - __BITFIELD_FIELD(uint32_t ptxfsize : 16, - __BITFIELD_FIELD(uint32_t ptxfstaddr : 16, + __BITFIELD_FIELD(u32 ptxfsize : 16, + __BITFIELD_FIELD(u32 ptxfstaddr : 16, ;)) } s; }; @@ -1514,7 +1514,7 @@ union cvmx_usbcx_hptxfsiz { * TxFIFO and the Periodic Transmit Request Queue */ union cvmx_usbcx_hptxsts { - uint32_t u32; + u32 u32; /** * struct cvmx_usbcx_hptxsts_s * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop) @@ -1555,9 +1555,9 @@ union cvmx_usbcx_hptxsts { * * Others: Reserved */ struct cvmx_usbcx_hptxsts_s { - __BITFIELD_FIELD(uint32_t ptxqtop : 8, - __BITFIELD_FIELD(uint32_t ptxqspcavail : 8, - __BITFIELD_FIELD(uint32_t ptxfspcavail : 16, + __BITFIELD_FIELD(u32 ptxqtop : 8, + __BITFIELD_FIELD(u32 ptxqspcavail : 8, + __BITFIELD_FIELD(u32 ptxfspcavail : 16, ;))) } s; }; @@ -1571,7 +1571,7 @@ union cvmx_usbcx_hptxsts { * hreset and phy_rst signals. */ union cvmx_usbnx_clk_ctl { - uint64_t u64; + u64 u64; /** * struct cvmx_usbnx_clk_ctl_s * @divide2: The 'hclk' used by the USB subsystem is derived @@ -1661,21 +1661,21 @@ union cvmx_usbnx_clk_ctl { * until AFTER this field is set and then read. 
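All of the register unions in this header declare their fields through the kernel's __BITFIELD_FIELD helper, as in the cvmx_usbnx_clk_ctl_s layout that continues below, so one nested field list serves both byte orders. For reference only (this snippet is not part of the patch), the MIPS <asm/bitfield.h> definition is roughly:

#ifdef __MIPSEB__
/* Big-endian: emit the fields in the order they are written. */
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more
#else
/* Little-endian: emit the trailing fields first, reversing the layout. */
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;
#endif

A declaration such as __BITFIELD_FIELD(u32 frrem : 16, __BITFIELD_FIELD(u32 frnum : 16, ;)) therefore expands to frrem followed by frnum on big-endian Octeon and to the reverse order on little-endian builds, keeping the union's plain u32/u64 view consistent with the hardware register. The uint32_t/uint64_t to u32/u64 conversion in this patch does not change that layout, only the type names.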
*/ struct cvmx_usbnx_clk_ctl_s { - __BITFIELD_FIELD(uint64_t reserved_20_63 : 44, - __BITFIELD_FIELD(uint64_t divide2 : 2, - __BITFIELD_FIELD(uint64_t hclk_rst : 1, - __BITFIELD_FIELD(uint64_t p_x_on : 1, - __BITFIELD_FIELD(uint64_t p_rtype : 2, - __BITFIELD_FIELD(uint64_t p_com_on : 1, - __BITFIELD_FIELD(uint64_t p_c_sel : 2, - __BITFIELD_FIELD(uint64_t cdiv_byp : 1, - __BITFIELD_FIELD(uint64_t sd_mode : 2, - __BITFIELD_FIELD(uint64_t s_bist : 1, - __BITFIELD_FIELD(uint64_t por : 1, - __BITFIELD_FIELD(uint64_t enable : 1, - __BITFIELD_FIELD(uint64_t prst : 1, - __BITFIELD_FIELD(uint64_t hrst : 1, - __BITFIELD_FIELD(uint64_t divide : 3, + __BITFIELD_FIELD(u64 reserved_20_63 : 44, + __BITFIELD_FIELD(u64 divide2 : 2, + __BITFIELD_FIELD(u64 hclk_rst : 1, + __BITFIELD_FIELD(u64 p_x_on : 1, + __BITFIELD_FIELD(u64 p_rtype : 2, + __BITFIELD_FIELD(u64 p_com_on : 1, + __BITFIELD_FIELD(u64 p_c_sel : 2, + __BITFIELD_FIELD(u64 cdiv_byp : 1, + __BITFIELD_FIELD(u64 sd_mode : 2, + __BITFIELD_FIELD(u64 s_bist : 1, + __BITFIELD_FIELD(u64 por : 1, + __BITFIELD_FIELD(u64 enable : 1, + __BITFIELD_FIELD(u64 prst : 1, + __BITFIELD_FIELD(u64 hrst : 1, + __BITFIELD_FIELD(u64 divide : 3, ;))))))))))))))) } s; }; @@ -1688,7 +1688,7 @@ union cvmx_usbnx_clk_ctl { * Contains general control and status information for the USBN block. */ union cvmx_usbnx_usbp_ctl_status { - uint64_t u64; + u64 u64; /** * struct cvmx_usbnx_usbp_ctl_status_s * @txrisetune: HS Transmitter Rise/Fall Time Adjustment @@ -1804,41 +1804,41 @@ union cvmx_usbnx_usbp_ctl_status { * de-assertion. */ struct cvmx_usbnx_usbp_ctl_status_s { - __BITFIELD_FIELD(uint64_t txrisetune : 1, - __BITFIELD_FIELD(uint64_t txvreftune : 4, - __BITFIELD_FIELD(uint64_t txfslstune : 4, - __BITFIELD_FIELD(uint64_t txhsxvtune : 2, - __BITFIELD_FIELD(uint64_t sqrxtune : 3, - __BITFIELD_FIELD(uint64_t compdistune : 3, - __BITFIELD_FIELD(uint64_t otgtune : 3, - __BITFIELD_FIELD(uint64_t otgdisable : 1, - __BITFIELD_FIELD(uint64_t portreset : 1, - __BITFIELD_FIELD(uint64_t drvvbus : 1, - __BITFIELD_FIELD(uint64_t lsbist : 1, - __BITFIELD_FIELD(uint64_t fsbist : 1, - __BITFIELD_FIELD(uint64_t hsbist : 1, - __BITFIELD_FIELD(uint64_t bist_done : 1, - __BITFIELD_FIELD(uint64_t bist_err : 1, - __BITFIELD_FIELD(uint64_t tdata_out : 4, - __BITFIELD_FIELD(uint64_t siddq : 1, - __BITFIELD_FIELD(uint64_t txpreemphasistune : 1, - __BITFIELD_FIELD(uint64_t dma_bmode : 1, - __BITFIELD_FIELD(uint64_t usbc_end : 1, - __BITFIELD_FIELD(uint64_t usbp_bist : 1, - __BITFIELD_FIELD(uint64_t tclk : 1, - __BITFIELD_FIELD(uint64_t dp_pulld : 1, - __BITFIELD_FIELD(uint64_t dm_pulld : 1, - __BITFIELD_FIELD(uint64_t hst_mode : 1, - __BITFIELD_FIELD(uint64_t tuning : 4, - __BITFIELD_FIELD(uint64_t tx_bs_enh : 1, - __BITFIELD_FIELD(uint64_t tx_bs_en : 1, - __BITFIELD_FIELD(uint64_t loop_enb : 1, - __BITFIELD_FIELD(uint64_t vtest_enb : 1, - __BITFIELD_FIELD(uint64_t bist_enb : 1, - __BITFIELD_FIELD(uint64_t tdata_sel : 1, - __BITFIELD_FIELD(uint64_t taddr_in : 4, - __BITFIELD_FIELD(uint64_t tdata_in : 8, - __BITFIELD_FIELD(uint64_t ate_reset : 1, + __BITFIELD_FIELD(u64 txrisetune : 1, + __BITFIELD_FIELD(u64 txvreftune : 4, + __BITFIELD_FIELD(u64 txfslstune : 4, + __BITFIELD_FIELD(u64 txhsxvtune : 2, + __BITFIELD_FIELD(u64 sqrxtune : 3, + __BITFIELD_FIELD(u64 compdistune : 3, + __BITFIELD_FIELD(u64 otgtune : 3, + __BITFIELD_FIELD(u64 otgdisable : 1, + __BITFIELD_FIELD(u64 portreset : 1, + __BITFIELD_FIELD(u64 drvvbus : 1, + __BITFIELD_FIELD(u64 lsbist : 1, + __BITFIELD_FIELD(u64 fsbist : 1, + 
__BITFIELD_FIELD(u64 hsbist : 1, + __BITFIELD_FIELD(u64 bist_done : 1, + __BITFIELD_FIELD(u64 bist_err : 1, + __BITFIELD_FIELD(u64 tdata_out : 4, + __BITFIELD_FIELD(u64 siddq : 1, + __BITFIELD_FIELD(u64 txpreemphasistune : 1, + __BITFIELD_FIELD(u64 dma_bmode : 1, + __BITFIELD_FIELD(u64 usbc_end : 1, + __BITFIELD_FIELD(u64 usbp_bist : 1, + __BITFIELD_FIELD(u64 tclk : 1, + __BITFIELD_FIELD(u64 dp_pulld : 1, + __BITFIELD_FIELD(u64 dm_pulld : 1, + __BITFIELD_FIELD(u64 hst_mode : 1, + __BITFIELD_FIELD(u64 tuning : 4, + __BITFIELD_FIELD(u64 tx_bs_enh : 1, + __BITFIELD_FIELD(u64 tx_bs_en : 1, + __BITFIELD_FIELD(u64 loop_enb : 1, + __BITFIELD_FIELD(u64 vtest_enb : 1, + __BITFIELD_FIELD(u64 bist_enb : 1, + __BITFIELD_FIELD(u64 tdata_sel : 1, + __BITFIELD_FIELD(u64 taddr_in : 4, + __BITFIELD_FIELD(u64 tdata_in : 8, + __BITFIELD_FIELD(u64 ate_reset : 1, ;))))))))))))))))))))))))))))))))))) } s; }; diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index fd9b3d899c1f..e13a4ab46977 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -118,13 +118,20 @@ void cvm_oct_adjust_link(struct net_device *dev) struct octeon_ethernet *priv = netdev_priv(dev); cvmx_helper_link_info_t link_info; + link_info.u64 = 0; + link_info.s.link_up = priv->phydev->link ? 1 : 0; + link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0; + link_info.s.speed = priv->phydev->speed; + priv->link_info = link_info.u64; + + /* + * The polling task need to know about link status changes. + */ + if (priv->poll) + priv->poll(dev); + if (priv->last_link != priv->phydev->link) { priv->last_link = priv->phydev->link; - link_info.u64 = 0; - link_info.s.link_up = priv->last_link ? 1 : 0; - link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0; - link_info.s.speed = priv->phydev->speed; - cvmx_helper_link_set(priv->port, link_info); cvm_oct_note_carrier(priv, link_info); } @@ -174,13 +181,22 @@ int cvm_oct_phy_setup_device(struct net_device *dev) goto no_phy; phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0); + if (!phy_node && of_phy_is_fixed_link(priv->of_node)) { + int rc; + + rc = of_phy_register_fixed_link(priv->of_node); + if (rc) + return rc; + + phy_node = of_node_get(priv->of_node); + } if (!phy_node) goto no_phy; priv->phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, PHY_INTERFACE_MODE_GMII); - if (priv->phydev == NULL) + if (!priv->phydev) return -ENODEV; priv->last_link = 0; diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c index 5a5cdb3cd740..d6172e4dace5 100644 --- a/drivers/staging/octeon/ethernet-mem.c +++ b/drivers/staging/octeon/ethernet-mem.c @@ -34,7 +34,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) while (freed) { struct sk_buff *skb = dev_alloc_skb(size + 256); - if (unlikely(skb == NULL)) + if (unlikely(!skb)) break; skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f)); *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; @@ -98,7 +98,7 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements) * just before the block. 
*/ memory = kmalloc(size + 256, GFP_ATOMIC); - if (unlikely(memory == NULL)) { + if (unlikely(!memory)) { pr_warn("Unable to allocate %u bytes for FPA pool %d\n", elements * size, pool); break; diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c index 1055ee14b66a..91b148cfcbdb 100644 --- a/drivers/staging/octeon/ethernet-rgmii.c +++ b/drivers/staging/octeon/ethernet-rgmii.c @@ -30,8 +30,6 @@ static DEFINE_SPINLOCK(global_register_lock); -static int number_rgmii_ports; - static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable) { union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; @@ -63,251 +61,106 @@ static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable) gmxx_rxx_int_reg.u64); } -static void cvm_oct_rgmii_poll(struct net_device *dev) +static void cvm_oct_check_preamble_errors(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); - unsigned long flags = 0; cvmx_helper_link_info_t link_info; - int use_global_register_lock = (priv->phydev == NULL); + unsigned long flags; + + link_info.u64 = priv->link_info; - BUG_ON(in_interrupt()); - if (use_global_register_lock) { + /* + * Take the global register lock since we are going to + * touch registers that affect more than one port. + */ + spin_lock_irqsave(&global_register_lock, flags); + + if (link_info.s.speed == 10 && priv->last_speed == 10) { /* - * Take the global register lock since we are going to - * touch registers that affect more than one port. + * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are + * getting preamble errors. */ - spin_lock_irqsave(&global_register_lock, flags); - } else { - mutex_lock(&priv->phydev->mdio.bus->mdio_lock); - } + int interface = INTERFACE(priv->port); + int index = INDEX(priv->port); + union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; - link_info = cvmx_helper_link_get(priv->port); - if (link_info.u64 == priv->link_info) { - if (link_info.s.speed == 10) { + gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG + (index, interface)); + if (gmxx_rxx_int_reg.s.pcterr) { /* - * Read the GMXX_RXX_INT_REG[PCTERR] bit and - * see if we are getting preamble errors. + * We are getting preamble errors at 10Mbps. Most + * likely the PHY is giving us packets with misaligned + * preambles. In order to get these packets we need to + * disable preamble checking and do it in software. */ - int interface = INTERFACE(priv->port); - int index = INDEX(priv->port); - union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; - - gmxx_rxx_int_reg.u64 = - cvmx_read_csr(CVMX_GMXX_RXX_INT_REG - (index, interface)); - if (gmxx_rxx_int_reg.s.pcterr) { - /* - * We are getting preamble errors at - * 10Mbps. Most likely the PHY is - * giving us packets with mis aligned - * preambles. In order to get these - * packets we need to disable preamble - * checking and do it in software. - */ - cvm_oct_set_hw_preamble(priv, false); - printk_ratelimited("%s: Using 10Mbps with software preamble removal\n", - dev->name); - } + cvm_oct_set_hw_preamble(priv, false); + printk_ratelimited("%s: Using 10Mbps with software preamble removal\n", + dev->name); } - - if (use_global_register_lock) - spin_unlock_irqrestore(&global_register_lock, flags); - else - mutex_unlock(&priv->phydev->mdio.bus->mdio_lock); - return; - } - - /* Since the 10Mbps preamble workaround is allowed we need to enable - * preamble checking, FCS stripping, and clear error bits on - * every speed change. 
If errors occur during 10Mbps operation - * the above code will change this stuff - */ - cvm_oct_set_hw_preamble(priv, true); - - if (priv->phydev == NULL) { - link_info = cvmx_helper_link_autoconf(priv->port); - priv->link_info = link_info.u64; - } - - if (use_global_register_lock) - spin_unlock_irqrestore(&global_register_lock, flags); - else - mutex_unlock(&priv->phydev->mdio.bus->mdio_lock); - - if (priv->phydev == NULL) { - /* Tell core. */ - if (link_info.s.link_up) { - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); - } else if (netif_carrier_ok(dev)) { - netif_carrier_off(dev); - } - cvm_oct_note_carrier(priv, link_info); + } else { + /* + * Since the 10Mbps preamble workaround is allowed we need to + * enable preamble checking, FCS stripping, and clear error + * bits on every speed change. If errors occur during 10Mbps + * operation the above code will change this stuff + */ + if (priv->last_speed != link_info.s.speed) + cvm_oct_set_hw_preamble(priv, true); + priv->last_speed = link_info.s.speed; } + spin_unlock_irqrestore(&global_register_lock, flags); } -static int cmv_oct_rgmii_gmx_interrupt(int interface) +static void cvm_oct_rgmii_poll(struct net_device *dev) { - int index; - int count = 0; - - /* Loop through every port of this interface */ - for (index = 0; - index < cvmx_helper_ports_on_interface(interface); - index++) { - union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg; + struct octeon_ethernet *priv = netdev_priv(dev); + cvmx_helper_link_info_t link_info; + bool status_change; - /* Read the GMX interrupt status bits */ - gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG - (index, interface)); - gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN - (index, interface)); + link_info = cvmx_helper_link_autoconf(priv->port); + status_change = priv->link_info != link_info.u64; + priv->link_info = link_info.u64; - /* Poll the port if inband status changed */ - if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link || - gmx_rx_int_reg.s.phy_spd) { - struct net_device *dev = - cvm_oct_device[cvmx_helper_get_ipd_port - (interface, index)]; - struct octeon_ethernet *priv = netdev_priv(dev); + cvm_oct_check_preamble_errors(dev); - if (dev && !atomic_read(&cvm_oct_poll_queue_stopping)) - queue_work(cvm_oct_poll_queue, - &priv->port_work); + if (likely(!status_change)) + return; - gmx_rx_int_reg.u64 = 0; - gmx_rx_int_reg.s.phy_dupx = 1; - gmx_rx_int_reg.s.phy_link = 1; - gmx_rx_int_reg.s.phy_spd = 1; - cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), - gmx_rx_int_reg.u64); - count++; - } + /* Tell core. */ + if (link_info.s.link_up) { + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } else if (netif_carrier_ok(dev)) { + netif_carrier_off(dev); } - return count; -} - -static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id) -{ - union cvmx_npi_rsl_int_blocks rsl_int_blocks; - int count = 0; - - rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS); - - /* Check and see if this interrupt was caused by the GMX0 block */ - if (rsl_int_blocks.s.gmx0) - count += cmv_oct_rgmii_gmx_interrupt(0); - - /* Check and see if this interrupt was caused by the GMX1 block */ - if (rsl_int_blocks.s.gmx1) - count += cmv_oct_rgmii_gmx_interrupt(1); - - return count ? 
IRQ_HANDLED : IRQ_NONE; + cvm_oct_note_carrier(priv, link_info); } int cvm_oct_rgmii_open(struct net_device *dev) { - return cvm_oct_common_open(dev, cvm_oct_rgmii_poll); -} - -static void cvm_oct_rgmii_immediate_poll(struct work_struct *work) -{ - struct octeon_ethernet *priv = - container_of(work, struct octeon_ethernet, port_work); - cvm_oct_rgmii_poll(cvm_oct_device[priv->port]); -} - -int cvm_oct_rgmii_init(struct net_device *dev) -{ struct octeon_ethernet *priv = netdev_priv(dev); - int r; - - cvm_oct_common_init(dev); - INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll); - /* - * Due to GMX errata in CN3XXX series chips, it is necessary - * to take the link down immediately when the PHY changes - * state. In order to do this we call the poll function every - * time the RGMII inband status changes. This may cause - * problems if the PHY doesn't implement inband status - * properly. - */ - if (number_rgmii_ports == 0) { - r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt, - IRQF_SHARED, "RGMII", &number_rgmii_ports); - if (r != 0) - return r; - } - number_rgmii_ports++; - - /* - * Only true RGMII ports need to be polled. In GMII mode, port - * 0 is really a RGMII port. - */ - if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) - && (priv->port == 0)) - || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { + int ret; - if (!octeon_is_simulation()) { + ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll); + if (ret) + return ret; - union cvmx_gmxx_rxx_int_en gmx_rx_int_en; - int interface = INTERFACE(priv->port); - int index = INDEX(priv->port); - - /* - * Enable interrupts on inband status changes - * for this port. - */ - gmx_rx_int_en.u64 = 0; - gmx_rx_int_en.s.phy_dupx = 1; - gmx_rx_int_en.s.phy_link = 1; - gmx_rx_int_en.s.phy_spd = 1; - cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), - gmx_rx_int_en.u64); + if (priv->phydev) { + /* + * In phydev mode, we need still periodic polling for the + * preamble error checking, and we also need to call this + * function on every link state change. + * + * Only true RGMII ports need to be polled. In GMII mode, port + * 0 is really a RGMII port. + */ + if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII && + priv->port == 0) || + (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { + priv->poll = cvm_oct_check_preamble_errors; + cvm_oct_check_preamble_errors(dev); } } return 0; } - -void cvm_oct_rgmii_uninit(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - - cvm_oct_common_uninit(dev); - - /* - * Only true RGMII ports need to be polled. In GMII mode, port - * 0 is really a RGMII port. - */ - if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII) - && (priv->port == 0)) - || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) { - - if (!octeon_is_simulation()) { - - union cvmx_gmxx_rxx_int_en gmx_rx_int_en; - int interface = INTERFACE(priv->port); - int index = INDEX(priv->port); - - /* - * Disable interrupts on inband status changes - * for this port. - */ - gmx_rx_int_en.u64 = - cvmx_read_csr(CVMX_GMXX_RXX_INT_EN - (index, interface)); - gmx_rx_int_en.s.phy_dupx = 0; - gmx_rx_int_en.s.phy_link = 0; - gmx_rx_int_en.s.phy_spd = 0; - cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface), - gmx_rx_int_en.u64); - } - } - - /* Remove the interrupt handler when the last port is removed. 
*/ - number_rgmii_ports--; - if (number_rgmii_ports == 0) - free_irq(OCTEON_IRQ_RML, &number_rgmii_ports); - cancel_work_sync(&priv->port_work); -} diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 6aed3cf6c0b4..b6993b0b8170 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -26,8 +26,6 @@ #include <net/xfrm.h> #endif /* CONFIG_XFRM */ -#include <linux/atomic.h> - #include <asm/octeon/octeon.h> #include "ethernet-defines.h" @@ -103,7 +101,6 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { - u8 *ptr = cvmx_phys_to_ptr(work->packet_ptr.s.addr); int i = 0; @@ -116,17 +113,11 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) } if (*ptr == 0xd5) { - /* - printk_ratelimited("Port %d received 0xd5 preamble\n", - port); - */ + /* Port received 0xd5 preamble */ work->packet_ptr.s.addr += i + 1; work->word1.len -= i + 5; } else if ((*ptr & 0xf) == 0xd) { - /* - printk_ratelimited("Port %d received 0x?d preamble\n", - port); - */ + /* Port received 0xd preamble */ work->packet_ptr.s.addr += i; work->word1.len -= i + 4; for (i = 0; i < work->word1.len; i++) { @@ -138,9 +129,6 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) } else { printk_ratelimited("Port %d unknown preamble, packet dropped\n", port); - /* - cvmx_helper_dump_packet(work); - */ cvm_oct_free_work(work); return 1; } @@ -211,7 +199,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) prefetch(work); did_work_request = 0; - if (work == NULL) { + if (!work) { if (OCTEON_IS_MODEL(OCTEON_CN68XX)) { cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS, 1ull << pow_receive_group); @@ -227,7 +215,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) } break; } - pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - + pskb = (struct sk_buff **) + (cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *)); prefetch(pskb); @@ -309,7 +298,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) while (segments--) { union cvmx_buf_ptr next_ptr = - *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); + *(union cvmx_buf_ptr *) + cvmx_phys_to_ptr( + segment_ptr.s.addr - 8); /* * Octeon Errata PKI-100: The segment size is @@ -333,7 +324,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) segment_size = len; /* Copy the data into the packet */ memcpy(skb_put(skb, segment_size), - cvmx_phys_to_ptr(segment_ptr.s.addr), + cvmx_phys_to_ptr( + segment_ptr.s.addr), segment_size); len -= segment_size; segment_ptr = next_ptr; @@ -364,32 +356,16 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) /* Increment RX stats for virtual ports */ if (port >= CVMX_PIP_NUM_INPUT_PORTS) { -#ifdef CONFIG_64BIT - atomic64_add(1, - (atomic64_t *)&priv->stats.rx_packets); - atomic64_add(skb->len, - (atomic64_t *)&priv->stats.rx_bytes); -#else - atomic_add(1, - (atomic_t *)&priv->stats.rx_packets); - atomic_add(skb->len, - (atomic_t *)&priv->stats.rx_bytes); -#endif + priv->stats.rx_packets++; + priv->stats.rx_bytes += skb->len; } netif_receive_skb(skb); } else { - /* Drop any packet received for a device that isn't up */ /* - printk_ratelimited("%s: Device not up, packet dropped\n", - dev->name); - */ -#ifdef CONFIG_64BIT - atomic64_add(1, - (atomic64_t *)&priv->stats.rx_dropped); -#else - atomic_add(1, - (atomic_t 
*)&priv->stats.rx_dropped); -#endif + * Drop any packet received for a device that + * isn't up. + */ + priv->stats.rx_dropped++; dev_kfree_skb_irq(skb); } } else { @@ -433,7 +409,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) } cvm_oct_rx_refill_pool(0); - if (rx_count < budget && napi != NULL) { + if (rx_count < budget && napi) { /* No more work */ napi_complete(napi); enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group); @@ -466,7 +442,7 @@ void cvm_oct_rx_initialize(void) } } - if (NULL == dev_for_napi) + if (!dev_for_napi) panic("No net_devices were allocated."); netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll, diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c index 2ae1944b3a1b..063dcd07557b 100644 --- a/drivers/staging/octeon/ethernet-spi.c +++ b/drivers/staging/octeon/ethernet-spi.c @@ -167,9 +167,7 @@ static void cvm_oct_spi_poll(struct net_device *dev) int interface; for (interface = 0; interface < 2; interface++) { - if ((priv->port == interface * 16) && need_retrain[interface]) { - if (cvmx_spi_restart_interface (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) { need_retrain[interface] = 0; diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index c053c4a47a7e..ffe9bd77a7bb 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -95,12 +95,10 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev) for (qos = 0; qos < queues_per_port; qos++) { if (skb_queue_len(&priv->tx_free_list[qos]) == 0) continue; - skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, + skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, MAX_SKB_TO_FREE); skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, - priv->fau+qos*4); - - + priv->fau + qos * 4); total_freed += skb_to_free; if (skb_to_free > 0) { struct sk_buff *to_free_list = NULL; @@ -126,7 +124,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev) } total_remaining += skb_queue_len(&priv->tx_free_list[qos]); } - if (total_freed >= 0 && netif_queue_stopped(dev)) + if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev)) netif_wake_queue(dev); if (total_remaining) cvm_oct_kick_tx_poll_watchdog(); @@ -176,8 +174,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) qos = 0; else if (qos >= cvmx_pko_get_num_queues(priv->port)) qos = 0; - } else + } else { qos = 0; + } if (USE_ASYNC_IOBDMA) { /* Save scratch in case userspace is using it */ @@ -309,55 +308,38 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) #if REUSE_SKBUFFS_WITHOUT_FREE fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f); if (unlikely(skb->data < fpa_head)) { - /* - * printk("TX buffer beginning can't meet FPA - * alignment constraints\n"); - */ + /* TX buffer beginning can't meet FPA alignment constraints */ goto dont_put_skbuff_in_hw; } if (unlikely ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) { - /* - printk("TX buffer isn't large enough for the FPA\n"); - */ + /* TX buffer isn't large enough for the FPA */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_shared(skb))) { - /* - printk("TX buffer sharing data with someone else\n"); - */ + /* TX buffer sharing data with someone else */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_cloned(skb))) { - /* - printk("TX buffer has been cloned\n"); - */ + /* TX buffer has been cloned */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_header_cloned(skb))) { - /* - printk("TX buffer header has been 
cloned\n"); - */ + /* TX buffer header has been cloned */ goto dont_put_skbuff_in_hw; } if (unlikely(skb->destructor)) { - /* - printk("TX buffer has a destructor\n"); - */ + /* TX buffer has a destructor */ goto dont_put_skbuff_in_hw; } if (unlikely(skb_shinfo(skb)->nr_frags)) { - /* - printk("TX buffer has fragments\n"); - */ + /* TX buffer has fragments */ goto dont_put_skbuff_in_hw; } if (unlikely (skb->truesize != sizeof(*skb) + skb_end_offset(skb))) { - /* - printk("TX buffer truesize has been changed\n"); - */ + /* TX buffer truesize has been changed */ goto dont_put_skbuff_in_hw; } @@ -403,7 +385,7 @@ dont_put_skbuff_in_hw: ((ip_hdr(skb)->protocol == IPPROTO_TCP) || (ip_hdr(skb)->protocol == IPPROTO_UDP))) { /* Use hardware checksum calc */ - pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; + pko_command.s.ipoffp1 = skb_network_offset(skb) + 1; } if (USE_ASYNC_IOBDMA) { @@ -419,7 +401,8 @@ dont_put_skbuff_in_hw: cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); } - skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); + skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, + priv->fau + qos * 4); /* * If we're sending faster than the receive can free them then @@ -430,7 +413,7 @@ dont_put_skbuff_in_hw: if (pko_command.s.dontfree) { queue_type = QUEUE_CORE; - pko_command.s.reg0 = priv->fau+qos*4; + pko_command.s.reg0 = priv->fau + qos * 4; } else { queue_type = QUEUE_HW; } @@ -443,7 +426,6 @@ dont_put_skbuff_in_hw: /* Drop this packet if we have too many already queued to the HW */ if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { - if (dev->tx_queue_len != 0) { /* Drop the lock when notifying the core. */ spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, @@ -559,7 +541,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) /* Get a packet buffer */ packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL); - if (unlikely(packet_buffer == NULL)) { + if (unlikely(!packet_buffer)) { printk_ratelimited("%s: Failed to allocate a packet buffer\n", dev->name); cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1); @@ -617,8 +599,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) work->word2.s.dec_ipcomp = 0; /* FIXME */ #endif work->word2.s.tcp_or_udp = - (ip_hdr(skb)->protocol == IPPROTO_TCP) - || (ip_hdr(skb)->protocol == IPPROTO_UDP); + (ip_hdr(skb)->protocol == IPPROTO_TCP) || + (ip_hdr(skb)->protocol == IPPROTO_UDP); #if 0 /* FIXME */ work->word2.s.dec_ipsec = 0; @@ -629,8 +611,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) /* No error, packet is internal */ work->word2.s.L4_error = 0; #endif - work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) - || (ip_hdr(skb)->frag_off == + work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) || + (ip_hdr(skb)->frag_off == 1 << 14)); #if 0 /* Assume Linux is sending a good packet */ diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index f69fb5cc7cb8..271e1b8d8506 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -86,10 +86,6 @@ int rx_napi_weight = 32; module_param(rx_napi_weight, int, 0444); MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter."); -/* - * cvm_oct_poll_queue - Workqueue for polling operations. - */ -struct workqueue_struct *cvm_oct_poll_queue; /* * cvm_oct_poll_queue_stopping - flag to indicate polling should stop. 
@@ -121,8 +117,7 @@ static void cvm_oct_rx_refill_worker(struct work_struct *work) cvm_oct_rx_refill_pool(num_packet_buffers / 2); if (!atomic_read(&cvm_oct_poll_queue_stopping)) - queue_delayed_work(cvm_oct_poll_queue, - &cvm_oct_rx_refill_work, HZ); + schedule_delayed_work(&cvm_oct_rx_refill_work, HZ); } static void cvm_oct_periodic_worker(struct work_struct *work) @@ -138,8 +133,7 @@ static void cvm_oct_periodic_worker(struct work_struct *work) cvm_oct_device[priv->port]); if (!atomic_read(&cvm_oct_poll_queue_stopping)) - queue_delayed_work(cvm_oct_poll_queue, - &priv->port_periodic_work, HZ); + schedule_delayed_work(&priv->port_periodic_work, HZ); } static void cvm_oct_configure_common_hw(void) @@ -226,18 +220,7 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) priv->stats.multicast += rx_status.multicast_packets; priv->stats.rx_crc_errors += rx_status.inb_errors; priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; - - /* - * The drop counter must be incremented atomically - * since the RX tasklet also increments it. - */ -#ifdef CONFIG_64BIT - atomic64_add(rx_status.dropped_packets, - (atomic64_t *)&priv->stats.rx_dropped); -#else - atomic_add(rx_status.dropped_packets, - (atomic_t *)&priv->stats.rx_dropped); -#endif + priv->stats.rx_dropped += rx_status.dropped_packets; } return &priv->stats; @@ -265,22 +248,22 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) * Limit the MTU to make sure the ethernet packets are between * 64 bytes and 65535 bytes. */ - if ((new_mtu + 14 + 4 + vlan_bytes < 64) - || (new_mtu + 14 + 4 + vlan_bytes > 65392)) { + if ((new_mtu + 14 + 4 + vlan_bytes < 64) || + (new_mtu + 14 + 4 + vlan_bytes > 65392)) { pr_err("MTU must be between %d and %d.\n", 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes); return -EINVAL; } dev->mtu = new_mtu; - if ((interface < 2) - && (cvmx_helper_interface_get_mode(interface) != + if ((interface < 2) && + (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_SPI)) { /* Add ethernet header and FCS, and VLAN if configured. */ int max_packet = new_mtu + 14 + 4 + vlan_bytes; - if (OCTEON_IS_MODEL(OCTEON_CN3XXX) - || OCTEON_IS_MODEL(OCTEON_CN58XX)) { + if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || + OCTEON_IS_MODEL(OCTEON_CN58XX)) { /* Signal errors on packets larger than the MTU */ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), max_packet); @@ -319,8 +302,8 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev) int interface = INTERFACE(priv->port); int index = INDEX(priv->port); - if ((interface < 2) - && (cvmx_helper_interface_get_mode(interface) != + if ((interface < 2) && + (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_SPI)) { union cvmx_gmxx_rxx_adr_ctl control; @@ -371,8 +354,8 @@ static int cvm_oct_set_mac_filter(struct net_device *dev) int interface = INTERFACE(priv->port); int index = INDEX(priv->port); - if ((interface < 2) - && (cvmx_helper_interface_get_mode(interface) != + if ((interface < 2) && + (cvmx_helper_interface_get_mode(interface) != CVMX_HELPER_INTERFACE_MODE_SPI)) { int i; u8 *ptr = dev->dev_addr; @@ -445,8 +428,8 @@ int cvm_oct_common_init(struct net_device *dev) * Force the interface to use the POW send if always_use_pow * was specified or it is in the pow send list. 
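The hunks above and below replace the driver's private octeon-ethernet workqueue with the shared system workqueue. As an illustration of the API involved (hypothetical names, not code from this patch), schedule_delayed_work() is shorthand for queuing on system_wq, so the one-second re-arm pattern used by the refill and periodic workers stays the same:

#include <linux/workqueue.h>

static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll_work, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
	/* ... periodic polling goes here ... */

	/*
	 * Re-arm one second out on the shared system workqueue;
	 * equivalent to queue_delayed_work(system_wq, &example_poll_work, HZ).
	 */
	schedule_delayed_work(&example_poll_work, HZ);
}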
*/ - if ((pow_send_group != -1) - && (always_use_pow || strstr(pow_send_list, dev->name))) + if ((pow_send_group != -1) && + (always_use_pow || strstr(pow_send_list, dev->name))) priv->queue = -1; if (priv->queue != -1) @@ -557,6 +540,7 @@ static const struct net_device_ops cvm_oct_npi_netdev_ops = { .ndo_poll_controller = cvm_oct_poll_controller, #endif }; + static const struct net_device_ops cvm_oct_xaui_netdev_ops = { .ndo_init = cvm_oct_common_init, .ndo_uninit = cvm_oct_common_uninit, @@ -572,6 +556,7 @@ static const struct net_device_ops cvm_oct_xaui_netdev_ops = { .ndo_poll_controller = cvm_oct_poll_controller, #endif }; + static const struct net_device_ops cvm_oct_sgmii_netdev_ops = { .ndo_init = cvm_oct_sgmii_init, .ndo_uninit = cvm_oct_common_uninit, @@ -587,6 +572,7 @@ static const struct net_device_ops cvm_oct_sgmii_netdev_ops = { .ndo_poll_controller = cvm_oct_poll_controller, #endif }; + static const struct net_device_ops cvm_oct_spi_netdev_ops = { .ndo_init = cvm_oct_spi_init, .ndo_uninit = cvm_oct_spi_uninit, @@ -600,9 +586,10 @@ static const struct net_device_ops cvm_oct_spi_netdev_ops = { .ndo_poll_controller = cvm_oct_poll_controller, #endif }; + static const struct net_device_ops cvm_oct_rgmii_netdev_ops = { - .ndo_init = cvm_oct_rgmii_init, - .ndo_uninit = cvm_oct_rgmii_uninit, + .ndo_init = cvm_oct_common_init, + .ndo_uninit = cvm_oct_common_uninit, .ndo_open = cvm_oct_rgmii_open, .ndo_stop = cvm_oct_common_stop, .ndo_start_xmit = cvm_oct_xmit, @@ -615,6 +602,7 @@ static const struct net_device_ops cvm_oct_rgmii_netdev_ops = { .ndo_poll_controller = cvm_oct_poll_controller, #endif }; + static const struct net_device_ops cvm_oct_pow_netdev_ops = { .ndo_init = cvm_oct_common_init, .ndo_start_xmit = cvm_oct_xmit_pow, @@ -677,11 +665,6 @@ static int cvm_oct_probe(struct platform_device *pdev) return -EINVAL; } - cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet"); - if (!cvm_oct_poll_queue) { - pr_err("octeon-ethernet: Cannot create workqueue"); - return -ENOMEM; - } cvm_oct_configure_common_hw(); @@ -790,7 +773,6 @@ static int cvm_oct_probe(struct platform_device *pdev) cvmx_fau_atomic_write32(priv->fau + qos * 4, 0); switch (priv->imode) { - /* These types don't support ports to IPD/PKO */ case CVMX_HELPER_INTERFACE_MODE_DISABLED: case CVMX_HELPER_INTERFACE_MODE_PCIE: @@ -840,8 +822,7 @@ static int cvm_oct_probe(struct platform_device *pdev) fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(u32); - queue_delayed_work(cvm_oct_poll_queue, - &priv->port_periodic_work, HZ); + schedule_delayed_work(&priv->port_periodic_work, HZ); } } } @@ -854,7 +835,7 @@ static int cvm_oct_probe(struct platform_device *pdev) */ cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000); - queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ); + schedule_delayed_work(&cvm_oct_rx_refill_work, HZ); return 0; } @@ -897,7 +878,6 @@ static int cvm_oct_remove(struct platform_device *pdev) } } - destroy_workqueue(cvm_oct_poll_queue); cvmx_pko_shutdown(); diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h index fdf24d120e77..6275c15e0035 100644 --- a/drivers/staging/octeon/octeon-ethernet.h +++ b/drivers/staging/octeon/octeon-ethernet.h @@ -41,20 +41,18 @@ struct octeon_ethernet { /* Device statistics */ struct net_device_stats stats; struct phy_device *phydev; + unsigned int last_speed; unsigned int last_link; /* Last negotiated link state */ u64 link_info; /* Called periodically to check link status */ void 
(*poll)(struct net_device *dev); struct delayed_work port_periodic_work; - struct work_struct port_work; /* may be unused. */ struct device_node *of_node; }; int cvm_oct_free_work(void *work_queue_entry); -int cvm_oct_rgmii_init(struct net_device *dev); -void cvm_oct_rgmii_uninit(struct net_device *dev); int cvm_oct_rgmii_open(struct net_device *dev); int cvm_oct_sgmii_init(struct net_device *dev); @@ -78,7 +76,6 @@ extern int pow_send_group; extern int pow_receive_group; extern char pow_send_list[]; extern struct net_device *cvm_oct_device[]; -extern struct workqueue_struct *cvm_oct_poll_queue; extern atomic_t cvm_oct_poll_queue_stopping; extern u64 cvm_oct_tx_poll_interval; diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig deleted file mode 100644 index d277f048789e..000000000000 --- a/drivers/staging/olpc_dcon/Kconfig +++ /dev/null @@ -1,35 +0,0 @@ -config FB_OLPC_DCON - tristate "One Laptop Per Child Display CONtroller support" - depends on OLPC && FB - depends on I2C - depends on (GPIO_CS5535 || GPIO_CS5535=n) - select BACKLIGHT_CLASS_DEVICE - ---help--- - In order to support very low power operation, the XO laptop uses a - secondary Display CONtroller, or DCON. This secondary controller - is present in the video pipeline between the primary display - controller (integrate into the processor or chipset) and the LCD - panel. It allows the main processor/display controller to be - completely powered off while still retaining an image on the display. - This controller is only available on OLPC platforms. Unless you have - one of these platforms, you will want to say 'N'. - -config FB_OLPC_DCON_1 - bool "OLPC XO-1 DCON support" - depends on FB_OLPC_DCON && GPIO_CS5535 - default y - ---help--- - Enable support for the DCON in XO-1 model laptops. The kernel - communicates with the DCON using model-specific code. If you - have an XO-1 (or if you're unsure what model you have), you should - say 'Y'. - -config FB_OLPC_DCON_1_5 - bool "OLPC XO-1.5 DCON support" - depends on FB_OLPC_DCON && ACPI - default y - ---help--- - Enable support for the DCON in XO-1.5 model laptops. The kernel - communicates with the DCON using model-specific code. If you - have an XO-1.5 (or if you're unsure what model you have), you - should say 'Y'. diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile deleted file mode 100644 index 36c7e67fec20..000000000000 --- a/drivers/staging/olpc_dcon/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -olpc-dcon-objs += olpc_dcon.o -olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o -olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o -obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o - - diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO deleted file mode 100644 index 61c2e65ac354..000000000000 --- a/drivers/staging/olpc_dcon/TODO +++ /dev/null @@ -1,9 +0,0 @@ -TODO: - - see if vx855 gpio API can be made similar enough to cs5535 so we can - share more code - - allow simultaneous XO-1 and XO-1.5 support - -Please send patches to Greg Kroah-Hartman <greg@kroah.com> and -copy: - Daniel Drake <dsd@laptop.org> - Jens Frederich <jfrederich@gmail.com> diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c deleted file mode 100644 index f45b2ef05f48..000000000000 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ /dev/null @@ -1,813 +0,0 @@ -/* - * Mainly by David Woodhouse, somewhat modified by Jordan Crouse - * - * Copyright © 2006-2007 Red Hat, Inc. 
- * Copyright © 2006-2007 Advanced Micro Devices, Inc. - * Copyright © 2009 VIA Technology, Inc. - * Copyright (c) 2010-2011 Andres Salomon <dilinger@queued.net> - * - * This program is free software. You can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/fb.h> -#include <linux/console.h> -#include <linux/i2c.h> -#include <linux/platform_device.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/module.h> -#include <linux/backlight.h> -#include <linux/device.h> -#include <linux/uaccess.h> -#include <linux/ctype.h> -#include <linux/reboot.h> -#include <linux/olpc-ec.h> -#include <asm/tsc.h> -#include <asm/olpc.h> - -#include "olpc_dcon.h" - -/* Module definitions */ - -static ushort resumeline = 898; -module_param(resumeline, ushort, 0444); - -static struct dcon_platform_data *pdata; - -/* I2C structures */ - -/* Platform devices */ -static struct platform_device *dcon_device; - -static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END }; - -static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val) -{ - return i2c_smbus_write_word_data(dcon->client, reg, val); -} - -static s32 dcon_read(struct dcon_priv *dcon, u8 reg) -{ - return i2c_smbus_read_word_data(dcon->client, reg); -} - -/* ===== API functions - these are called by a variety of users ==== */ - -static int dcon_hw_init(struct dcon_priv *dcon, int is_init) -{ - u16 ver; - int rc = 0; - - ver = dcon_read(dcon, DCON_REG_ID); - if ((ver >> 8) != 0xDC) { - pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver); - rc = -ENXIO; - goto err; - } - - if (is_init) { - pr_info("Discovered DCON version %x\n", ver & 0xFF); - rc = pdata->init(dcon); - if (rc != 0) { - pr_err("Unable to init.\n"); - goto err; - } - } - - if (ver < 0xdc02) { - dev_err(&dcon->client->dev, - "DCON v1 is unsupported, giving up..\n"); - rc = -ENODEV; - goto err; - } - - /* SDRAM setup/hold time */ - dcon_write(dcon, 0x3a, 0xc040); - dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */ - dcon_write(dcon, DCON_REG_MEM_OPT_A, - MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN); - dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET); - - /* Colour swizzle, AA, no passthrough, backlight */ - if (is_init) { - dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE | - MODE_CSWIZZLE | MODE_COL_AA; - } - dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode); - - /* Set the scanline to interrupt on during resume */ - dcon_write(dcon, DCON_REG_SCAN_INT, resumeline); - -err: - return rc; -} - -/* - * The smbus doesn't always come back due to what is believed to be - * hardware (power rail) bugs. For older models where this is known to - * occur, our solution is to attempt to wait for the bus to stabilize; - * if it doesn't happen, cut power to the dcon, repower it, and wait - * for the bus to stabilize. Rinse, repeat until we have a working - * smbus. For newer models, we simply BUG(); we want to know if this - * still happens despite the power fixes that have been made! 
- */ -static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down) -{ - unsigned long timeout; - u8 pm; - int x; - -power_up: - if (is_powered_down) { - pm = 1; - x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0); - if (x) { - pr_warn("unable to force dcon to power up: %d!\n", x); - return x; - } - usleep_range(10000, 11000); /* we'll be conservative */ - } - - pdata->bus_stabilize_wiggle(); - - for (x = -1, timeout = 50; timeout && x < 0; timeout--) { - usleep_range(1000, 1100); - x = dcon_read(dcon, DCON_REG_ID); - } - if (x < 0) { - pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n"); - BUG_ON(olpc_board_at_least(olpc_board(0xc2))); - pm = 0; - olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0); - msleep(100); - is_powered_down = 1; - goto power_up; /* argh, stupid hardware.. */ - } - - if (is_powered_down) - return dcon_hw_init(dcon, 0); - return 0; -} - -static void dcon_set_backlight(struct dcon_priv *dcon, u8 level) -{ - dcon->bl_val = level; - dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val); - - /* Purposely turn off the backlight when we go to level 0 */ - if (dcon->bl_val == 0) { - dcon->disp_mode &= ~MODE_BL_ENABLE; - dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode); - } else if (!(dcon->disp_mode & MODE_BL_ENABLE)) { - dcon->disp_mode |= MODE_BL_ENABLE; - dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode); - } -} - -/* Set the output type to either color or mono */ -static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono) -{ - if (dcon->mono == enable_mono) - return 0; - - dcon->mono = enable_mono; - - if (enable_mono) { - dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA); - dcon->disp_mode |= MODE_MONO_LUMA; - } else { - dcon->disp_mode &= ~(MODE_MONO_LUMA); - dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA; - } - - dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode); - return 0; -} - -/* For now, this will be really stupid - we need to address how - * DCONLOAD works in a sleep and account for it accordingly - */ - -static void dcon_sleep(struct dcon_priv *dcon, bool sleep) -{ - int x; - - /* Turn off the backlight and put the DCON to sleep */ - - if (dcon->asleep == sleep) - return; - - if (!olpc_board_at_least(olpc_board(0xc2))) - return; - - if (sleep) { - u8 pm = 0; - - x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0); - if (x) - pr_warn("unable to force dcon to power down: %d!\n", x); - else - dcon->asleep = sleep; - } else { - /* Only re-enable the backlight if the backlight value is set */ - if (dcon->bl_val != 0) - dcon->disp_mode |= MODE_BL_ENABLE; - x = dcon_bus_stabilize(dcon, 1); - if (x) - pr_warn("unable to reinit dcon hardware: %d!\n", x); - else - dcon->asleep = sleep; - - /* Restore backlight */ - dcon_set_backlight(dcon, dcon->bl_val); - } - - /* We should turn off some stuff in the framebuffer - but what? */ -} - -/* the DCON seems to get confused if we change DCONLOAD too - * frequently -- i.e., approximately faster than frame time. - * normally we don't change it this fast, so in general we won't - * delay here. 
- */ -static void dcon_load_holdoff(struct dcon_priv *dcon) -{ - ktime_t delta_t, now; - - while (1) { - now = ktime_get(); - delta_t = ktime_sub(now, dcon->load_time); - if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20) - break; - mdelay(4); - } -} - -static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank) -{ - int err; - - console_lock(); - if (!lock_fb_info(dcon->fbinfo)) { - console_unlock(); - dev_err(&dcon->client->dev, "unable to lock framebuffer\n"); - return false; - } - - dcon->ignore_fb_events = true; - err = fb_blank(dcon->fbinfo, - blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK); - dcon->ignore_fb_events = false; - unlock_fb_info(dcon->fbinfo); - console_unlock(); - - if (err) { - dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n", - blank ? "" : "un"); - return false; - } - return true; -} - -/* Set the source of the display (CPU or DCON) */ -static void dcon_source_switch(struct work_struct *work) -{ - struct dcon_priv *dcon = container_of(work, struct dcon_priv, - switch_source); - int source = dcon->pending_src; - - if (dcon->curr_src == source) - return; - - dcon_load_holdoff(dcon); - - dcon->switched = false; - - switch (source) { - case DCON_SOURCE_CPU: - pr_info("dcon_source_switch to CPU\n"); - /* Enable the scanline interrupt bit */ - if (dcon_write(dcon, DCON_REG_MODE, - dcon->disp_mode | MODE_SCAN_INT)) - pr_err("couldn't enable scanline interrupt!\n"); - else - /* Wait up to one second for the scanline interrupt */ - wait_event_timeout(dcon->waitq, dcon->switched, HZ); - - if (!dcon->switched) - pr_err("Timeout entering CPU mode; expect a screen glitch.\n"); - - /* Turn off the scanline interrupt */ - if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode)) - pr_err("couldn't disable scanline interrupt!\n"); - - /* - * Ideally we'd like to disable interrupts here so that the - * fb unblanking and DCON turn on happen at a known time value; - * however, we can't do that right now with fb_blank - * messing with semaphores. - * - * For now, we just hope.. - */ - if (!dcon_blank_fb(dcon, false)) { - pr_err("Failed to enter CPU mode\n"); - dcon->pending_src = DCON_SOURCE_DCON; - return; - } - - /* And turn off the DCON */ - pdata->set_dconload(1); - dcon->load_time = ktime_get(); - - pr_info("The CPU has control\n"); - break; - case DCON_SOURCE_DCON: - { - ktime_t delta_t; - - pr_info("dcon_source_switch to DCON\n"); - - /* Clear DCONLOAD - this implies that the DCON is in control */ - pdata->set_dconload(0); - dcon->load_time = ktime_get(); - - wait_event_timeout(dcon->waitq, dcon->switched, HZ/2); - - if (!dcon->switched) { - pr_err("Timeout entering DCON mode; expect a screen glitch.\n"); - } else { - /* sometimes the DCON doesn't follow its own rules, - * and doesn't wait for two vsync pulses before - * ack'ing the frame load with an IRQ. the result - * is that the display shows the *previously* - * loaded frame. we can detect this by looking at - * the time between asserting DCONLOAD and the IRQ -- - * if it's less than 20msec, then the DCON couldn't - * have seen two VSYNC pulses. in that case we - * deassert and reassert, and hope for the best. 
- * see http://dev.laptop.org/ticket/9664 - */ - delta_t = ktime_sub(dcon->irq_time, dcon->load_time); - if (dcon->switched && ktime_to_ns(delta_t) - < NSEC_PER_MSEC * 20) { - pr_err("missed loading, retrying\n"); - pdata->set_dconload(1); - mdelay(41); - pdata->set_dconload(0); - dcon->load_time = ktime_get(); - mdelay(41); - } - } - - dcon_blank_fb(dcon, true); - pr_info("The DCON has control\n"); - break; - } - default: - BUG(); - } - - dcon->curr_src = source; -} - -static void dcon_set_source(struct dcon_priv *dcon, int arg) -{ - if (dcon->pending_src == arg) - return; - - dcon->pending_src = arg; - - if (dcon->curr_src != arg) - schedule_work(&dcon->switch_source); -} - -static void dcon_set_source_sync(struct dcon_priv *dcon, int arg) -{ - dcon_set_source(dcon, arg); - flush_scheduled_work(); -} - -static ssize_t dcon_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dcon_priv *dcon = dev_get_drvdata(dev); - - return sprintf(buf, "%4.4X\n", dcon->disp_mode); -} - -static ssize_t dcon_sleep_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dcon_priv *dcon = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", dcon->asleep); -} - -static ssize_t dcon_freeze_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dcon_priv *dcon = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0); -} - -static ssize_t dcon_mono_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct dcon_priv *dcon = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", dcon->mono); -} - -static ssize_t dcon_resumeline_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "%d\n", resumeline); -} - -static ssize_t dcon_mono_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - unsigned long enable_mono; - int rc; - - rc = kstrtoul(buf, 10, &enable_mono); - if (rc) - return rc; - - dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false); - - return count; -} - -static ssize_t dcon_freeze_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct dcon_priv *dcon = dev_get_drvdata(dev); - unsigned long output; - int ret; - - ret = kstrtoul(buf, 10, &output); - if (ret) - return ret; - - pr_info("dcon_freeze_store: %lu\n", output); - - switch (output) { - case 0: - dcon_set_source(dcon, DCON_SOURCE_CPU); - break; - case 1: - dcon_set_source_sync(dcon, DCON_SOURCE_DCON); - break; - case 2: /* normally unused */ - dcon_set_source(dcon, DCON_SOURCE_DCON); - break; - default: - return -EINVAL; - } - - return count; -} - -static ssize_t dcon_resumeline_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - unsigned short rl; - int rc; - - rc = kstrtou16(buf, 10, &rl); - if (rc) - return rc; - - resumeline = rl; - dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline); - - return count; -} - -static ssize_t dcon_sleep_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - unsigned long output; - int ret; - - ret = kstrtoul(buf, 10, &output); - if (ret) - return ret; - - dcon_sleep(dev_get_drvdata(dev), output ? 
true : false); - return count; -} - -static struct device_attribute dcon_device_files[] = { - __ATTR(mode, 0444, dcon_mode_show, NULL), - __ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store), - __ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store), - __ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store), - __ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store), -}; - -static int dcon_bl_update(struct backlight_device *dev) -{ - struct dcon_priv *dcon = bl_get_data(dev); - u8 level = dev->props.brightness & 0x0F; - - if (dev->props.power != FB_BLANK_UNBLANK) - level = 0; - - if (level != dcon->bl_val) - dcon_set_backlight(dcon, level); - - /* power down the DCON when the screen is blanked */ - if (!dcon->ignore_fb_events) - dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK)); - - return 0; -} - -static int dcon_bl_get(struct backlight_device *dev) -{ - struct dcon_priv *dcon = bl_get_data(dev); - - return dcon->bl_val; -} - -static const struct backlight_ops dcon_bl_ops = { - .update_status = dcon_bl_update, - .get_brightness = dcon_bl_get, -}; - -static struct backlight_properties dcon_bl_props = { - .max_brightness = 15, - .type = BACKLIGHT_RAW, - .power = FB_BLANK_UNBLANK, -}; - -static int dcon_reboot_notify(struct notifier_block *nb, - unsigned long foo, void *bar) -{ - struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb); - - if (!dcon || !dcon->client) - return NOTIFY_DONE; - - /* Turn off the DCON. Entirely. */ - dcon_write(dcon, DCON_REG_MODE, 0x39); - dcon_write(dcon, DCON_REG_MODE, 0x32); - return NOTIFY_DONE; -} - -static int unfreeze_on_panic(struct notifier_block *nb, - unsigned long e, void *p) -{ - pdata->set_dconload(1); - return NOTIFY_DONE; -} - -static struct notifier_block dcon_panic_nb = { - .notifier_call = unfreeze_on_panic, -}; - -static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info) -{ - strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE); - - return 0; -} - -static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id) -{ - struct dcon_priv *dcon; - int rc, i, j; - - if (!pdata) - return -ENXIO; - - dcon = kzalloc(sizeof(*dcon), GFP_KERNEL); - if (!dcon) - return -ENOMEM; - - dcon->client = client; - init_waitqueue_head(&dcon->waitq); - INIT_WORK(&dcon->switch_source, dcon_source_switch); - dcon->reboot_nb.notifier_call = dcon_reboot_notify; - dcon->reboot_nb.priority = -1; - - i2c_set_clientdata(client, dcon); - - if (num_registered_fb < 1) { - dev_err(&client->dev, "DCON driver requires a registered fb\n"); - rc = -EIO; - goto einit; - } - dcon->fbinfo = registered_fb[0]; - - rc = dcon_hw_init(dcon, 1); - if (rc) - goto einit; - - /* Add the DCON device */ - - dcon_device = platform_device_alloc("dcon", -1); - - if (!dcon_device) { - pr_err("Unable to create the DCON device\n"); - rc = -ENOMEM; - goto eirq; - } - rc = platform_device_add(dcon_device); - platform_set_drvdata(dcon_device, dcon); - - if (rc) { - pr_err("Unable to add the DCON device\n"); - goto edev; - } - - for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) { - rc = device_create_file(&dcon_device->dev, - &dcon_device_files[i]); - if (rc) { - dev_err(&dcon_device->dev, "Cannot create sysfs file\n"); - goto ecreate; - } - } - - dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F; - - /* Add the backlight device for the DCON */ - dcon_bl_props.brightness = dcon->bl_val; - dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev, - dcon, &dcon_bl_ops, &dcon_bl_props); - if (IS_ERR(dcon->bl_dev)) { 
- dev_err(&client->dev, "cannot register backlight dev (%ld)\n", - PTR_ERR(dcon->bl_dev)); - dcon->bl_dev = NULL; - } - - register_reboot_notifier(&dcon->reboot_nb); - atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb); - - return 0; - - ecreate: - for (j = 0; j < i; j++) - device_remove_file(&dcon_device->dev, &dcon_device_files[j]); - edev: - platform_device_unregister(dcon_device); - dcon_device = NULL; - eirq: - free_irq(DCON_IRQ, dcon); - einit: - kfree(dcon); - return rc; -} - -static int dcon_remove(struct i2c_client *client) -{ - struct dcon_priv *dcon = i2c_get_clientdata(client); - - unregister_reboot_notifier(&dcon->reboot_nb); - atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb); - - free_irq(DCON_IRQ, dcon); - - backlight_device_unregister(dcon->bl_dev); - - if (dcon_device) - platform_device_unregister(dcon_device); - cancel_work_sync(&dcon->switch_source); - - kfree(dcon); - - return 0; -} - -#ifdef CONFIG_PM -static int dcon_suspend(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct dcon_priv *dcon = i2c_get_clientdata(client); - - if (!dcon->asleep) { - /* Set up the DCON to have the source */ - dcon_set_source_sync(dcon, DCON_SOURCE_DCON); - } - - return 0; -} - -static int dcon_resume(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct dcon_priv *dcon = i2c_get_clientdata(client); - - if (!dcon->asleep) { - dcon_bus_stabilize(dcon, 0); - dcon_set_source(dcon, DCON_SOURCE_CPU); - } - - return 0; -} - -#else - -#define dcon_suspend NULL -#define dcon_resume NULL - -#endif /* CONFIG_PM */ - -irqreturn_t dcon_interrupt(int irq, void *id) -{ - struct dcon_priv *dcon = id; - u8 status; - - if (pdata->read_status(&status)) - return IRQ_NONE; - - switch (status & 3) { - case 3: - pr_debug("DCONLOAD_MISSED interrupt\n"); - break; - - case 2: /* switch to DCON mode */ - case 1: /* switch to CPU mode */ - dcon->switched = true; - dcon->irq_time = ktime_get(); - wake_up(&dcon->waitq); - break; - - case 0: - /* workaround resume case: the DCON (on 1.5) doesn't - * ever assert status 0x01 when switching to CPU mode - * during resume. this is because DCONLOAD is de-asserted - * _immediately_ upon exiting S3, so the actual release - * of the DCON happened long before this point. 
- * see http://dev.laptop.org/ticket/9869 - */ - if (dcon->curr_src != dcon->pending_src && !dcon->switched) { - dcon->switched = true; - dcon->irq_time = ktime_get(); - wake_up(&dcon->waitq); - pr_debug("switching w/ status 0/0\n"); - } else { - pr_debug("scanline interrupt w/CPU\n"); - } - } - - return IRQ_HANDLED; -} - -static const struct dev_pm_ops dcon_pm_ops = { - .suspend = dcon_suspend, - .resume = dcon_resume, -}; - -static const struct i2c_device_id dcon_idtable[] = { - { "olpc_dcon", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, dcon_idtable); - -static struct i2c_driver dcon_driver = { - .driver = { - .name = "olpc_dcon", - .pm = &dcon_pm_ops, - }, - .class = I2C_CLASS_DDC | I2C_CLASS_HWMON, - .id_table = dcon_idtable, - .probe = dcon_probe, - .remove = dcon_remove, - .detect = dcon_detect, - .address_list = normal_i2c, -}; - -static int __init olpc_dcon_init(void) -{ -#ifdef CONFIG_FB_OLPC_DCON_1_5 - /* XO-1.5 */ - if (olpc_board_at_least(olpc_board(0xd0))) - pdata = &dcon_pdata_xo_1_5; -#endif -#ifdef CONFIG_FB_OLPC_DCON_1 - if (!pdata) - pdata = &dcon_pdata_xo_1; -#endif - - return i2c_add_driver(&dcon_driver); -} - -static void __exit olpc_dcon_exit(void) -{ - i2c_del_driver(&dcon_driver); -} - -module_init(olpc_dcon_init); -module_exit(olpc_dcon_exit); - -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h deleted file mode 100644 index 215e7ec4dea2..000000000000 --- a/drivers/staging/olpc_dcon/olpc_dcon.h +++ /dev/null @@ -1,111 +0,0 @@ -#ifndef OLPC_DCON_H_ -#define OLPC_DCON_H_ - -#include <linux/notifier.h> -#include <linux/workqueue.h> - -/* DCON registers */ - -#define DCON_REG_ID 0 -#define DCON_REG_MODE 1 - -#define MODE_PASSTHRU (1<<0) -#define MODE_SLEEP (1<<1) -#define MODE_SLEEP_AUTO (1<<2) -#define MODE_BL_ENABLE (1<<3) -#define MODE_BLANK (1<<4) -#define MODE_CSWIZZLE (1<<5) -#define MODE_COL_AA (1<<6) -#define MODE_MONO_LUMA (1<<7) -#define MODE_SCAN_INT (1<<8) -#define MODE_CLOCKDIV (1<<9) -#define MODE_DEBUG (1<<14) -#define MODE_SELFTEST (1<<15) - -#define DCON_REG_HRES 0x2 -#define DCON_REG_HTOTAL 0x3 -#define DCON_REG_HSYNC_WIDTH 0x4 -#define DCON_REG_VRES 0x5 -#define DCON_REG_VTOTAL 0x6 -#define DCON_REG_VSYNC_WIDTH 0x7 -#define DCON_REG_TIMEOUT 0x8 -#define DCON_REG_SCAN_INT 0x9 -#define DCON_REG_BRIGHT 0xa -#define DCON_REG_MEM_OPT_A 0x41 -#define DCON_REG_MEM_OPT_B 0x42 - -/* Load Delay Locked Loop (DLL) settings for clock delay */ -#define MEM_DLL_CLOCK_DELAY (1<<0) -/* Memory controller power down function */ -#define MEM_POWER_DOWN (1<<8) -/* Memory controller software reset */ -#define MEM_SOFT_RESET (1<<0) - -/* Status values */ - -#define DCONSTAT_SCANINT 0 -#define DCONSTAT_SCANINT_DCON 1 -#define DCONSTAT_DISPLAYLOAD 2 -#define DCONSTAT_MISSED 3 - -/* Source values */ - -#define DCON_SOURCE_DCON 0 -#define DCON_SOURCE_CPU 1 - -/* Interrupt */ -#define DCON_IRQ 6 - -struct dcon_priv { - struct i2c_client *client; - struct fb_info *fbinfo; - struct backlight_device *bl_dev; - - wait_queue_head_t waitq; - struct work_struct switch_source; - struct notifier_block reboot_nb; - - /* Shadow register for the DCON_REG_MODE register */ - u8 disp_mode; - - /* The current backlight value - this saves us some smbus traffic */ - u8 bl_val; - - /* Current source, initialized at probe time */ - int curr_src; - - /* Desired source */ - int pending_src; - - /* Variables used during switches */ - bool switched; - ktime_t irq_time; - ktime_t load_time; - - /* Current output type; true == mono, false == 
color */ - bool mono; - bool asleep; - /* This get set while controlling fb blank state from the driver */ - bool ignore_fb_events; -}; - -struct dcon_platform_data { - int (*init)(struct dcon_priv *); - void (*bus_stabilize_wiggle)(void); - void (*set_dconload)(int); - int (*read_status)(u8 *); -}; - -#include <linux/interrupt.h> - -irqreturn_t dcon_interrupt(int irq, void *id); - -#ifdef CONFIG_FB_OLPC_DCON_1 -extern struct dcon_platform_data dcon_pdata_xo_1; -#endif - -#ifdef CONFIG_FB_OLPC_DCON_1_5 -extern struct dcon_platform_data dcon_pdata_xo_1_5; -#endif - -#endif diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c deleted file mode 100644 index 0c5a10c69401..000000000000 --- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Mainly by David Woodhouse, somewhat modified by Jordan Crouse - * - * Copyright © 2006-2007 Red Hat, Inc. - * Copyright © 2006-2007 Advanced Micro Devices, Inc. - * Copyright © 2009 VIA Technology, Inc. - * Copyright (c) 2010 Andres Salomon <dilinger@queued.net> - * - * This program is free software. You can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/cs5535.h> -#include <linux/gpio.h> -#include <linux/delay.h> -#include <asm/olpc.h> - -#include "olpc_dcon.h" - -static int dcon_init_xo_1(struct dcon_priv *dcon) -{ - unsigned char lob; - - if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) { - pr_err("failed to request STAT0 GPIO\n"); - return -EIO; - } - if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) { - pr_err("failed to request STAT1 GPIO\n"); - goto err_gp_stat1; - } - if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) { - pr_err("failed to request IRQ GPIO\n"); - goto err_gp_irq; - } - if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) { - pr_err("failed to request LOAD GPIO\n"); - goto err_gp_load; - } - if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) { - pr_err("failed to request BLANK GPIO\n"); - goto err_gp_blank; - } - - /* Turn off the event enable for GPIO7 just to be safe */ - cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); - - /* - * Determine the current state by reading the GPIO bit; earlier - * stages of the boot process have established the state. - * - * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here; - * this is because OFW will disable input for the pin and set a value.. - * READ_BACK will only contain a valid value if input is enabled and - * then a value is set. So, future readings of the pin can use - * READ_BACK, but the first one cannot. Awesome, huh? - */ - dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL) - ? 
DCON_SOURCE_CPU - : DCON_SOURCE_DCON; - dcon->pending_src = dcon->curr_src; - - /* Set the directions for the GPIO pins */ - gpio_direction_input(OLPC_GPIO_DCON_STAT0); - gpio_direction_input(OLPC_GPIO_DCON_STAT1); - gpio_direction_input(OLPC_GPIO_DCON_IRQ); - gpio_direction_input(OLPC_GPIO_DCON_BLANK); - gpio_direction_output(OLPC_GPIO_DCON_LOAD, - dcon->curr_src == DCON_SOURCE_CPU); - - /* Set up the interrupt mappings */ - - /* Set the IRQ to pair 2 */ - cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0); - - /* Enable group 2 to trigger the DCON interrupt */ - cs5535_gpio_set_irq(2, DCON_IRQ); - - /* Select edge level for interrupt (in PIC) */ - lob = inb(0x4d0); - lob &= ~(1 << DCON_IRQ); - outb(lob, 0x4d0); - - /* Register the interrupt handler */ - if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) { - pr_err("failed to request DCON's irq\n"); - goto err_req_irq; - } - - /* Clear INV_EN for GPIO7 (DCONIRQ) */ - cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT); - - /* Enable filter for GPIO12 (DCONBLANK) */ - cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER); - - /* Disable filter for GPIO7 */ - cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER); - - /* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ - cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT); - cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT); - - /* Add GPIO12 to the Filter Event Pair #7 */ - cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL); - - /* Turn off negative Edge Enable for GPIO12 */ - cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN); - - /* Enable negative Edge Enable for GPIO7 */ - cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN); - - /* Zero the filter amount for Filter Event Pair #7 */ - cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT); - - /* Clear the negative edge status for GPIO7 and GPIO12 */ - cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); - cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS); - - /* FIXME: Clear the positive status as well, just to be sure */ - cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS); - cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS); - - /* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ - cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); - cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE); - - return 0; - -err_req_irq: - gpio_free(OLPC_GPIO_DCON_BLANK); -err_gp_blank: - gpio_free(OLPC_GPIO_DCON_LOAD); -err_gp_load: - gpio_free(OLPC_GPIO_DCON_IRQ); -err_gp_irq: - gpio_free(OLPC_GPIO_DCON_STAT1); -err_gp_stat1: - gpio_free(OLPC_GPIO_DCON_STAT0); - return -EIO; -} - -static void dcon_wiggle_xo_1(void) -{ - int x; - - /* - * According to HiMax, when powering the DCON up we should hold - * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON - * state machine to reset to a (sane) initial state. Mitch Bradley - * did some testing and discovered that holding for 16 SMB_CLK cycles - * worked a lot more reliably, so that's what we do here. - * - * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must - * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and - * GPIO15. 
- */ - cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); - cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL); - cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE); - cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE); - cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); - cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); - cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2); - cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2); - cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); - cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); - - for (x = 0; x < 16; x++) { - udelay(5); - cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); - udelay(5); - cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); - } - udelay(5); - cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); - cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); - cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); - cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); -} - -static void dcon_set_dconload_1(int val) -{ - gpio_set_value(OLPC_GPIO_DCON_LOAD, val); -} - -static int dcon_read_status_xo_1(u8 *status) -{ - *status = gpio_get_value(OLPC_GPIO_DCON_STAT0); - *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1; - - /* Clear the negative edge status for GPIO7 */ - cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); - - return 0; -} - -struct dcon_platform_data dcon_pdata_xo_1 = { - .init = dcon_init_xo_1, - .bus_stabilize_wiggle = dcon_wiggle_xo_1, - .set_dconload = dcon_set_dconload_1, - .read_status = dcon_read_status_xo_1, -}; diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c deleted file mode 100644 index 6a4d379c16a3..000000000000 --- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (c) 2009,2010 One Laptop per Child - * - * This program is free software. You can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/acpi.h> -#include <linux/delay.h> -#include <linux/gpio.h> -#include <asm/olpc.h> - -/* TODO: this eventually belongs in linux/vx855.h */ -#define NR_VX855_GPI 14 -#define NR_VX855_GPO 13 -#define NR_VX855_GPIO 15 - -#define VX855_GPI(n) (n) -#define VX855_GPO(n) (NR_VX855_GPI + (n)) -#define VX855_GPIO(n) (NR_VX855_GPI + NR_VX855_GPO + (n)) - -#include "olpc_dcon.h" - -/* Hardware setup on the XO 1.5: - * DCONLOAD connects to VX855_GPIO1 (not SMBCK2) - * DCONBLANK connects to VX855_GPIO8 (not SSPICLK) unused in driver - * DCONSTAT0 connects to VX855_GPI10 (not SSPISDI) - * DCONSTAT1 connects to VX855_GPI11 (not nSSPISS) - * DCONIRQ connects to VX855_GPIO12 - * DCONSMBDATA connects to VX855 graphics CRTSPD - * DCONSMBCLK connects to VX855 graphics CRTSPCLK - */ - -#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */ -#define VX855_GPI_STATUS_CHG 0x450 /* PMIO_Rx50 */ -#define VX855_GPI_SCI_SMI 0x452 /* PMIO_Rx52 */ -#define BIT_GPIO12 0x40 - -#define PREFIX "OLPC DCON:" - -static void dcon_clear_irq(void) -{ - /* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */ - outb(BIT_GPIO12, VX855_GPI_STATUS_CHG); -} - -static int dcon_was_irq(void) -{ - u_int8_t tmp; - - /* irq status will appear in PMIO_Rx50[6] on gpio12 */ - tmp = inb(VX855_GPI_STATUS_CHG); - return !!(tmp & BIT_GPIO12); - - return 0; -} - -static int dcon_init_xo_1_5(struct dcon_priv *dcon) -{ - unsigned int irq; - - dcon_clear_irq(); - - /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */ - outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI); - - /* Determine the current state of DCONLOAD, likely set by firmware */ - /* GPIO1 */ - dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ? - DCON_SOURCE_CPU : DCON_SOURCE_DCON; - dcon->pending_src = dcon->curr_src; - - /* we're sharing the IRQ with ACPI */ - irq = acpi_gbl_FADT.sci_interrupt; - if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) { - pr_err("DCON (IRQ%d) allocation failed\n", irq); - return 1; - } - - return 0; -} - -static void set_i2c_line(int sda, int scl) -{ - unsigned char tmp; - unsigned int port = 0x26; - - /* FIXME: This directly accesses the CRT GPIO controller !!! */ - outb(port, 0x3c4); - tmp = inb(0x3c5); - - if (scl) - tmp |= 0x20; - else - tmp &= ~0x20; - - if (sda) - tmp |= 0x10; - else - tmp &= ~0x10; - - tmp |= 0x01; - - outb(port, 0x3c4); - outb(tmp, 0x3c5); -} - - -static void dcon_wiggle_xo_1_5(void) -{ - int x; - - /* - * According to HiMax, when powering the DCON up we should hold - * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON - * state machine to reset to a (sane) initial state. Mitch Bradley - * did some testing and discovered that holding for 16 SMB_CLK cycles - * worked a lot more reliably, so that's what we do here. 
- */ - set_i2c_line(1, 1); - - for (x = 0; x < 16; x++) { - udelay(5); - set_i2c_line(1, 0); - udelay(5); - set_i2c_line(1, 1); - } - udelay(5); - - /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */ - outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI); -} - -static void dcon_set_dconload_xo_1_5(int val) -{ - gpio_set_value(VX855_GPIO(1), val); -} - -static int dcon_read_status_xo_1_5(u8 *status) -{ - if (!dcon_was_irq()) - return -1; - - /* i believe this is the same as "inb(0x44b) & 3" */ - *status = gpio_get_value(VX855_GPI(10)); - *status |= gpio_get_value(VX855_GPI(11)) << 1; - - dcon_clear_irq(); - - return 0; -} - -struct dcon_platform_data dcon_pdata_xo_1_5 = { - .init = dcon_init_xo_1_5, - .bus_stabilize_wiggle = dcon_wiggle_xo_1_5, - .set_dconload = dcon_set_dconload_xo_1_5, - .read_status = dcon_read_status_xo_1_5, -}; diff --git a/drivers/staging/panel/Kconfig b/drivers/staging/panel/Kconfig deleted file mode 100644 index 3defa0133f2e..000000000000 --- a/drivers/staging/panel/Kconfig +++ /dev/null @@ -1,278 +0,0 @@ -config PANEL - tristate "Parallel port LCD/Keypad Panel support" - depends on PARPORT - ---help--- - Say Y here if you have an HD44780 or KS-0074 LCD connected to your - parallel port. This driver also features 4 and 6-key keypads. The LCD - is accessible through the /dev/lcd char device (10, 156), and the - keypad through /dev/keypad (10, 185). Both require misc device to be - enabled. This code can either be compiled as a module, or linked into - the kernel and started at boot. If you don't understand what all this - is about, say N. - -config PANEL_PARPORT - int "Default parallel port number (0=LPT1)" - depends on PANEL - range 0 255 - default "0" - ---help--- - This is the index of the parallel port the panel is connected to. One - driver instance only supports one parallel port, so if your keypad - and LCD are connected to two separate ports, you have to start two - modules with different arguments. Numbering starts with '0' for LPT1, - and so on. - -config PANEL_PROFILE - int "Default panel profile (0-5, 0=custom)" - depends on PANEL - range 0 5 - default "5" - ---help--- - To ease configuration, the driver supports different configuration - profiles for past and recent wirings. These profiles can also be - used to define an approximative configuration, completed by a few - other options. Here are the profiles : - - 0 = custom (see further) - 1 = 2x16 parallel LCD, old keypad - 2 = 2x16 serial LCD (KS-0074), new keypad - 3 = 2x16 parallel LCD (Hantronix), no keypad - 4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad - 5 = 2x40 parallel LCD (old one), with old keypad - - Custom configurations allow you to define how your display is - wired to the parallel port, and how it works. This is only intended - for experts. - -config PANEL_KEYPAD - depends on PANEL && PANEL_PROFILE="0" - int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)" - range 0 3 - default 0 - ---help--- - This enables and configures a keypad connected to the parallel port. - The keys will be read from character device 10,185. Valid values are : - - 0 : do not enable this driver - 1 : old 6 keys keypad - 2 : new 6 keys keypad, as used on the server at www.ant-computing.com - 3 : Nexcom NSA1045's 4 keys keypad - - New profiles can be described in the driver source. The driver also - supports simultaneous keys pressed when the keypad supports them. 
- -config PANEL_LCD - depends on PANEL && PANEL_PROFILE="0" - int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)" - range 0 5 - default 0 - ---help--- - This enables and configures an LCD connected to the parallel port. - The driver includes an interpreter for escape codes starting with - '\e[L' which are specific to the LCD, and a few ANSI codes. The - driver will be registered as character device 10,156, usually - under the name '/dev/lcd'. There are a total of 6 supported types : - - 0 : do not enable the driver - 1 : custom configuration and wiring (see further) - 2 : 2x16 & 2x40 parallel LCD (old wiring) - 3 : 2x16 serial LCD (KS-0074 based) - 4 : 2x16 parallel LCD (Hantronix wiring) - 5 : 2x16 parallel LCD (Nexcom wiring) - - When type '1' is specified, other options will appear to configure - more precise aspects (wiring, dimensions, protocol, ...). Please note - that those values changed from the 2.4 driver for better consistency. - -config PANEL_LCD_HEIGHT - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" - int "Number of lines on the LCD (1-2)" - range 1 2 - default 2 - ---help--- - This is the number of visible character lines on the LCD in custom profile. - It can either be 1 or 2. - -config PANEL_LCD_WIDTH - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" - int "Number of characters per line on the LCD (1-40)" - range 1 40 - default 40 - ---help--- - This is the number of characters per line on the LCD in custom profile. - Common values are 16,20,24,40. - -config PANEL_LCD_BWIDTH - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" - int "Internal LCD line width (1-40, 40 by default)" - range 1 40 - default 40 - ---help--- - Most LCDs use a standard controller which supports hardware lines of 40 - characters, although sometimes only 16, 20 or 24 of them are really wired - to the terminal. This results in some non-visible but addressable characters, - and is the case for most parallel LCDs. Other LCDs, and some serial ones, - however, use the same line width internally as what is visible. The KS0074 - for example, uses 16 characters per line for 16 visible characters per line. - - This option lets you configure the value used by your LCD in 'custom' profile. - If you don't know, put '40' here. - -config PANEL_LCD_HWIDTH - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" - int "Hardware LCD line width (1-64, 64 by default)" - range 1 64 - default 64 - ---help--- - Most LCDs use a single address bit to differentiate line 0 and line 1. Since - some of them need to be able to address 40 chars with the lower bits, they - often use the immediately superior power of 2, which is 64, to address the - next line. - - If you don't know what your LCD uses, in doubt let 16 here for a 2x16, and - 64 here for a 2x40. - -config PANEL_LCD_CHARSET - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" - int "LCD character set (0=normal, 1=KS0074)" - range 0 1 - default 0 - ---help--- - Some controllers such as the KS0074 use a somewhat strange character set - where many symbols are at unusual places. The driver knows how to map - 'standard' ASCII characters to the character sets used by these controllers. - Valid values are : - - 0 : normal (untranslated) character set - 1 : KS0074 character set - - If you don't know, use the normal one (0). 
- -config PANEL_LCD_PROTO - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" - int "LCD communication mode (0=parallel 8 bits, 1=serial)" - range 0 1 - default 0 - ---help--- - This driver now supports any serial or parallel LCD wired to a parallel - port. But before assigning signals, the driver needs to know if it will - be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires - (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals - (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for a 8 bits - parallel LCD, and 1 for a serial LCD. - -config PANEL_LCD_PIN_E - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) " - range -17 17 - default 14 - ---help--- - This describes the number of the parallel port pin to which the LCD 'E' - signal has been connected. It can be : - - 0 : no connection (eg: connected to ground) - 1..17 : directly connected to any of these pins on the DB25 plug - -1..-17 : connected to the same pin through an inverter (eg: transistor). - - Default for the 'E' pin in custom profile is '14' (AUTOFEED). - -config PANEL_LCD_PIN_RS - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) " - range -17 17 - default 17 - ---help--- - This describes the number of the parallel port pin to which the LCD 'RS' - signal has been connected. It can be : - - 0 : no connection (eg: connected to ground) - 1..17 : directly connected to any of these pins on the DB25 plug - -1..-17 : connected to the same pin through an inverter (eg: transistor). - - Default for the 'RS' pin in custom profile is '17' (SELECT IN). - -config PANEL_LCD_PIN_RW - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0" - int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) " - range -17 17 - default 16 - ---help--- - This describes the number of the parallel port pin to which the LCD 'RW' - signal has been connected. It can be : - - 0 : no connection (eg: connected to ground) - 1..17 : directly connected to any of these pins on the DB25 plug - -1..-17 : connected to the same pin through an inverter (eg: transistor). - - Default for the 'RW' pin in custom profile is '16' (INIT). - -config PANEL_LCD_PIN_SCL - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" - int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) " - range -17 17 - default 1 - ---help--- - This describes the number of the parallel port pin to which the serial - LCD 'SCL' signal has been connected. It can be : - - 0 : no connection (eg: connected to ground) - 1..17 : directly connected to any of these pins on the DB25 plug - -1..-17 : connected to the same pin through an inverter (eg: transistor). - - Default for the 'SCL' pin in custom profile is '1' (STROBE). - -config PANEL_LCD_PIN_SDA - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0" - int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) " - range -17 17 - default 2 - ---help--- - This describes the number of the parallel port pin to which the serial - LCD 'SDA' signal has been connected. 
It can be : - - 0 : no connection (eg: connected to ground) - 1..17 : directly connected to any of these pins on the DB25 plug - -1..-17 : connected to the same pin through an inverter (eg: transistor). - - Default for the 'SDA' pin in custom profile is '2' (D0). - -config PANEL_LCD_PIN_BL - depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" - int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) " - range -17 17 - default 0 - ---help--- - This describes the number of the parallel port pin to which the LCD 'BL' signal - has been connected. It can be : - - 0 : no connection (eg: connected to ground) - 1..17 : directly connected to any of these pins on the DB25 plug - -1..-17 : connected to the same pin through an inverter (eg: transistor). - - Default for the 'BL' pin in custom profile is '0' (uncontrolled). - -config PANEL_CHANGE_MESSAGE - depends on PANEL - bool "Change LCD initialization message ?" - default "n" - ---help--- - This allows you to replace the boot message indicating the kernel version - and the driver version with a custom message. This is useful on appliances - where a simple 'Starting system' message can be enough to stop a customer - from worrying. - - If you say 'Y' here, you'll be able to choose a message yourself. Otherwise, - say 'N' and keep the default message with the version. - -config PANEL_BOOT_MESSAGE - depends on PANEL && PANEL_CHANGE_MESSAGE="y" - string "New initialization message" - default "" - ---help--- - This allows you to replace the boot message indicating the kernel version - and the driver version with a custom message. This is useful on appliances - where a simple 'Starting system' message can be enough to stop a customer - from worrying. - - An empty message will only clear the display at driver init time. Any other - printf()-formatted message is valid with newline and escape codes. diff --git a/drivers/staging/panel/Makefile b/drivers/staging/panel/Makefile deleted file mode 100644 index 747c238b82f9..000000000000 --- a/drivers/staging/panel/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_PANEL) += panel.o diff --git a/drivers/staging/panel/TODO b/drivers/staging/panel/TODO deleted file mode 100644 index 2db3f994b632..000000000000 --- a/drivers/staging/panel/TODO +++ /dev/null @@ -1,8 +0,0 @@ -TODO: - - checkpatch.pl cleanups - - review major/minor usages - - review userspace api - - see if all of this could be easier done in userspace instead. - -Please send patches to Greg Kroah-Hartman <greg@kroah.com> and -Willy Tarreau <willy@meta-x.org> diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c index bbe5ad85cec0..46a1830b509b 100644 --- a/drivers/staging/rdma/hfi1/chip.c +++ b/drivers/staging/rdma/hfi1/chip.c @@ -1250,11 +1250,8 @@ CNTR_ELEM(#name, \ u64 read_csr(const struct hfi1_devdata *dd, u32 offset) { - u64 val; - if (dd->flags & HFI1_PRESENT) { - val = readq((void __iomem *)dd->kregbase + offset); - return val; + return readq((void __iomem *)dd->kregbase + offset); } return -1; } @@ -13537,7 +13534,6 @@ int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey) write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg); /* * Enable send-side J_KEY integrity check, unless this is A0 h/w - * (due to A0 erratum). 
*/ if (!is_ax(dd)) { reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE); diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h index 701e9e1012a6..014d7a609ea0 100644 --- a/drivers/staging/rdma/hfi1/chip_registers.h +++ b/drivers/staging/rdma/hfi1/chip_registers.h @@ -551,6 +551,17 @@ #define CCE_MSIX_TABLE_UPPER (CCE + 0x000000100008) #define CCE_MSIX_TABLE_UPPER_RESETCSR 0x0000000100000000ull #define CCE_MSIX_VEC_CLR_WITHOUT_INT (CCE + 0x000000110400) +#define CCE_PCIE_CTRL (CCE + 0x0000000000C0) +#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK 0x3ull +#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT 0 +#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK 0xFull +#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT 2 +#define CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT 8 +#define CCE_PCIE_CTRL_XMT_MARGIN_SHIFT 9 +#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK 0x1ull +#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT 12 +#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK 0x7ull +#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT 13 #define CCE_REVISION (CCE + 0x000000000000) #define CCE_REVISION2 (CCE + 0x000000000008) #define CCE_REVISION2_HFI_ID_MASK 0x1ull diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c index 0c8831705664..e41159fe6889 100644 --- a/drivers/staging/rdma/hfi1/diag.c +++ b/drivers/staging/rdma/hfi1/diag.c @@ -257,7 +257,7 @@ static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data, static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value); static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value); -static struct hfi1_filter_array hfi1_filters[] = { +static const struct hfi1_filter_array hfi1_filters[] = { { hfi1_filter_lid }, { hfi1_filter_dlid }, { hfi1_filter_mad_mgmt_class }, diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c index 8485de1fce08..ee50bbf64d39 100644 --- a/drivers/staging/rdma/hfi1/driver.c +++ b/drivers/staging/rdma/hfi1/driver.c @@ -246,7 +246,7 @@ static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf, */ inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded) { - if (unlikely(!IS_ALIGNED(size, PAGE_SIZE))) + if (unlikely(!PAGE_ALIGNED(size))) return 0; if (unlikely(size < MIN_EAGER_BUFFER)) return 0; @@ -368,7 +368,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd, if (opcode == IB_OPCODE_CNP) { /* * Only in pre-B0 h/w is the CNP_OPCODE handled - * via this code path (errata 291394). + * via this code path. */ struct hfi1_qp *qp = NULL; u32 lqpn, rqpn; diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/staging/rdma/hfi1/efivar.c index 7dc5bae220e0..47dfe2584760 100644 --- a/drivers/staging/rdma/hfi1/efivar.c +++ b/drivers/staging/rdma/hfi1/efivar.c @@ -83,8 +83,7 @@ static int read_efi_var(const char *name, unsigned long *size, if (!efi_enabled(EFI_RUNTIME_SERVICES)) return -EOPNOTSUPP; - uni_name = kzalloc(sizeof(efi_char16_t) * (strlen(name) + 1), - GFP_KERNEL); + uni_name = kcalloc(strlen(name) + 1, sizeof(efi_char16_t), GFP_KERNEL); temp_buffer = kzalloc(EFI_DATA_SIZE, GFP_KERNEL); if (!uni_name || !temp_buffer) { @@ -128,13 +127,12 @@ static int read_efi_var(const char *name, unsigned long *size, * temporary buffer. Now allocate a correctly sized * buffer. 
*/ - data = kmalloc(temp_size, GFP_KERNEL); + data = kmemdup(temp_buffer, temp_size, GFP_KERNEL); if (!data) { ret = -ENOMEM; goto fail; } - memcpy(data, temp_buffer, temp_size); *size = temp_size; *return_data = data; diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index d57d549052c8..8b911e8bf0df 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c @@ -487,8 +487,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) * Map only the amount allocated to the context, not the * entire available context's PIO space. */ - memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE, - PAGE_SIZE); + memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE); flags &= ~VM_MAYREAD; flags |= VM_DONTCOPY | VM_DONTEXPAND; vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); @@ -638,7 +637,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) goto done; } memaddr = (u64)cq->comps; - memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE); + memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries); flags |= VM_IO | VM_DONTEXPAND; vmf = 1; break; diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h index 2611bb2e764d..d4826a9ab8d3 100644 --- a/drivers/staging/rdma/hfi1/hfi.h +++ b/drivers/staging/rdma/hfi1/hfi.h @@ -1730,7 +1730,7 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; if (is_ax(dd)) - /* turn off send-side job key checks - A0 erratum */ + /* turn off send-side job key checks - A0 */ return base_sc_integrity & ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; return base_sc_integrity; @@ -1757,7 +1757,7 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; if (is_ax(dd)) - /* turn off send-side job key checks - A0 erratum */ + /* turn off send-side job key checks - A0 */ return base_sdma_integrity & ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; return base_sdma_integrity; @@ -1794,6 +1794,10 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) dev_info(&(dd)->pcidev->dev, "%s: " fmt, \ get_unit_name((dd)->unit), ##__VA_ARGS__) +#define dd_dev_dbg(dd, fmt, ...) \ + dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \ + get_unit_name((dd)->unit), ##__VA_ARGS__) + #define hfi1_dev_porterr(dd, port, fmt, ...) \ dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \ get_unit_name((dd)->unit), (dd)->unit, (port), \ diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c index 4dd8051aba7e..02df291eb172 100644 --- a/drivers/staging/rdma/hfi1/init.c +++ b/drivers/staging/rdma/hfi1/init.c @@ -332,7 +332,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt) } return rcd; bail: - kfree(rcd->opstats); kfree(rcd->egrbufs.rcvtids); kfree(rcd->egrbufs.buffers); kfree(rcd); @@ -736,8 +735,8 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) ret = lastfail; /* Allocate enough memory for user event notification. 
*/ - len = ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS * - sizeof(*dd->events), PAGE_SIZE); + len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS * + sizeof(*dd->events)); dd->events = vmalloc_user(len); if (!dd->events) dd_dev_err(dd, "Failed to allocate user events page\n"); @@ -1506,8 +1505,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) * rcvhdrqentsize is in DWs, so we have to convert to bytes * (* sizeof(u32)). */ - amt = ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize * - sizeof(u32), PAGE_SIZE); + amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize * + sizeof(u32)); gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? GFP_USER : GFP_KERNEL; diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c index cb4e6087dfdb..e34f093a6b55 100644 --- a/drivers/staging/rdma/hfi1/keys.c +++ b/drivers/staging/rdma/hfi1/keys.c @@ -113,7 +113,7 @@ int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region) ((((1 << (24 - hfi1_lkey_table_size)) - 1) & rkt->gen) << 8); if (mr->lkey == 0) { - mr->lkey |= 1 << 8; + mr->lkey = 1 << 8; rkt->gen++; } rcu_assign_pointer(rkt->table[r], mr); diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c index 4f5dbd14b5de..77700b818e3d 100644 --- a/drivers/staging/rdma/hfi1/mad.c +++ b/drivers/staging/rdma/hfi1/mad.c @@ -2279,17 +2279,23 @@ static void a0_portstatus(struct hfi1_pportdata *ppd, { if (!is_bx(ppd->dd)) { unsigned long vl; - u64 max_vl_xmit_wait = 0, tmp; + u64 sum_vl_xmit_wait = 0; u32 vl_all_mask = VL_MASK_ALL; for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), 8 * sizeof(vl_all_mask)) { - tmp = read_port_cntr(ppd, C_TX_WAIT_VL, - idx_from_vl(vl)); - if (tmp > max_vl_xmit_wait) - max_vl_xmit_wait = tmp; + u64 tmp = sum_vl_xmit_wait + + read_port_cntr(ppd, C_TX_WAIT_VL, + idx_from_vl(vl)); + if (tmp < sum_vl_xmit_wait) { + /* we wrapped */ + sum_vl_xmit_wait = (u64)~0; + break; + } + sum_vl_xmit_wait = tmp; } - rsp->port_xmit_wait = cpu_to_be64(max_vl_xmit_wait); + if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait) + rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait); } } @@ -2491,18 +2497,19 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port, return error_counter_summary; } -static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp, +static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, u32 vl_select_mask) { - if (!is_bx(dd)) { + if (!is_bx(ppd->dd)) { unsigned long vl; - int vfi = 0; u64 sum_vl_xmit_wait = 0; + u32 vl_all_mask = VL_MASK_ALL; - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), - 8 * sizeof(vl_select_mask)) { + for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), + 8 * sizeof(vl_all_mask)) { u64 tmp = sum_vl_xmit_wait + - be64_to_cpu(rsp->vls[vfi++].port_vl_xmit_wait); + read_port_cntr(ppd, C_TX_WAIT_VL, + idx_from_vl(vl)); if (tmp < sum_vl_xmit_wait) { /* we wrapped */ sum_vl_xmit_wait = (u64) ~0; @@ -2572,7 +2579,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, return reply((struct ib_mad_hdr *)pmp); } - rsp = (struct _port_dctrs *)&(req->port[0]); + rsp = &req->port[0]; memset(rsp, 0, sizeof(*rsp)); rsp->port_number = port; @@ -2665,7 +2672,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp, vfi++; } - a0_datacounters(dd, rsp, vl_select_mask); + a0_datacounters(ppd, rsp, vl_select_mask); if (resp_len) *resp_len += response_data_size; @@ -2724,7 +2731,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, return 
reply((struct ib_mad_hdr *)pmp); } - rsp = (struct _port_ectrs *)&(req->port[0]); + rsp = &req->port[0]; ibp = to_iport(ibdev, port_num); ppd = ppd_from_ibp(ibp); @@ -2772,7 +2779,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp, tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL); rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff; - vlinfo = (struct _vls_ectrs *)&(rsp->vls[0]); + vlinfo = &rsp->vls[0]; vfi = 0; vl_select_mask = be32_to_cpu(req->vl_select_mask); for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), @@ -2803,7 +2810,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp, u64 reg; req = (struct opa_port_error_info_msg *)pmp->data; - rsp = (struct _port_ei *)&(req->port[0]); + rsp = &req->port[0]; num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); @@ -3044,7 +3051,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp, u32 error_info_select; req = (struct opa_port_error_info_msg *)pmp->data; - rsp = (struct _port_ei *)&(req->port[0]); + rsp = &req->port[0]; num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)); num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3])); diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c index a3f8b884fdd6..38253212af7a 100644 --- a/drivers/staging/rdma/hfi1/mr.c +++ b/drivers/staging/rdma/hfi1/mr.c @@ -70,7 +70,7 @@ static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd, int m, i = 0; int rval = 0; - m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ; + m = DIV_ROUND_UP(count, HFI1_SEGSZ); for (; i < m; i++) { mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); if (!mr->map[i]) @@ -159,7 +159,7 @@ static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd) int m; /* Allocate struct plus pointers to first level page tables. */ - m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ; + m = DIV_ROUND_UP(count, HFI1_SEGSZ); mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); if (!mr) goto bail; @@ -333,7 +333,7 @@ struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags, int rval = -ENOMEM; /* Allocate struct plus pointers to first level page tables. */ - m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ; + m = DIV_ROUND_UP(fmr_attr->max_pages, HFI1_SEGSZ); fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); if (!fmr) goto bail; diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c index 8317b07d722a..47ca6314e328 100644 --- a/drivers/staging/rdma/hfi1/pcie.c +++ b/drivers/staging/rdma/hfi1/pcie.c @@ -247,8 +247,6 @@ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd) iounmap(dd->rcvarray_wc); if (dd->piobase) iounmap(dd->piobase); - - pci_set_drvdata(dd->pcidev, NULL); } /* @@ -867,6 +865,83 @@ static void arm_gasket_logic(struct hfi1_devdata *dd) } /* + * CCE_PCIE_CTRL long name helpers + * We redefine these shorter macros to use in the code while leaving + * chip_registers.h to be autogenerated from the hardware spec. 
+ */ +#define LANE_BUNDLE_MASK CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK +#define LANE_BUNDLE_SHIFT CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT +#define LANE_DELAY_MASK CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK +#define LANE_DELAY_SHIFT CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT +#define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT +#define MARGIN_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_SHIFT +#define MARGIN_G1_G2_OVERWRITE_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK +#define MARGIN_G1_G2_OVERWRITE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT +#define MARGIN_GEN1_GEN2_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK +#define MARGIN_GEN1_GEN2_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT + + /* + * Write xmt_margin for full-swing (WFR-B) or half-swing (WFR-C). + */ +static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname) +{ + u64 pcie_ctrl; + u64 xmt_margin; + u64 xmt_margin_oe; + u64 lane_delay; + u64 lane_bundle; + + pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL); + + /* + * For Discrete, use full-swing. + * - PCIe TX defaults to full-swing. + * Leave this register as default. + * For Integrated, use half-swing + * - Copy xmt_margin and xmt_margin_oe + * from Gen1/Gen2 to Gen3. + */ + if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */ + /* extract initial fields */ + xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT) + & MARGIN_GEN1_GEN2_MASK; + xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT) + & MARGIN_G1_G2_OVERWRITE_MASK; + lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK; + lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT) + & LANE_BUNDLE_MASK; + + /* + * For A0, EFUSE values are not set. Override with the + * correct values. + */ + if (is_ax(dd)) { + /* + * xmt_margin and OverwriteEnable should be the + * same for Gen1/Gen2 and Gen3 + */ + xmt_margin = 0x5; + xmt_margin_oe = 0x1; + lane_delay = 0xF; /* Delay 240ns. */ + lane_bundle = 0x0; /* Set to 1 lane. */ + } + + /* overwrite existing values */ + pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT) + | (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT) + | (xmt_margin << MARGIN_SHIFT) + | (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT) + | (lane_delay << LANE_DELAY_SHIFT) + | (lane_bundle << LANE_BUNDLE_SHIFT); + + write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl); + } + + dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n", + fname, pcie_ctrl); +} + +/* * Do all the steps needed to transition the PCIe link to Gen3 speed. */ int do_pcie_gen3_transition(struct hfi1_devdata *dd) @@ -986,7 +1061,7 @@ retry: * PcieCfgRegPl100 - Gen3 Control * * turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl - * turn on PcieCfgRegPl100.EqEieosCnt (erratum) + * turn on PcieCfgRegPl100.EqEieosCnt * Everything else zero. */ reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK; @@ -1064,11 +1139,8 @@ retry: /* * step 5d: program XMT margin - * Right now, leave the default alone. 
To change, do a - * read-modify-write of: - * CcePcieCtrl.XmtMargin - * CcePcieCtrl.XmitMarginOverwriteEnable */ + write_xmt_margin(dd, __func__); /* step 5e: disable active state power management (ASPM) */ dd_dev_info(dd, "%s: clearing ASPM\n", __func__); diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/staging/rdma/hfi1/pio_copy.c index ebb0bafc68cb..64bef6c26653 100644 --- a/drivers/staging/rdma/hfi1/pio_copy.c +++ b/drivers/staging/rdma/hfi1/pio_copy.c @@ -235,7 +235,7 @@ static inline void read_extra_bytes(struct pio_buf *pbuf, while (nbytes) { /* find the number of bytes in this u64 */ room = 8 - off; /* this u64 has room for this many bytes */ - xbytes = nbytes > room ? room : nbytes; + xbytes = min(room, nbytes); /* * shift down to zero lower bytes, shift up to zero upper @@ -565,7 +565,7 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes) /* calculate the end of data or end of block, whichever comes first */ send = pbuf->start + PIO_BLOCK_SIZE; - xend = send < dend ? send : dend; + xend = min(send, dend); /* shift up to SOP=1 space */ dest += SOP_DISTANCE; @@ -659,7 +659,7 @@ static void mid_copy_straight(struct pio_buf *pbuf, /* calculate the end of data or end of block, whichever comes first */ send = pbuf->start + PIO_BLOCK_SIZE; - xend = send < dend ? send : dend; + xend = min(send, dend); /* shift up to SOP=1 space */ dest += SOP_DISTANCE; diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c index bd1b402c1e14..25e6053c38db 100644 --- a/drivers/staging/rdma/hfi1/ud.c +++ b/drivers/staging/rdma/hfi1/ud.c @@ -671,7 +671,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) if (unlikely(bth1 & HFI1_BECN_SMASK)) { /* * In pre-B0 h/w the CNP_OPCODE is handled via an - * error path (errata 291394). + * error path. 
*/ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c index 692de658f0dc..8ebfe9ee0d76 100644 --- a/drivers/staging/rdma/hfi1/user_pages.c +++ b/drivers/staging/rdma/hfi1/user_pages.c @@ -61,11 +61,7 @@ dma_addr_t hfi1_map_page(struct pci_dev *hwdev, struct page *page, unsigned long offset, size_t size, int direction) { - dma_addr_t phys; - - phys = pci_map_page(hwdev, page, offset, size, direction); - - return phys; + return pci_map_page(hwdev, page, offset, size, direction); } int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable, diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index d3de771a0770..9d4f5d6aaf33 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c @@ -67,7 +67,6 @@ #include "hfi.h" #include "sdma.h" #include "user_sdma.h" -#include "sdma.h" #include "verbs.h" /* for the headers */ #include "common.h" /* for struct hfi1_tid_info */ #include "trace.h" @@ -346,7 +345,7 @@ static void activate_packet_queue(struct iowait *wait, int reason) static void sdma_kmem_cache_ctor(void *obj) { - struct user_sdma_txreq *tx = (struct user_sdma_txreq *)obj; + struct user_sdma_txreq *tx = obj; memset(tx, 0, sizeof(*tx)); } @@ -414,8 +413,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp) if (!cq) goto cq_nomem; - memsize = ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size, - PAGE_SIZE); + memsize = PAGE_ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size); cq->comps = vmalloc_user(memsize); if (!cq->comps) goto cq_comps_nomem; @@ -468,8 +466,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd) fd->pq = NULL; } if (fd->cq) { - if (fd->cq->comps) - vfree(fd->cq->comps); + vfree(fd->cq->comps); kfree(fd->cq); fd->cq = NULL; } @@ -926,8 +923,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) unsigned pageidx, len; base = (unsigned long)iovec->iov.iov_base; - offset = ((base + iovec->offset + iov_offset) & - ~PAGE_MASK); + offset = offset_in_page(base + iovec->offset + + iov_offset); pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >> PAGE_SHIFT); len = offset + req->info.fragsize > PAGE_SIZE ? 
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c index 09b8d412ee90..176168614b5a 100644 --- a/drivers/staging/rdma/hfi1/verbs.c +++ b/drivers/staging/rdma/hfi1/verbs.c @@ -1926,9 +1926,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) spin_lock_init(&dev->n_qps_lock); spin_lock_init(&dev->n_srqs_lock); spin_lock_init(&dev->n_mcast_grps_lock); - init_timer(&dev->mem_timer); - dev->mem_timer.function = mem_timer; - dev->mem_timer.data = (unsigned long) dev; + setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev); /* * The top hfi1_lkey_table_size bits are used to index the diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile index ed723585b502..29b9834870fd 100644 --- a/drivers/staging/rtl8188eu/Makefile +++ b/drivers/staging/rtl8188eu/Makefile @@ -53,4 +53,4 @@ r8188eu-y := \ obj-$(CONFIG_R8188EU) := r8188eu.o -ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/include +ccflags-y += -D__CHECK_ENDIAN__ -I$(srctree)/$(src)/include diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO index b574b235b340..ce60f07b9977 100644 --- a/drivers/staging/rtl8188eu/TODO +++ b/drivers/staging/rtl8188eu/TODO @@ -15,5 +15,5 @@ TODO: rcu_read_unlock(); Perhaps delete it, perhaps assign to some local variable. -Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>, +Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>, and Larry Finger <Larry.Finger@lwfinger.net>. diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index e5d29fe9d446..012860b34651 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -76,90 +76,87 @@ static void update_BCNTIM(struct adapter *padapter) struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network); unsigned char *pie = pnetwork_mlmeext->IEs; + u8 *p, *dst_ie, *premainder_ie = NULL; + u8 *pbackup_remainder_ie = NULL; + uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen; /* update TIM IE */ - if (true) { - u8 *p, *dst_ie, *premainder_ie = NULL; - u8 *pbackup_remainder_ie = NULL; - uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen; - - p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen, - pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_); - if (p != NULL && tim_ielen > 0) { - tim_ielen += 2; - premainder_ie = p+tim_ielen; - tim_ie_offset = (int)(p - pie); - remainder_ielen = pnetwork_mlmeext->IELength - - tim_ie_offset - tim_ielen; - /* append TIM IE from dst_ie offset */ - dst_ie = p; - } else { - tim_ielen = 0; + p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen, + pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_); + if (p != NULL && tim_ielen > 0) { + tim_ielen += 2; + premainder_ie = p+tim_ielen; + tim_ie_offset = (int)(p - pie); + remainder_ielen = pnetwork_mlmeext->IELength - + tim_ie_offset - tim_ielen; + /* append TIM IE from dst_ie offset */ + dst_ie = p; + } else { + tim_ielen = 0; - /* calculate head_len */ - offset = _FIXED_IE_LENGTH_; - offset += pnetwork_mlmeext->Ssid.SsidLength + 2; + /* calculate head_len */ + offset = _FIXED_IE_LENGTH_; + offset += pnetwork_mlmeext->Ssid.SsidLength + 2; - /* get supported rates len */ - p = rtw_get_ie(pie + _BEACON_IE_OFFSET_, - _SUPPORTEDRATES_IE_, &tmp_len, - (pnetwork_mlmeext->IELength - - _BEACON_IE_OFFSET_)); - if (p != NULL) - offset += tmp_len+2; + /* get supported rates len */ + p = rtw_get_ie(pie + 
_BEACON_IE_OFFSET_, + _SUPPORTEDRATES_IE_, &tmp_len, + (pnetwork_mlmeext->IELength - + _BEACON_IE_OFFSET_)); + if (p != NULL) + offset += tmp_len+2; - /* DS Parameter Set IE, len = 3 */ - offset += 3; + /* DS Parameter Set IE, len = 3 */ + offset += 3; - premainder_ie = pie + offset; + premainder_ie = pie + offset; - remainder_ielen = pnetwork_mlmeext->IELength - - offset - tim_ielen; + remainder_ielen = pnetwork_mlmeext->IELength - + offset - tim_ielen; - /* append TIM IE from offset */ - dst_ie = pie + offset; - } + /* append TIM IE from offset */ + dst_ie = pie + offset; + } - if (remainder_ielen > 0) { - pbackup_remainder_ie = rtw_malloc(remainder_ielen); - if (pbackup_remainder_ie && premainder_ie) - memcpy(pbackup_remainder_ie, - premainder_ie, remainder_ielen); - } - *dst_ie++ = _TIM_IE_; + if (remainder_ielen > 0) { + pbackup_remainder_ie = rtw_malloc(remainder_ielen); + if (pbackup_remainder_ie && premainder_ie) + memcpy(pbackup_remainder_ie, + premainder_ie, remainder_ielen); + } + *dst_ie++ = _TIM_IE_; - if ((pstapriv->tim_bitmap&0xff00) && - (pstapriv->tim_bitmap&0x00fc)) - tim_ielen = 5; - else - tim_ielen = 4; + if ((pstapriv->tim_bitmap&0xff00) && + (pstapriv->tim_bitmap&0x00fc)) + tim_ielen = 5; + else + tim_ielen = 4; - *dst_ie++ = tim_ielen; + *dst_ie++ = tim_ielen; - *dst_ie++ = 0;/* DTIM count */ - *dst_ie++ = 1;/* DTIM period */ + *dst_ie++ = 0;/* DTIM count */ + *dst_ie++ = 1;/* DTIM period */ - if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */ - *dst_ie++ = BIT(0);/* bitmap ctrl */ - else - *dst_ie++ = 0; + if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */ + *dst_ie++ = BIT(0);/* bitmap ctrl */ + else + *dst_ie++ = 0; - if (tim_ielen == 4) { - *dst_ie++ = pstapriv->tim_bitmap & 0xff; - } else if (tim_ielen == 5) { - put_unaligned_le16(pstapriv->tim_bitmap, dst_ie); - dst_ie += 2; - } + if (tim_ielen == 4) { + *dst_ie++ = pstapriv->tim_bitmap & 0xff; + } else if (tim_ielen == 5) { + put_unaligned_le16(pstapriv->tim_bitmap, dst_ie); + dst_ie += 2; + } - /* copy remainder IE */ - if (pbackup_remainder_ie) { - memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen); + /* copy remainder IE */ + if (pbackup_remainder_ie) { + memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen); - kfree(pbackup_remainder_ie); - } - offset = (uint)(dst_ie - pie); - pnetwork_mlmeext->IELength = offset + remainder_ielen; + kfree(pbackup_remainder_ie); } + offset = (uint)(dst_ie - pie); + pnetwork_mlmeext->IELength = offset + remainder_ielen; set_tx_beacon_cmd(padapter); } @@ -203,7 +200,7 @@ void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork, if (bmatch) dst_ie = p; else - dst_ie = (p+ielen); + dst_ie = p+ielen; } if (remainder_ielen > 0) { @@ -569,7 +566,7 @@ static void update_bmc_sta(struct adapter *padapter) psta->ieee8021x_blocked = 0; - memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats)); + memset(&psta->sta_stats, 0, sizeof(struct stainfo_stats)); /* prepare for add_RATid */ supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates); @@ -692,7 +689,7 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta) /* todo: init other variables */ - memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats)); + memset(&psta->sta_stats, 0, sizeof(struct stainfo_stats)); spin_lock_bh(&psta->lock); psta->state |= _FW_LINKED; diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 433b926ceae7..e5a6b7a70df7 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c 
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -69,23 +69,17 @@ exit: return _SUCCESS; } -struct cmd_obj *rtw_dequeue_cmd(struct __queue *queue) +struct cmd_obj *rtw_dequeue_cmd(struct __queue *queue) { unsigned long irqL; struct cmd_obj *obj; - spin_lock_irqsave(&queue->lock, irqL); - if (list_empty(&(queue->queue))) { - obj = NULL; - } else { - obj = container_of((&queue->queue)->next, struct cmd_obj, list); + obj = list_first_entry_or_null(&queue->queue, struct cmd_obj, list); + if (obj) list_del_init(&obj->list); - } - spin_unlock_irqrestore(&queue->lock, irqL); - return obj; } @@ -400,9 +394,8 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork) RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid)); pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); - if (pcmd == NULL) { + if (!pcmd) { res = _FAIL; - RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n")); goto exit; } /* for IEs is fix buf size */ diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c index 2c4afb80fc64..93e898d598fe 100644 --- a/drivers/staging/rtl8188eu/core/rtw_debug.c +++ b/drivers/staging/rtl8188eu/core/rtw_debug.c @@ -149,7 +149,7 @@ int proc_get_fwstate(char *page, char **start, { struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); - struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; int len = 0; @@ -184,7 +184,7 @@ int proc_get_mlmext_state(char *page, char **start, struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; - struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); + struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; int len = 0; @@ -200,7 +200,7 @@ int proc_get_qos_option(char *page, char **start, { struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); - struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; int len = 0; @@ -216,7 +216,7 @@ int proc_get_ht_option(char *page, char **start, { struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); - struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; int len = 0; @@ -247,9 +247,9 @@ int proc_get_ap_info(char *page, char **start, struct sta_info *psta; struct net_device *dev = data; struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); - struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); + struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; - struct wlan_network *cur_network = &(pmlmepriv->cur_network); + struct wlan_network *cur_network = &pmlmepriv->cur_network; struct sta_priv *pstapriv = &padapter->stapriv; int len = 0; @@ -851,7 +851,7 @@ int proc_get_all_sta_info(char *page, char **start, spin_lock_bh(&pstapriv->sta_hash_lock); for (i = 0; i < NUM_STA; i++) { - phead = &(pstapriv->sta_hash[i]); + phead = &pstapriv->sta_hash[i]; plist = phead->next; while (phead != plist) { diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c index 2320fb11af24..19f11d04d152 100644 --- a/drivers/staging/rtl8188eu/core/rtw_efuse.c +++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c 
@@ -104,13 +104,11 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) u8 u1temp = 0; efuseTbl = kzalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL); - if (efuseTbl == NULL) { - DBG_88E("%s: alloc efuseTbl fail!\n", __func__); + if (!efuseTbl) return; - } eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16)); - if (eFuseWord == NULL) { + if (!eFuseWord) { DBG_88E("%s: alloc eFuseWord fail!\n", __func__); goto eFuseWord_failed; } @@ -394,7 +392,7 @@ u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_e u8 badworden = 0x0F; u8 tmpdata[8]; - memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE); + memset(tmpdata, 0xff, PGPKT_DATA_SIZE); if (!(word_en & BIT(0))) { tmpaddr = start_addr; @@ -495,13 +493,13 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data) EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section); - if (data == NULL) + if (!data) return false; if (offset > max_section) return false; - memset((void *)data, 0xff, sizeof(u8)*PGPKT_DATA_SIZE); - memset((void *)tmpdata, 0xff, sizeof(u8)*PGPKT_DATA_SIZE); + memset(data, 0xff, sizeof(u8) * PGPKT_DATA_SIZE); + memset(tmpdata, 0xff, sizeof(u8) * PGPKT_DATA_SIZE); /* <Roger_TODO> Efuse has been pre-programmed dummy 5Bytes at the end of Efuse by CP. */ /* Skip dummy parts to prevent unexpected data read from Efuse. */ @@ -572,7 +570,7 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st u16 efuse_addr = *pAddr; u32 PgWriteSuccess = 0; - memset((void *)originaldata, 0xff, 8); + memset(originaldata, 0xff, 8); if (Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata)) { /* check if data exist */ diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c index 742b29c590df..f4e4baf6054a 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c +++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c @@ -207,8 +207,8 @@ inline u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl, ie_data[0] = ttl; ie_data[1] = flags; - *(u16 *)(ie_data+2) = cpu_to_le16(reason); - *(u16 *)(ie_data+4) = cpu_to_le16(precedence); + *(u16 *)(ie_data + 2) = cpu_to_le16(reason); + *(u16 *)(ie_data + 4) = cpu_to_le16(precedence); return rtw_set_ie(buf, 0x118, 6, ie_data, buf_len); } @@ -268,18 +268,18 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u cnt = 0; while (cnt < in_len) { - if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) { + if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt + 2], oui, oui_len))) { target_ie = &in_ie[cnt]; if (ie) - memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2); + memcpy(ie, &in_ie[cnt], in_ie[cnt + 1] + 2); if (ielen) - *ielen = in_ie[cnt+1]+2; + *ielen = in_ie[cnt + 1] + 2; break; } else { - cnt += in_ie[cnt+1]+2; /* goto next */ + cnt += in_ie[cnt + 1] + 2; /* goto next */ } } return target_ie; @@ -530,8 +530,8 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis } - if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) || - (memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN))) + if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie + 1) != (u8)(wpa_ie_len - 2)) || + (memcmp(wpa_ie + 2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN))) return _FAIL; pos = wpa_ie; @@ -599,7 +599,7 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi } - if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) + if 
((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie + 1) != (u8)(rsn_ie_len - 2))) return _FAIL; pos = rsn_ie; @@ -671,45 +671,45 @@ int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, while (cnt < in_len) { authmode = in_ie[cnt]; - if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) { + if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4))) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n", - sec_idx, in_ie[cnt+1]+2)); + sec_idx, in_ie[cnt + 1] + 2)); if (wpa_ie) { - memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt+1]+2); + memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt + 1] + 2); - for (i = 0; i < (in_ie[cnt+1]+2); i += 8) { + for (i = 0; i < (in_ie[cnt + 1] + 2); i += 8) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n", - wpa_ie[i], wpa_ie[i+1], wpa_ie[i+2], wpa_ie[i+3], wpa_ie[i+4], - wpa_ie[i+5], wpa_ie[i+6], wpa_ie[i+7])); + wpa_ie[i], wpa_ie[i + 1], wpa_ie[i + 2], wpa_ie[i + 3], wpa_ie[i + 4], + wpa_ie[i + 5], wpa_ie[i + 6], wpa_ie[i + 7])); } } - *wpa_len = in_ie[cnt+1]+2; - cnt += in_ie[cnt+1]+2; /* get next */ + *wpa_len = in_ie[cnt + 1] + 2; + cnt += in_ie[cnt + 1] + 2; /* get next */ } else { if (authmode == _WPA2_IE_ID_) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n get_rsn_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n", - sec_idx, in_ie[cnt+1]+2)); + sec_idx, in_ie[cnt + 1] + 2)); if (rsn_ie) { - memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt+1]+2); + memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2); - for (i = 0; i < (in_ie[cnt+1]+2); i += 8) { + for (i = 0; i < (in_ie[cnt + 1] + 2); i += 8) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n", - rsn_ie[i], rsn_ie[i+1], rsn_ie[i+2], rsn_ie[i+3], rsn_ie[i+4], - rsn_ie[i+5], rsn_ie[i+6], rsn_ie[i+7])); + rsn_ie[i], rsn_ie[i + 1], rsn_ie[i + 2], rsn_ie[i + 3], rsn_ie[i + 4], + rsn_ie[i + 5], rsn_ie[i + 6], rsn_ie[i + 7])); } } - *rsn_len = in_ie[cnt+1]+2; - cnt += in_ie[cnt+1]+2; /* get next */ + *rsn_len = in_ie[cnt + 1] + 2; + cnt += in_ie[cnt + 1] + 2; /* get next */ } else { - cnt += in_ie[cnt+1]+2; /* get next */ + cnt += in_ie[cnt + 1] + 2; /* get next */ } } } @@ -729,7 +729,7 @@ u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen) eid = ie_ptr[0]; if ((eid == _WPA_IE_ID_) && (!memcmp(&ie_ptr[2], wps_oui, 4))) { - *wps_ielen = ie_ptr[1]+2; + *wps_ielen = ie_ptr[1] + 2; match = true; } return match; @@ -761,20 +761,20 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen) while (cnt < in_len) { eid = in_ie[cnt]; - if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], wps_oui, 4))) { + if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], wps_oui, 4))) { wpsie_ptr = &in_ie[cnt]; if (wps_ie) - memcpy(wps_ie, &in_ie[cnt], in_ie[cnt+1]+2); + memcpy(wps_ie, &in_ie[cnt], in_ie[cnt + 1] + 2); if (wps_ielen) - *wps_ielen = in_ie[cnt+1]+2; + *wps_ielen = in_ie[cnt + 1] + 2; - cnt += in_ie[cnt+1]+2; + cnt += in_ie[cnt + 1] + 2; break; } else { - cnt += in_ie[cnt+1]+2; /* goto next */ + cnt += in_ie[cnt + 1] + 2; /* goto next */ } } return wpsie_ptr; @@ -848,12 +848,12 @@ u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 if (attr_ptr && attr_len) { if (buf_content) - memcpy(buf_content, attr_ptr+4, attr_len-4); + memcpy(buf_content, attr_ptr + 4, attr_len - 4); if (len_content) - *len_content = attr_len-4; + *len_content = attr_len - 4; - return attr_ptr+4; + return attr_ptr + 4; } return NULL; @@ -935,8 +935,8 @@ static int 
rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen, } break; default: - DBG_88E("unknown vendor specific information element ignored (vendor OUI %02x:%02x:%02x len=%lu)\n", - pos[0], pos[1], pos[2], (unsigned long)elen); + DBG_88E("unknown vendor specific information element ignored (vendor OUI %3phC len=%lu)\n", + pos, (unsigned long)elen); return -1; } return 0; @@ -1106,9 +1106,9 @@ void dump_ies(u8 *buf, u32 buf_len) u8 *pos = buf; u8 id, len; - while (pos-buf <= buf_len) { + while (pos - buf <= buf_len) { id = *pos; - len = *(pos+1); + len = *(pos + 1); DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len); dump_wps_ie(pos, len); @@ -1130,11 +1130,11 @@ void dump_wps_ie(u8 *ie, u32 ie_len) return; pos += 6; - while (pos-ie < ie_len) { + while (pos - ie < ie_len) { id = get_unaligned_be16(pos); len = get_unaligned_be16(pos + 2); DBG_88E("%s ID:0x%04x, LEN:%u\n", __func__, id, len); - pos += (4+len); + pos += (4 + len); } } @@ -1188,11 +1188,11 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork) unsigned char *pbuf; int group_cipher = 0, pairwise_cipher = 0, is8021x = 0; int ret = _FAIL; - pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12); + pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12); if (pbuf && (wpa_ielen > 0)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_cipher_info: wpa_ielen: %d", wpa_ielen)); - if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) { + if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) { pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher; pnetwork->BcnInfo.group_cipher = group_cipher; pnetwork->BcnInfo.is_8021x = is8021x; @@ -1201,11 +1201,11 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork) ret = _SUCCESS; } } else { - pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12); + pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12); if (pbuf && (wpa_ielen > 0)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n")); - if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) { + if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE OK!!!\n")); pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher; pnetwork->BcnInfo.group_cipher = group_cipher; @@ -1349,8 +1349,8 @@ int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *act fc = le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)frame)->frame_ctl); - if ((fc & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE)) != - (RTW_IEEE80211_FTYPE_MGMT|RTW_IEEE80211_STYPE_ACTION)) + if ((fc & (RTW_IEEE80211_FCTL_FTYPE | RTW_IEEE80211_FCTL_STYPE)) != + (RTW_IEEE80211_FTYPE_MGMT | RTW_IEEE80211_STYPE_ACTION)) return false; c = frame_body[0]; diff --git a/drivers/staging/rtl8188eu/core/rtw_iol.c b/drivers/staging/rtl8188eu/core/rtw_iol.c index cdcf0eacc0e0..2e2145caa56b 100644 --- a/drivers/staging/rtl8188eu/core/rtw_iol.c +++ b/drivers/staging/rtl8188eu/core/rtw_iol.c @@ -11,21 +11,18 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * ******************************************************************************/ -#include<rtw_iol.h> +#include <rtw_iol.h> -bool rtw_IOL_applied(struct adapter *adapter) +bool rtw_IOL_applied(struct adapter *adapter) { - if (1 == adapter->registrypriv.fw_iol) + if (adapter->registrypriv.fw_iol == 1) return true; - if ((2 == adapter->registrypriv.fw_iol) && (!adapter_to_dvobj(adapter)->ishighspeed)) + if ((adapter->registrypriv.fw_iol == 2) && + (!adapter_to_dvobj(adapter)->ishighspeed)) return true; return false; } diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c index abab854e6889..a645a620ebe2 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c @@ -122,31 +122,26 @@ void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) { rtw_free_mlme_priv_ie_data(pmlmepriv); - if (pmlmepriv) { - if (pmlmepriv->free_bss_buf) - vfree(pmlmepriv->free_bss_buf); - } + if (pmlmepriv) + vfree(pmlmepriv->free_bss_buf); } -struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */ +struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv) + /* _queue *free_queue) */ { - struct wlan_network *pnetwork; + struct wlan_network *pnetwork; struct __queue *free_queue = &pmlmepriv->free_bss_pool; - struct list_head *plist = NULL; spin_lock_bh(&free_queue->lock); - - if (list_empty(&free_queue->queue)) { - pnetwork = NULL; + pnetwork = list_first_entry_or_null(&free_queue->queue, + struct wlan_network, list); + if (!pnetwork) goto exit; - } - plist = free_queue->queue.next; - - pnetwork = container_of(plist, struct wlan_network, list); list_del_init(&pnetwork->list); - RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr=%p\n", plist)); + RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, + ("_rtw_alloc_network: ptr=%p\n", &pnetwork->list)); pnetwork->network_type = 0; pnetwork->fixed = false; pnetwork->last_scanned = jiffies; diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c index 3eca6874b6df..591a9127b573 100644 --- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c @@ -20,6 +20,7 @@ #define _RTW_MLME_EXT_C_ #include <linux/ieee80211.h> +#include <asm/unaligned.h> #include <osdep_service.h> #include <drv_types.h> @@ -1027,7 +1028,6 @@ static void issue_assocreq(struct adapter *padapter) unsigned char *pframe, *p; struct rtw_ieee80211_hdr *pwlanhdr; __le16 *fctrl; - __le16 le_tmp; unsigned int i, j, ie_len, index = 0; unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates]; struct ndis_802_11_var_ie *pIE; @@ -1073,8 +1073,7 @@ static void issue_assocreq(struct adapter *padapter) /* listen interval */ /* todo: listen interval for power saving */ - le_tmp = cpu_to_le16(3); - memcpy(pframe , (unsigned char *)&le_tmp, 2); + put_unaligned_le16(3, pframe); pframe += 2; pattrib->pktlen += 2; @@ -1673,7 +1672,6 @@ static void issue_action_BA(struct adapter *padapter, unsigned char *raddr, fctrl = &(pwlanhdr->frame_ctl); *(fctrl) = 0; - /* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN); memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN); memcpy(pwlanhdr->addr3, 
pnetwork->MacAddress, ETH_ALEN); @@ -3653,7 +3651,7 @@ static unsigned int on_action_spct(struct adapter *padapter, struct sta_info *psta = NULL; struct sta_priv *pstapriv = &padapter->stapriv; u8 *pframe = precv_frame->rx_data; - u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr)); + u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr); u8 category; u8 action; @@ -3740,10 +3738,10 @@ static unsigned int OnAction_back(struct adapter *padapter, memcpy(&(pmlmeinfo->ADDBA_req), &(frame_body[2]), sizeof(struct ADDBA_request)); process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr); - if (pmlmeinfo->bAcceptAddbaReq) - issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0); - else - issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */ + /* 37 = reject ADDBA Req */ + issue_action_BA(padapter, addr, + RTW_WLAN_ACTION_ADDBA_RESP, + pmlmeinfo->accept_addba_req ? 0 : 37); break; case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */ status = get_unaligned_le16(&frame_body[3]); @@ -4150,7 +4148,7 @@ int init_mlme_ext_priv(struct adapter *padapter) pmlmeext->padapter = padapter; init_mlme_ext_priv_value(padapter); - pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq; + pmlmeinfo->accept_addba_req = pregistrypriv->accept_addba_req; init_mlme_ext_timer(padapter); @@ -5063,7 +5061,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf) /* clear CAM */ flush_all_cam_entry(padapter); - memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength)); + memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength)); pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength; if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */ @@ -5122,7 +5120,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf) pmlmeinfo->candidate_tid_bitmap = 0; pmlmeinfo->bwmode_updated = false; - memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength)); + memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength)); pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength; if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */ diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c index 110b8c0b6cd7..5f53aa1cfd8a 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -116,9 +116,7 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv) rtw_free_uc_swdec_pending_queue(padapter); - if (precvpriv->pallocated_frame_buf) { - vfree(precvpriv->pallocated_frame_buf); - } + vfree(precvpriv->pallocated_frame_buf); rtw_hal_free_recv_priv(padapter); @@ -127,29 +125,22 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv) struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue) { struct recv_frame *hdr; - struct list_head *plist, *phead; struct adapter *padapter; struct recv_priv *precvpriv; - if (list_empty(&pfree_recv_queue->queue)) { - hdr = NULL; - } else { - phead = get_list_head(pfree_recv_queue); - - plist = phead->next; - - hdr = container_of(plist, struct recv_frame, list); - + hdr = list_first_entry_or_null(&pfree_recv_queue->queue, + struct recv_frame, list); + if (hdr) { list_del_init(&hdr->list); padapter = hdr->adapter; - if (padapter != NULL) { + if (padapter) { precvpriv = &padapter->recvpriv; if (pfree_recv_queue == &precvpriv->free_recv_queue) precvpriv->free_recvframe_cnt--; } } - return (struct recv_frame *)hdr; + return hdr; } struct recv_frame *rtw_alloc_recvframe(struct 
__queue *pfree_recv_queue) @@ -248,7 +239,7 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfre plist = plist->next; - rtw_free_recvframe((struct recv_frame *)hdr, pfree_recv_queue); + rtw_free_recvframe(hdr, pfree_recv_queue); } spin_unlock(&pframequeue->lock); @@ -917,9 +908,8 @@ static int sta2ap_data_frame(struct adapter *adapter, process_pwrbit_data(adapter, precv_frame); - if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) { + if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) process_wmmps_data(adapter, precv_frame); - } if (GetFrameSubType(ptr) & BIT(6)) { /* No data, will not indicate to upper layer, temporily count it here */ @@ -1274,32 +1264,25 @@ static int validate_recv_frame(struct adapter *adapter, /* Dump rx packets */ rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt)); if (bDumpRxPkt == 1) {/* dump all rx packets */ - int i; - DBG_88E("#############################\n"); - - for (i = 0; i < 64; i += 8) - DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i), - *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7)); - DBG_88E("#############################\n"); + if (_drv_err_ <= GlobalDebugLevel) { + pr_info(DRIVER_PREFIX "#############################\n"); + print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE, + 16, 1, ptr, 64, false); + pr_info(DRIVER_PREFIX "#############################\n"); + } } else if (bDumpRxPkt == 2) { - if (type == WIFI_MGT_TYPE) { - int i; - DBG_88E("#############################\n"); - - for (i = 0; i < 64; i += 8) - DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i), - *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7)); - DBG_88E("#############################\n"); + if ((_drv_err_ <= GlobalDebugLevel) && (type == WIFI_MGT_TYPE)) { + pr_info(DRIVER_PREFIX "#############################\n"); + print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE, + 16, 1, ptr, 64, false); + pr_info(DRIVER_PREFIX "#############################\n"); } } else if (bDumpRxPkt == 3) { - if (type == WIFI_DATA_TYPE) { - int i; - DBG_88E("#############################\n"); - - for (i = 0; i < 64; i += 8) - DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i), - *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7)); - DBG_88E("#############################\n"); + if ((_drv_err_ <= GlobalDebugLevel) && (type == WIFI_DATA_TYPE)) { + pr_info(DRIVER_PREFIX "#############################\n"); + print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE, + 16, 1, ptr, 64, false); + pr_info(DRIVER_PREFIX "#############################\n"); } } switch (type) { @@ -1433,7 +1416,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, phead = get_list_head(defrag_q); plist = phead->next; pfhdr = container_of(plist, struct recv_frame, list); - prframe = (struct recv_frame *)pfhdr; + prframe = pfhdr; list_del_init(&(prframe->list)); if (curfragnum != pfhdr->attrib.frag_num) { @@ -1453,7 +1436,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter, while (phead != plist) { pnfhdr = container_of(plist, struct recv_frame, list); - pnextrframe = (struct recv_frame *)pnfhdr; + pnextrframe = pnfhdr; /* check the fragment sequence (2nd ~n fragment frame) */ @@ -1541,10 +1524,9 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter, if (pdefrag_q != NULL) { if (fragnum == 0) { /* the first fragment */ - if 
(!list_empty(&pdefrag_q->queue)) { + if (!list_empty(&pdefrag_q->queue)) /* free current defrag_q */ rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue); - } } /* Then enqueue the 0~(n-1) fragment into the defrag_q */ @@ -1660,9 +1642,8 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) a_len -= nSubframe_Length; if (a_len != 0) { padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4-1)); - if (padding_len == 4) { + if (padding_len == 4) padding_len = 0; - } if (a_len < padding_len) { goto exit; @@ -1798,7 +1779,7 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor /* Check if there is any packet need indicate. */ while (!list_empty(phead)) { prhdr = container_of(plist, struct recv_frame, list); - prframe = (struct recv_frame *)prhdr; + prframe = prhdr; pattrib = &prframe->attrib; if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) { diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c index 6983c572b358..4ad2d8f63acf 100644 --- a/drivers/staging/rtl8188eu/core/rtw_rf.c +++ b/drivers/staging/rtl8188eu/core/rtw_rf.c @@ -70,20 +70,3 @@ u32 rtw_ch2freq(u32 channel) return freq; } - -u32 rtw_freq2ch(u32 freq) -{ - u8 i; - u32 ch = 0; - - for (i = 0; i < ch_freq_map_num; i++) { - if (freq == ch_freq_map[i].frequency) { - ch = ch_freq_map[i].channel; - break; - } - } - if (i == ch_freq_map_num) - ch = 1; - - return ch; -} diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c index 22839d57dc8c..b781ccf45bc0 100644 --- a/drivers/staging/rtl8188eu/core/rtw_security.c +++ b/drivers/staging/rtl8188eu/core/rtw_security.c @@ -1081,13 +1081,13 @@ static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen) frsubtype >>= 4; - memset((void *)mic_iv, 0, 16); - memset((void *)mic_header1, 0, 16); - memset((void *)mic_header2, 0, 16); - memset((void *)ctr_preload, 0, 16); - memset((void *)chain_buffer, 0, 16); - memset((void *)aes_out, 0, 16); - memset((void *)padded_buffer, 0, 16); + memset(mic_iv, 0, 16); + memset(mic_header1, 0, 16); + memset(mic_header2, 0, 16); + memset(ctr_preload, 0, 16); + memset(chain_buffer, 0, 16); + memset(aes_out, 0, 16); + memset(padded_buffer, 0, 16); if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN)) a4_exists = 0; @@ -1279,13 +1279,13 @@ static int aes_decipher(u8 *key, uint hdrlen, uint frsubtype = GetFrameSubType(pframe); frsubtype >>= 4; - memset((void *)mic_iv, 0, 16); - memset((void *)mic_header1, 0, 16); - memset((void *)mic_header2, 0, 16); - memset((void *)ctr_preload, 0, 16); - memset((void *)chain_buffer, 0, 16); - memset((void *)aes_out, 0, 16); - memset((void *)padded_buffer, 0, 16); + memset(mic_iv, 0, 16); + memset(mic_header1, 0, 16); + memset(mic_header2, 0, 16); + memset(ctr_preload, 0, 16); + memset(chain_buffer, 0, 16); + memset(aes_out, 0, 16); + memset(padded_buffer, 0, 16); /* start to decrypt the payload */ diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c index 1beeac46bfe7..78a9b9bf3b32 100644 --- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c @@ -172,16 +172,15 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv) spin_unlock_bh(&pstapriv->sta_hash_lock); /*===============================*/ - if (pstapriv->pallocated_stainfo_buf) - vfree(pstapriv->pallocated_stainfo_buf); + vfree(pstapriv->pallocated_stainfo_buf); } return _SUCCESS; } -struct sta_info 
*rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) +struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) { - s32 index; + s32 index; struct list_head *phash_list; struct sta_info *psta; struct __queue *pfree_sta_queue; @@ -189,17 +188,15 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) int i = 0; u16 wRxSeqInitialValue = 0xffff; - pfree_sta_queue = &pstapriv->free_sta_queue; - spin_lock_bh(&(pfree_sta_queue->lock)); - - if (list_empty(&pfree_sta_queue->queue)) { + spin_lock_bh(&pfree_sta_queue->lock); + psta = list_first_entry_or_null(&pfree_sta_queue->queue, + struct sta_info, list); + if (!psta) { spin_unlock_bh(&pfree_sta_queue->lock); - psta = NULL; } else { - psta = container_of((&pfree_sta_queue->queue)->next, struct sta_info, list); - list_del_init(&(psta->list)); + list_del_init(&psta->list); spin_unlock_bh(&pfree_sta_queue->lock); _rtw_init_stainfo(psta); memcpy(psta->hwaddr, hwaddr, ETH_ALEN); @@ -210,14 +207,11 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) psta = NULL; goto exit; } - phash_list = &(pstapriv->sta_hash[index]); - - spin_lock_bh(&(pstapriv->sta_hash_lock)); + phash_list = &pstapriv->sta_hash[index]; + spin_lock_bh(&pstapriv->sta_hash_lock); list_add_tail(&psta->hash_list, phash_list); - pstapriv->asoc_sta_count++; - spin_unlock_bh(&pstapriv->sta_hash_lock); /* Commented by Albert 2009/08/13 */ @@ -493,11 +487,9 @@ exit: struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter) { - struct sta_info *psta; struct sta_priv *pstapriv = &padapter->stapriv; u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - psta = rtw_get_stainfo(pstapriv, bc_addr); - return psta; + return rtw_get_stainfo(pstapriv, bc_addr); } u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr) diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c index 59b443255a90..83096696cd5b 100644 --- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c @@ -1374,7 +1374,7 @@ unsigned char check_assoc_AP(u8 *pframe, uint len) epigram_vendor_flag = 1; if (ralink_vendor_flag) { DBG_88E("link to Tenda W311R AP\n"); - return HT_IOT_PEER_TENDA; + return HT_IOT_PEER_TENDA; } else { DBG_88E("Capture EPIGRAM_OUI\n"); } @@ -1579,7 +1579,8 @@ void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr) tid = (param>>2)&0x0f; preorder_ctrl = &psta->recvreorder_ctrl[tid]; preorder_ctrl->indicate_seq = 0xffff; - preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq) ? true : false; + preorder_ctrl->enable = (pmlmeinfo->accept_addba_req) ? 
true + : false; } } diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c index e778132b73dc..f2dd7a60f67c 100644 --- a/drivers/staging/rtl8188eu/core/rtw_xmit.c +++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c @@ -247,11 +247,8 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv) pxmitbuf++; } - if (pxmitpriv->pallocated_frame_buf) - vfree(pxmitpriv->pallocated_frame_buf); - - if (pxmitpriv->pallocated_xmitbuf) - vfree(pxmitpriv->pallocated_xmitbuf); + vfree(pxmitpriv->pallocated_frame_buf); + vfree(pxmitpriv->pallocated_xmitbuf); /* free xmit extension buff */ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf; @@ -1216,40 +1213,24 @@ void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv) { unsigned long irql; - struct xmit_buf *pxmitbuf = NULL; - struct list_head *plist, *phead; + struct xmit_buf *pxmitbuf; struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue; - spin_lock_irqsave(&pfree_queue->lock, irql); - - if (list_empty(&pfree_queue->queue)) { - pxmitbuf = NULL; - } else { - phead = get_list_head(pfree_queue); - - plist = phead->next; - - pxmitbuf = container_of(plist, struct xmit_buf, list); - - list_del_init(&(pxmitbuf->list)); - } - - if (pxmitbuf != NULL) { + pxmitbuf = list_first_entry_or_null(&pfree_queue->queue, + struct xmit_buf, list); + if (pxmitbuf) { + list_del_init(&pxmitbuf->list); pxmitpriv->free_xmit_extbuf_cnt--; - pxmitbuf->priv_data = NULL; /* pxmitbuf->ext_tag = true; */ - if (pxmitbuf->sctx) { DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__); rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC); } } - spin_unlock_irqrestore(&pfree_queue->lock, irql); - return pxmitbuf; } @@ -1278,28 +1259,16 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf) struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv) { unsigned long irql; - struct xmit_buf *pxmitbuf = NULL; - struct list_head *plist, *phead; + struct xmit_buf *pxmitbuf; struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue; - /* DBG_88E("+rtw_alloc_xmitbuf\n"); */ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql); - - if (list_empty(&pfree_xmitbuf_queue->queue)) { - pxmitbuf = NULL; - } else { - phead = get_list_head(pfree_xmitbuf_queue); - - plist = phead->next; - - pxmitbuf = container_of(plist, struct xmit_buf, list); - - list_del_init(&(pxmitbuf->list)); - } - - if (pxmitbuf != NULL) { + pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue, + struct xmit_buf, list); + if (pxmitbuf) { + list_del_init(&pxmitbuf->list); pxmitpriv->free_xmitbuf_cnt--; pxmitbuf->priv_data = NULL; if (pxmitbuf->sctx) { @@ -1309,7 +1278,6 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv) } spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql); - return pxmitbuf; } @@ -1355,38 +1323,33 @@ Must be very very cautious... 
*/ -struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */ +struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv) + /* _queue *pfree_xmit_queue) */ { /* Please remember to use all the osdep_service api, and lock/unlock or _enter/_exit critical to protect pfree_xmit_queue */ - - struct xmit_frame *pxframe = NULL; - struct list_head *plist, *phead; + struct xmit_frame *pxframe; struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; - spin_lock_bh(&pfree_xmit_queue->lock); - - if (list_empty(&pfree_xmit_queue->queue)) { - RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt)); - pxframe = NULL; + pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue, + struct xmit_frame, list); + if (!pxframe) { + RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, + ("rtw_alloc_xmitframe:%d\n", + pxmitpriv->free_xmitframe_cnt)); } else { - phead = get_list_head(pfree_xmit_queue); - - plist = phead->next; - - pxframe = container_of(plist, struct xmit_frame, list); + list_del_init(&pxframe->list); - list_del_init(&(pxframe->list)); - } - - if (pxframe != NULL) { /* default value setting */ + /* default value setting */ pxmitpriv->free_xmitframe_cnt--; - RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt)); + RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, + ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n", + pxmitpriv->free_xmitframe_cnt)); pxframe->buf_addr = NULL; pxframe->pxmitbuf = NULL; @@ -1402,10 +1365,8 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf pxframe->agg_num = 1; pxframe->ack_report = 0; } - spin_unlock_bh(&pfree_xmit_queue->lock); - return pxframe; } diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c index f58a8222c899..c2ad6a3b99da 100644 --- a/drivers/staging/rtl8188eu/hal/bb_cfg.c +++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c @@ -598,18 +598,12 @@ static void rtl88e_phy_init_bb_rf_register_definition(struct adapter *adapter) reg[RF_PATH_A] = &hal_data->PHYRegDef[RF_PATH_A]; reg[RF_PATH_B] = &hal_data->PHYRegDef[RF_PATH_B]; - reg[RF_PATH_C] = &hal_data->PHYRegDef[RF_PATH_C]; - reg[RF_PATH_D] = &hal_data->PHYRegDef[RF_PATH_D]; reg[RF_PATH_A]->rfintfs = rFPGA0_XAB_RFInterfaceSW; reg[RF_PATH_B]->rfintfs = rFPGA0_XAB_RFInterfaceSW; - reg[RF_PATH_C]->rfintfs = rFPGA0_XCD_RFInterfaceSW; - reg[RF_PATH_D]->rfintfs = rFPGA0_XCD_RFInterfaceSW; reg[RF_PATH_A]->rfintfi = rFPGA0_XAB_RFInterfaceRB; reg[RF_PATH_B]->rfintfi = rFPGA0_XAB_RFInterfaceRB; - reg[RF_PATH_C]->rfintfi = rFPGA0_XCD_RFInterfaceRB; - reg[RF_PATH_D]->rfintfi = rFPGA0_XCD_RFInterfaceRB; reg[RF_PATH_A]->rfintfo = rFPGA0_XA_RFInterfaceOE; reg[RF_PATH_B]->rfintfo = rFPGA0_XB_RFInterfaceOE; @@ -622,13 +616,9 @@ static void rtl88e_phy_init_bb_rf_register_definition(struct adapter *adapter) reg[RF_PATH_A]->rfLSSI_Select = rFPGA0_XAB_RFParameter; reg[RF_PATH_B]->rfLSSI_Select = rFPGA0_XAB_RFParameter; - reg[RF_PATH_C]->rfLSSI_Select = rFPGA0_XCD_RFParameter; - reg[RF_PATH_D]->rfLSSI_Select = rFPGA0_XCD_RFParameter; reg[RF_PATH_A]->rfTxGainStage = rFPGA0_TxGainStage; reg[RF_PATH_B]->rfTxGainStage = rFPGA0_TxGainStage; - reg[RF_PATH_C]->rfTxGainStage = rFPGA0_TxGainStage; - reg[RF_PATH_D]->rfTxGainStage = rFPGA0_TxGainStage; reg[RF_PATH_A]->rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; reg[RF_PATH_B]->rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; @@ -638,43 +628,27 @@ static void 
rtl88e_phy_init_bb_rf_register_definition(struct adapter *adapter) reg[RF_PATH_A]->rfSwitchControl = rFPGA0_XAB_SwitchControl; reg[RF_PATH_B]->rfSwitchControl = rFPGA0_XAB_SwitchControl; - reg[RF_PATH_C]->rfSwitchControl = rFPGA0_XCD_SwitchControl; - reg[RF_PATH_D]->rfSwitchControl = rFPGA0_XCD_SwitchControl; reg[RF_PATH_A]->rfAGCControl1 = rOFDM0_XAAGCCore1; reg[RF_PATH_B]->rfAGCControl1 = rOFDM0_XBAGCCore1; - reg[RF_PATH_C]->rfAGCControl1 = rOFDM0_XCAGCCore1; - reg[RF_PATH_D]->rfAGCControl1 = rOFDM0_XDAGCCore1; reg[RF_PATH_A]->rfAGCControl2 = rOFDM0_XAAGCCore2; reg[RF_PATH_B]->rfAGCControl2 = rOFDM0_XBAGCCore2; - reg[RF_PATH_C]->rfAGCControl2 = rOFDM0_XCAGCCore2; - reg[RF_PATH_D]->rfAGCControl2 = rOFDM0_XDAGCCore2; reg[RF_PATH_A]->rfRxIQImbalance = rOFDM0_XARxIQImbalance; reg[RF_PATH_B]->rfRxIQImbalance = rOFDM0_XBRxIQImbalance; - reg[RF_PATH_C]->rfRxIQImbalance = rOFDM0_XCRxIQImbalance; - reg[RF_PATH_D]->rfRxIQImbalance = rOFDM0_XDRxIQImbalance; reg[RF_PATH_A]->rfRxAFE = rOFDM0_XARxAFE; reg[RF_PATH_B]->rfRxAFE = rOFDM0_XBRxAFE; - reg[RF_PATH_C]->rfRxAFE = rOFDM0_XCRxAFE; - reg[RF_PATH_D]->rfRxAFE = rOFDM0_XDRxAFE; reg[RF_PATH_A]->rfTxIQImbalance = rOFDM0_XATxIQImbalance; reg[RF_PATH_B]->rfTxIQImbalance = rOFDM0_XBTxIQImbalance; - reg[RF_PATH_C]->rfTxIQImbalance = rOFDM0_XCTxIQImbalance; - reg[RF_PATH_D]->rfTxIQImbalance = rOFDM0_XDTxIQImbalance; reg[RF_PATH_A]->rfTxAFE = rOFDM0_XATxAFE; reg[RF_PATH_B]->rfTxAFE = rOFDM0_XBTxAFE; - reg[RF_PATH_C]->rfTxAFE = rOFDM0_XCTxAFE; - reg[RF_PATH_D]->rfTxAFE = rOFDM0_XDTxAFE; reg[RF_PATH_A]->rfLSSIReadBack = rFPGA0_XA_LSSIReadBack; reg[RF_PATH_B]->rfLSSIReadBack = rFPGA0_XB_LSSIReadBack; - reg[RF_PATH_C]->rfLSSIReadBack = rFPGA0_XC_LSSIReadBack; - reg[RF_PATH_D]->rfLSSIReadBack = rFPGA0_XD_LSSIReadBack; reg[RF_PATH_A]->rfLSSIReadBackPi = TransceiverA_HSPI_Readback; reg[RF_PATH_B]->rfLSSIReadBackPi = TransceiverB_HSPI_Readback; diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c index 4d72537644b3..656133c47426 100644 --- a/drivers/staging/rtl8188eu/hal/fw.c +++ b/drivers/staging/rtl8188eu/hal/fw.c @@ -75,16 +75,6 @@ static void _rtl88e_fw_block_write(struct adapter *adapt, usb_write8(adapt, write_address, byte_buffer[i]); } -static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen) -{ - u32 i; - - for (i = *pfwlen; i < roundup(*pfwlen, 4); i++) - pfwbuf[i] = 0; - - *pfwlen = i; -} - static void _rtl88e_fw_page_write(struct adapter *adapt, u32 page, const u8 *buffer, u32 size) { @@ -103,8 +93,6 @@ static void _rtl88e_write_fw(struct adapter *adapt, u8 *buffer, u32 size) u32 page_no, remain; u32 page, offset; - _rtl88e_fill_dummy(buf_ptr, &size); - page_no = size / FW_8192C_PAGE_SIZE; remain = size % FW_8192C_PAGE_SIZE; @@ -170,14 +158,14 @@ exit: int rtl88eu_download_fw(struct adapter *adapt) { - struct hal_data_8188e *rtlhal = GET_HAL_DATA(adapt); struct dvobj_priv *dvobj = adapter_to_dvobj(adapt); struct device *device = dvobj_to_dev(dvobj); const struct firmware *fw; const char fw_name[] = "rtlwifi/rtl8188eufw.bin"; struct rtl92c_firmware_header *pfwheader = NULL; - u8 *pfwdata; - u32 fwsize; + u8 *download_data, *fw_data; + size_t download_size; + unsigned int trailing_zeros_length; if (request_firmware(&fw, fw_name, device)) { dev_err(device, "Firmware %s not available\n", fw_name); @@ -186,35 +174,43 @@ int rtl88eu_download_fw(struct adapter *adapt) if (fw->size > FW_8188E_SIZE) { dev_err(device, "Firmware size exceed 0x%X. 
Check it.\n", - FW_8188E_SIZE); + FW_8188E_SIZE); + release_firmware(fw); return -1; } - pfwdata = kzalloc(FW_8188E_SIZE, GFP_KERNEL); - if (!pfwdata) + trailing_zeros_length = (4 - fw->size % 4) % 4; + + fw_data = kmalloc(fw->size + trailing_zeros_length, GFP_KERNEL); + if (!fw_data) { + release_firmware(fw); return -ENOMEM; + } - rtlhal->pfirmware = pfwdata; - memcpy(rtlhal->pfirmware, fw->data, fw->size); - rtlhal->fwsize = fw->size; - release_firmware(fw); + memcpy(fw_data, fw->data, fw->size); + memset(fw_data + fw->size, 0, trailing_zeros_length); - fwsize = rtlhal->fwsize; - pfwheader = (struct rtl92c_firmware_header *)pfwdata; + pfwheader = (struct rtl92c_firmware_header *)fw_data; if (IS_FW_HEADER_EXIST(pfwheader)) { - pfwdata = pfwdata + 32; - fwsize = fwsize - 32; + download_data = fw_data + 32; + download_size = fw->size + trailing_zeros_length - 32; + } else { + download_data = fw_data; + download_size = fw->size + trailing_zeros_length; } + release_firmware(fw); + if (usb_read8(adapt, REG_MCUFWDL) & RAM_DL_SEL) { usb_write8(adapt, REG_MCUFWDL, 0); rtl88e_firmware_selfreset(adapt); } _rtl88e_enable_fw_download(adapt, true); usb_write8(adapt, REG_MCUFWDL, usb_read8(adapt, REG_MCUFWDL) | FWDL_ChkSum_rpt); - _rtl88e_write_fw(adapt, pfwdata, fwsize); + _rtl88e_write_fw(adapt, download_data, download_size); _rtl88e_enable_fw_download(adapt, false); + kfree(fw_data); return _rtl88e_fw_free_to_go(adapt); } diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c index 2c25d3b02036..8d2316b9e6e5 100644 --- a/drivers/staging/rtl8188eu/hal/odm.c +++ b/drivers/staging/rtl8188eu/hal/odm.c @@ -255,9 +255,6 @@ void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def Cmn case ODM_CMNINFO_CUT_VER: pDM_Odm->CutVersion = (u8)Value; break; - case ODM_CMNINFO_FAB_VER: - pDM_Odm->FabVersion = (u8)Value; - break; case ODM_CMNINFO_RF_TYPE: pDM_Odm->RFType = (u8)Value; break; @@ -477,7 +474,6 @@ void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm) ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface=%d\n", pDM_Odm->SupportInterface)); ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType=0x%x\n", pDM_Odm->SupportICType)); ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion=%d\n", pDM_Odm->CutVersion)); - ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("FabVersion=%d\n", pDM_Odm->FabVersion)); ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RFType=%d\n", pDM_Odm->RFType)); ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType=%d\n", pDM_Odm->BoardType)); ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA=%d\n", pDM_Odm->ExtLNA)); diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c index d3e8a8ea1829..ae42b4492c77 100644 --- a/drivers/staging/rtl8188eu/hal/phy.c +++ b/drivers/staging/rtl8188eu/hal/phy.c @@ -180,32 +180,6 @@ static void get_tx_power_index(struct adapter *adapt, u8 channel, u8 *cck_pwr, hal_data->BW20_24G_Diff[TxCount][RF_PATH_A]+ hal_data->BW20_24G_Diff[TxCount][index]; bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index]; - } else if (TxCount == RF_PATH_C) { - cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index]; - ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[RF_PATH_B][index]+ - hal_data->BW20_24G_Diff[TxCount][index]; - - bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+ - 
hal_data->BW20_24G_Diff[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[RF_PATH_B][index]+ - hal_data->BW20_24G_Diff[TxCount][index]; - bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index]; - } else if (TxCount == RF_PATH_D) { - cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index]; - ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[RF_PATH_B][index]+ - hal_data->BW20_24G_Diff[RF_PATH_C][index]+ - hal_data->BW20_24G_Diff[TxCount][index]; - - bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[RF_PATH_A][index]+ - hal_data->BW20_24G_Diff[RF_PATH_B][index]+ - hal_data->BW20_24G_Diff[RF_PATH_C][index]+ - hal_data->BW20_24G_Diff[TxCount][index]; - bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index]; } } } diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c index 3e60b23819ae..b76b0f5d6220 100644 --- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c +++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c @@ -23,8 +23,8 @@ /* This routine deals with the Power Configuration CMDs parsing * for RTL8723/RTL8188E Series IC. */ -u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers, - u8 ifacetype, struct wl_pwr_cfg pwrseqcmd[]) +u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, + struct wl_pwr_cfg pwrseqcmd[]) { struct wl_pwr_cfg pwrcfgcmd = {0}; u8 poll_bit = false; @@ -39,21 +39,16 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers, RT_TRACE(_module_hal_init_c_, _drv_info_, ("rtl88eu_pwrseqcmdparsing: offset(%#x) cut_msk(%#x)" - "fab_msk(%#x) interface_msk(%#x) base(%#x) cmd(%#x)" + " cmd(%#x)" "msk(%#x) value(%#x)\n", GET_PWR_CFG_OFFSET(pwrcfgcmd), GET_PWR_CFG_CUT_MASK(pwrcfgcmd), - GET_PWR_CFG_FAB_MASK(pwrcfgcmd), - GET_PWR_CFG_INTF_MASK(pwrcfgcmd), - GET_PWR_CFG_BASE(pwrcfgcmd), GET_PWR_CFG_CMD(pwrcfgcmd), GET_PWR_CFG_MASK(pwrcfgcmd), GET_PWR_CFG_VALUE(pwrcfgcmd))); - /* Only Handle the command whose FAB, CUT, and Interface are matched */ - if ((GET_PWR_CFG_FAB_MASK(pwrcfgcmd) & fab_vers) && - (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) && - (GET_PWR_CFG_INTF_MASK(pwrcfgcmd) & ifacetype)) { + /* Only Handle the command whose CUT is matched */ + if (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) { switch (GET_PWR_CFG_CMD(pwrcfgcmd)) { case PWR_CMD_READ: RT_TRACE(_module_hal_init_c_, _drv_info_, diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c index 199a77acd7a9..f9919a94a77e 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c @@ -50,7 +50,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter) struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter); struct dm_priv *pdmpriv = &hal_data->dmpriv; struct odm_dm_struct *dm_odm = &(hal_data->odmpriv); - u8 cut_ver, fab_ver; + u8 cut_ver; /* Init Value */ memset(dm_odm, 0, sizeof(*dm_odm)); @@ -61,10 +61,8 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter) ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E); - fab_ver = ODM_TSMC; cut_ver = ODM_CUT_A; - ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_FAB_VER, fab_ver); ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_CUT_VER, cut_ver); ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_MP_TEST_CHIP, hal_data->VersionID.ChipType == NORMAL_CHIP ? 
true : false); diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c index e04303ce80af..c96d80487a56 100644 --- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c +++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c @@ -415,7 +415,7 @@ static u32 xmitframe_need_length(struct xmit_frame *pxmitframe) { struct pkt_attrib *pattrib = &pxmitframe->attrib; - u32 len = 0; + u32 len; /* no consider fragement */ len = pattrib->hdrlen + pattrib->iv_len + @@ -614,7 +614,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe) { - s32 res = _SUCCESS; + s32 res; res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe); if (res == _SUCCESS) diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c index 5789e1e23f0a..07a61b8271f0 100644 --- a/drivers/staging/rtl8188eu/hal/usb_halinit.c +++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c @@ -108,7 +108,6 @@ static u32 rtl8188eu_InitPowerOn(struct adapter *adapt) return _SUCCESS; if (!rtl88eu_pwrseqcmdparsing(adapt, PWR_CUT_ALL_MSK, - PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_PWR_ON_FLOW)) { DBG_88E(KERN_ERR "%s: run power on flow fail\n", __func__); return _FAIL; @@ -926,7 +925,6 @@ static void CardDisableRTL8188EU(struct adapter *Adapter) /* Run LPS WL RFOFF flow */ rtl88eu_pwrseqcmdparsing(Adapter, PWR_CUT_ALL_MSK, - PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_LPS_ENTER_FLOW); /* 2. 0x1F[7:0] = 0 turn off RF */ @@ -949,7 +947,6 @@ static void CardDisableRTL8188EU(struct adapter *Adapter) /* Card disable power action flow */ rtl88eu_pwrseqcmdparsing(Adapter, PWR_CUT_ALL_MSK, - PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_DISABLE_FLOW); /* Reset MCU IO Wrapper */ diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h index e058162fe2ba..2670d6b6a79e 100644 --- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h +++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h @@ -69,13 +69,11 @@ enum hw90_block { enum rf_radio_path { RF_PATH_A = 0, /* Radio Path A */ RF_PATH_B = 1, /* Radio Path B */ - RF_PATH_C = 2, /* Radio Path C */ - RF_PATH_D = 3, /* Radio Path D */ }; #define MAX_PG_GROUP 13 -#define RF_PATH_MAX 3 +#define RF_PATH_MAX 2 #define MAX_RF_PATH RF_PATH_MAX #define MAX_TX_COUNT 4 /* path numbers */ diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h index 6a2a147e6d15..3fb691daa5af 100644 --- a/drivers/staging/rtl8188eu/include/basic_types.h +++ b/drivers/staging/rtl8188eu/include/basic_types.h @@ -23,10 +23,6 @@ #include <linux/types.h> #define NDIS_OID uint -typedef void (*proc_t)(void *); - -#define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field) - /* port from fw */ /* TODO: Macros Below are Sync from SD7-Driver. 
It is necessary * to check correctness */ @@ -46,31 +42,6 @@ typedef void (*proc_t)(void *); #define EF4BYTE(_val) \ (le32_to_cpu(_val)) -/* Read data from memory */ -#define READEF1BYTE(_ptr) \ - EF1BYTE(*((u8 *)(_ptr))) -/* Read le16 data from memory and convert to host ordering */ -#define READEF2BYTE(_ptr) \ - EF2BYTE(*(_ptr)) -#define READEF4BYTE(_ptr) \ - EF4BYTE(*(_ptr)) - -/* Write data to memory */ -#define WRITEEF1BYTE(_ptr, _val) \ - do { \ - (*((u8 *)(_ptr))) = EF1BYTE(_val) \ - } while (0) -/* Write le data to memory in host ordering */ -#define WRITEEF2BYTE(_ptr, _val) \ - do { \ - (*((u16 *)(_ptr))) = EF2BYTE(_val) \ - } while (0) - -#define WRITEEF4BYTE(_ptr, _val) \ - do { \ - (*((u32 *)(_ptr))) = EF2BYTE(_val) \ - } while (0) - /* Create a bit mask * Examples: * BIT_LEN_MASK_32(0) => 0x00000000 diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h index 0729bd40b02a..dcb032b6c3a7 100644 --- a/drivers/staging/rtl8188eu/include/drv_types.h +++ b/drivers/staging/rtl8188eu/include/drv_types.h @@ -110,7 +110,7 @@ struct registry_priv { u8 wifi_spec;/* !turbo_mode */ u8 channel_plan; - bool bAcceptAddbaReq; + bool accept_addba_req; /* true = accept AP's Add BA req */ u8 antdiv_cfg; u8 antdiv_type; @@ -135,9 +135,9 @@ }; /* For registry parameters */ -#define RGTRY_OFT(field) ((u32)FIELD_OFFSET(struct registry_priv, field)) +#define RGTRY_OFT(field) ((u32)offsetof(struct registry_priv, field)) #define RGTRY_SZ(field) sizeof(((struct registry_priv *)0)->field) -#define BSSID_OFT(field) ((u32)FIELD_OFFSET(struct wlan_bssid_ex, field)) +#define BSSID_OFT(field) ((u32)offsetof(struct wlan_bssid_ex, field)) #define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *)0)->field) #define MAX_CONTINUAL_URB_ERR 4 @@ -176,8 +176,6 @@ static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj) }; struct adapter { - u16 chip_type; - struct dvobj_priv *dvobj; struct mlme_priv mlmepriv; struct mlme_ext_priv mlmeextpriv; diff --git a/drivers/staging/rtl8188eu/include/ieee80211_ext.h b/drivers/staging/rtl8188eu/include/ieee80211_ext.h deleted file mode 100644 index 15e53d380ad0..000000000000 --- a/drivers/staging/rtl8188eu/include/ieee80211_ext.h +++ /dev/null @@ -1,290 +0,0 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details.
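On the FIELD_OFFSET() removal in basic_types.h and the drv_types.h/ioctl_linux.c hunks that switch to offsetof(): the driver-local macro, ((ssize_t)&((s *)(0))->field), is just a hand-rolled spelling of the standard offsetof(), so RGTRY_OFT/BSSID_OFT and the ndis_802_11_wep length computations later in this series can use the stock helper directly. A small standalone sketch with a hypothetical struct (not the driver's types) showing the two forms agree:

#include <stdio.h>
#include <stddef.h>

/* the hand-rolled form the driver used to carry, kept here only for comparison */
#define FIELD_OFFSET(s, field) ((size_t)&((s *)(0))->field)

struct demo_wep {                 /* hypothetical stand-in for struct ndis_802_11_wep */
	unsigned int Length;
	unsigned int KeyIndex;
	unsigned int KeyLength;
	unsigned char KeyMaterial[16];
};

int main(void)
{
	printf("offsetof: %zu, FIELD_OFFSET: %zu\n",
	       offsetof(struct demo_wep, KeyMaterial),
	       FIELD_OFFSET(struct demo_wep, KeyMaterial));
	return 0;
}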
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * - * - ******************************************************************************/ -#ifndef __IEEE80211_EXT_H -#define __IEEE80211_EXT_H - -#include <osdep_service.h> -#include <drv_types.h> - -#define WMM_OUI_TYPE 2 -#define WMM_OUI_SUBTYPE_INFORMATION_ELEMENT 0 -#define WMM_OUI_SUBTYPE_PARAMETER_ELEMENT 1 -#define WMM_OUI_SUBTYPE_TSPEC_ELEMENT 2 -#define WMM_VERSION 1 - -#define WPA_PROTO_WPA BIT(0) -#define WPA_PROTO_RSN BIT(1) - -#define WPA_KEY_MGMT_IEEE8021X BIT(0) -#define WPA_KEY_MGMT_PSK BIT(1) -#define WPA_KEY_MGMT_NONE BIT(2) -#define WPA_KEY_MGMT_IEEE8021X_NO_WPA BIT(3) -#define WPA_KEY_MGMT_WPA_NONE BIT(4) - - -#define WPA_CAPABILITY_PREAUTH BIT(0) -#define WPA_CAPABILITY_MGMT_FRAME_PROTECTION BIT(6) -#define WPA_CAPABILITY_PEERKEY_ENABLED BIT(9) - - -#define PMKID_LEN 16 - - -struct wpa_ie_hdr { - u8 elem_id; - u8 len; - u8 oui[4]; /* 24-bit OUI followed by 8-bit OUI type */ - u8 version[2]; /* little endian */ -} __packed; - -struct rsn_ie_hdr { - u8 elem_id; /* WLAN_EID_RSN */ - u8 len; - u8 version[2]; /* little endian */ -} __packed; - -struct wme_ac_parameter { -#if defined(__LITTLE_ENDIAN) - /* byte 1 */ - u8 aifsn:4, - acm:1, - aci:2, - reserved:1; - - /* byte 2 */ - u8 eCWmin:4, - eCWmax:4; -#elif defined(__BIG_ENDIAN) - /* byte 1 */ - u8 reserved:1, - aci:2, - acm:1, - aifsn:4; - - /* byte 2 */ - u8 eCWmax:4, - eCWmin:4; -#else -#error "Please fix <endian.h>" -#endif - - /* bytes 3 & 4 */ - u16 txopLimit; -} __packed; - -struct wme_parameter_element { - /* required fields for WME version 1 */ - u8 oui[3]; - u8 oui_type; - u8 oui_subtype; - u8 version; - u8 acInfo; - u8 reserved; - struct wme_ac_parameter ac[4]; - -} __packed; - -#define WPA_PUT_LE16(a, val) \ - do { \ - (a)[1] = ((u16)(val)) >> 8; \ - (a)[0] = ((u16)(val)) & 0xff; \ - } while (0) - -#define WPA_PUT_BE32(a, val) \ - do { \ - (a)[0] = (u8)((((u32) (val)) >> 24) & 0xff); \ - (a)[1] = (u8)((((u32) (val)) >> 16) & 0xff); \ - (a)[2] = (u8)((((u32) (val)) >> 8) & 0xff); \ - (a)[3] = (u8)(((u32) (val)) & 0xff); \ - } while (0) - -#define WPA_PUT_LE32(a, val) \ - do { \ - (a)[3] = (u8)((((u32) (val)) >> 24) & 0xff); \ - (a)[2] = (u8)((((u32) (val)) >> 16) & 0xff); \ - (a)[1] = (u8)((((u32) (val)) >> 8) & 0xff); \ - (a)[0] = (u8)(((u32) (val)) & 0xff); \ - } while (0) - -#define RSN_SELECTOR_PUT(a, val) WPA_PUT_BE32((u8 *)(a), (val)) - -/* Action category code */ -enum ieee80211_category { - WLAN_CATEGORY_SPECTRUM_MGMT = 0, - WLAN_CATEGORY_QOS = 1, - WLAN_CATEGORY_DLS = 2, - WLAN_CATEGORY_BACK = 3, - WLAN_CATEGORY_HT = 7, - WLAN_CATEGORY_WMM = 17, -}; - -/* SPECTRUM_MGMT action code */ -enum ieee80211_spectrum_mgmt_actioncode { - WLAN_ACTION_SPCT_MSR_REQ = 0, - WLAN_ACTION_SPCT_MSR_RPRT = 1, - WLAN_ACTION_SPCT_TPC_REQ = 2, - WLAN_ACTION_SPCT_TPC_RPRT = 3, - WLAN_ACTION_SPCT_CHL_SWITCH = 4, - WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5, -}; - -/* BACK action code */ -enum ieee80211_back_actioncode { - WLAN_ACTION_ADDBA_REQ = 0, - WLAN_ACTION_ADDBA_RESP = 1, - WLAN_ACTION_DELBA = 2, -}; - -/* HT features action code */ -enum ieee80211_ht_actioncode { - WLAN_ACTION_NOTIFY_CH_WIDTH = 0, - WLAN_ACTION_SM_PS = 1, - WLAN_ACTION_PSPM = 2, - WLAN_ACTION_PCO_PHASE = 3, - WLAN_ACTION_MIMO_CSI_MX = 4, - WLAN_ACTION_MIMO_NONCP_BF = 5, - WLAN_ACTION_MIMP_CP_BF = 6, - WLAN_ACTION_ASEL_INDICATES_FB = 7, - 
WLAN_ACTION_HI_INFO_EXCHG = 8, -}; - -/* BACK (block-ack) parties */ -enum ieee80211_back_parties { - WLAN_BACK_RECIPIENT = 0, - WLAN_BACK_INITIATOR = 1, - WLAN_BACK_TIMER = 2, -}; - -struct ieee80211_mgmt { - u16 frame_control; - u16 duration; - u8 da[6]; - u8 sa[6]; - u8 bssid[6]; - u16 seq_ctrl; - union { - struct { - u16 auth_alg; - u16 auth_transaction; - u16 status_code; - /* possibly followed by Challenge text */ - u8 variable[0]; - } __packed auth; - struct { - u16 reason_code; - } __packed deauth; - struct { - u16 capab_info; - u16 listen_interval; - /* followed by SSID and Supported rates */ - u8 variable[0]; - } __packed assoc_req; - struct { - u16 capab_info; - u16 status_code; - u16 aid; - /* followed by Supported rates */ - u8 variable[0]; - } __packed assoc_resp, reassoc_resp; - struct { - u16 capab_info; - u16 listen_interval; - u8 current_ap[6]; - /* followed by SSID and Supported rates */ - u8 variable[0]; - } __packed reassoc_req; - struct { - u16 reason_code; - } __packed disassoc; - struct { - __le64 timestamp; - u16 beacon_int; - u16 capab_info; - /* followed by some of SSID, Supported rates, - * FH Params, DS Params, CF Params, IBSS Params, TIM */ - u8 variable[0]; - } __packed beacon; - struct { - /* only variable items: SSID, Supported rates */ - u8 variable[0]; - } __packed probe_req; - struct { - __le64 timestamp; - u16 beacon_int; - u16 capab_info; - /* followed by some of SSID, Supported rates, - * FH Params, DS Params, CF Params, IBSS Params */ - u8 variable[0]; - } __packed probe_resp; - struct { - u8 category; - union { - struct { - u8 action_code; - u8 dialog_token; - u8 status_code; - u8 variable[0]; - } __packed wme_action; - struct { - u8 action_code; - u8 dialog_token; - u16 capab; - u16 timeout; - u16 start_seq_num; - } __packed addba_req; - struct { - u8 action_code; - u8 dialog_token; - u16 status; - u16 capab; - u16 timeout; - } __packed addba_resp; - struct { - u8 action_code; - u16 params; - u16 reason_code; - } __packed delba; - structi { - u8 action_code; - /* capab_info for open and confirm, - * reason for close - */ - u16 aux; - /* Followed in plink_confirm by status - * code, AID and supported rates, - * and directly by supported rates in - * plink_open and plink_close - */ - u8 variable[0]; - } __packed plink_action; - struct{ - u8 action_code; - u8 variable[0]; - } __packed mesh_action; - } __packed u; - } __packed action; - } __packed u; -} __packed; - -/* mgmt header + 1 byte category code */ -#define IEEE80211_MIN_ACTION_SIZE \ - FIELD_OFFSET(struct ieee80211_mgmt, u.action.u) - -#endif diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h index bc970caf7eda..af781c7cd3a5 100644 --- a/drivers/staging/rtl8188eu/include/odm.h +++ b/drivers/staging/rtl8188eu/include/odm.h @@ -348,7 +348,6 @@ enum odm_common_info_def { ODM_CMNINFO_MP_TEST_CHIP, ODM_CMNINFO_IC_TYPE, /* ODM_IC_TYPE_E */ ODM_CMNINFO_CUT_VER, /* ODM_CUT_VERSION_E */ - ODM_CMNINFO_FAB_VER, /* ODM_FAB_E */ ODM_CMNINFO_RF_TYPE, /* ODM_RF_PATH_E or ODM_RF_TYPE_E? */ ODM_CMNINFO_BOARD_TYPE, /* ODM_BOARD_TYPE_E */ ODM_CMNINFO_EXT_LNA, /* true */ @@ -451,12 +450,6 @@ enum odm_cut_version { ODM_CUT_TEST = 7, }; -/* ODM_CMNINFO_FAB_VER */ -enum odm_fab_Version { - ODM_TSMC = 0, - ODM_UMC = 1, -}; - /* ODM_CMNINFO_RF_TYPE */ /* For example 1T2R (A+AB = BIT0|BIT4|BIT5) */ enum odm_rf_path { @@ -752,8 +745,6 @@ struct odm_dm_struct { u32 SupportICType; /* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... 
*/ u8 CutVersion; - /* Fab Version TSMC/UMC = 0/1 */ - u8 FabVersion; /* RF Type 4T4R/3T3R/2T2R/1T2R/1T1R/... */ u8 RFType; /* Board Type Normal/HighPower/MiniCard/SLIM/Combo/. = 0/1/2/3/4/. */ diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h index 62a00498e473..ef792bfd535e 100644 --- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h +++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h @@ -69,7 +69,7 @@ struct phy_rx_agc_info { }; struct phy_status_rpt { - struct phy_rx_agc_info path_agc[3]; + struct phy_rx_agc_info path_agc[RF_PATH_MAX]; u8 ch_corr[2]; u8 cck_sig_qual_ofdm_pwdb_all; u8 cck_agc_rpt_ofdm_cfosho_a; diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h index 8c876c6c7a4f..9dbf8435f147 100644 --- a/drivers/staging/rtl8188eu/include/pwrseq.h +++ b/drivers/staging/rtl8188eu/include/pwrseq.h @@ -60,254 +60,172 @@ #define RTL8188E_TRANS_CARDEMU_TO_ACT \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value + * { offset, cut_msk, cmd, msk, value * }, * comment here */ \ - {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ /* wait till 0x04[17] = 1 power ready*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) | BIT(1), 0}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(0) | BIT(1), 0}, \ /* 0x02[1:0] = 0 reset BB*/ \ - {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \ + {0x0026, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), BIT(7)}, \ /*0x24[23] = 2b'01 schmit trigger */ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), 0}, \ /* 0x04[15] = 0 disable HWPDN (control by DRV)*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4) | BIT(3), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4) | BIT(3), 0}, \ /*0x04[12:11] = 2b'00 disable WL suspend*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ /*0x04[8] = 1 polling until return 0*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, BIT(0), 0}, \ /*wait till 0x04[8] = 0*/ \ - {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ - /*LDO normal mode*/ \ - {0x0074, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ - /*SDIO Driving*/ + {0x0023, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), 0}, \ + /*LDO normal mode*/ #define RTL8188E_TRANS_ACT_TO_CARDEMU \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value + * { offset, cut_msk, cmd, msk, value * }, * comments here */ \ - {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \ + {0x001F, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0}, \ /*0x1F[7:0] = 0 turn off RF*/ \ - {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + {0x0023, 
PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ /*LDO Sleep mode*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ /*0x04[9] = 1 turn off MAC by HW state machine*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, BIT(1), 0}, \ /*wait till 0x04[9] = 0 polling until return 0 to disable*/ #define RTL8188E_TRANS_CARDEMU_TO_SUS \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ - PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \ /* 0x04[12:11] = 2b'01enable WL suspend */ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)}, \ - /* 0x04[12:11] = 2b'11enable WL suspend for PCIe */ \ - {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ - PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, 0xFF, BIT(7)}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, BIT(7)}, \ /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */\ - {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ - PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, BIT(4), 0}, \ + {0x0041, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), 0}, \ /*Clear SIC_EN register 0x40[12] = 1'b0 */ \ - {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ - PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, BIT(4), BIT(4)}, \ - /*Set USB suspend enable local register 0xfe10[4]=1 */ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ - /*Set SDIO suspend local register*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, \ - /*wait power state to suspend*/ + {0xfe10, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + /*Set USB suspend enable local register 0xfe10[4]=1 */ #define RTL8188E_TRANS_SUS_TO_CARDEMU \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ - /*Set SDIO suspend local register*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ - /*wait power state to suspend*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, \ /*0x04[12:11] = 2b'01enable WL suspend*/ #define RTL8188E_TRANS_CARDEMU_TO_CARDDIS \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \ + {0x0026, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), BIT(7)}, \ /*0x24[23] = 2b'01 schmit trigger */ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ - PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, 
BIT(3) | BIT(4), BIT(3)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \ /*0x04[12:11] = 2b'01 enable WL suspend*/ \ - {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ - PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, 0xFF, 0}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0}, \ /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */\ - {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ - PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ - PWR_CMD_WRITE, BIT(4), 0}, \ + {0x0041, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), 0}, \ /*Clear SIC_EN register 0x40[12] = 1'b0 */ \ - {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ - /*Set USB suspend enable local register 0xfe10[4]=1 */ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ - /*Set SDIO suspend local register*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, \ - /*wait power state to suspend*/ + {0xfe10, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + /*Set USB suspend enable local register 0xfe10[4]=1 */ #define RTL8188E_TRANS_CARDDIS_TO_CARDEMU \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ - /*Set SDIO suspend local register*/ \ - {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ - /*wait power state to suspend*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, \ /*0x04[12:11] = 2b'01enable WL suspend*/ #define RTL8188E_TRANS_CARDEMU_TO_PDN \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(0), 0}, \ /* 0x04[16] = 0*/ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), BIT(7)}, \ /* 0x04[15] = 1*/ #define RTL8188E_TRANS_PDN_TO_CARDEMU \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), 0}, \ /* 0x04[15] = 0*/ /* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */ #define RTL8188E_TRANS_ACT_TO_LPS \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \ - {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + {0x0522, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \ + {0x05F8, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, 0xFF, 0}, \ /*Should be zero if no packet is 
transmitting*/ \ - {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + {0x05F9, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, 0xFF, 0}, \ /*Should be zero if no packet is transmitting*/ \ - {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + {0x05FA, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, 0xFF, 0}, \ /*Should be zero if no packet is transmitting*/ \ - {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + {0x05FB, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, 0xFF, 0}, \ /*Should be zero if no packet is transmitting*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(0), 0}, \ /*CCK and OFDM are disabled,and clock are gated*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, \ - PWRSEQ_DELAY_US},/*Delay 1us*/ \ - {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \ - {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},/*check if removed later*/\ - {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \ + /*Delay 1us*/ \ + {0x0100, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0x3F}, \ + /*Reset MAC TRX*/ \ + {0x0101, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(1), 0}, \ + /*check if removed later*/\ + {0x0553, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(5), BIT(5)}, \ /*Respond TxOK to scheduler*/ #define RTL8188E_TRANS_LPS_TO_ACT \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ - PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/ \ - {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/ \ - {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/ \ - {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + {0xFE58, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0x84}, \ + /*USB RPWM*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \ + /*Delay*/ \ + {0x0008, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), 0}, \ /* 0x08[4] = 0 switch TSF to 40M */ \ - {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \ + {0x0109, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, BIT(7), 0}, \ /* Polling 0x109[7]=0 TSF in 40M */ \ - {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6) | BIT(7), 0}, \ + {0x0029, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(6) | BIT(7), 0}, \ /* 0x29[7:6] = 2b'00 enable BB clock */ \ - {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + {0x0101, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ /* 0x101[1] = 1 */ \ - {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - 
PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ + {0x0100, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0xFF}, \ /* 0x100[7:0] = 0xFF enable WMAC TRX */ \ - {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \ + {0x0002, PWR_CUT_ALL_MSK, \ + PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \ /* 0x02[1:0] = 2b'11 enable BB macro */ \ - {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ - PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/ + {0x0522, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/ #define RTL8188E_TRANS_END \ /* format - * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, + * { offset, cut_msk, cmd, msk, * value }, * comments here */ \ - {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, \ - PWR_CMD_END, 0, 0}, + {0xFFFF, PWR_CUT_ALL_MSK, PWR_CMD_END, 0, 0}, extern struct wl_pwr_cfg rtl8188E_power_on_flow diff --git a/drivers/staging/rtl8188eu/include/pwrseqcmd.h b/drivers/staging/rtl8188eu/include/pwrseqcmd.h index 980a49769157..468a3fb28e00 100644 --- a/drivers/staging/rtl8188eu/include/pwrseqcmd.h +++ b/drivers/staging/rtl8188eu/include/pwrseqcmd.h @@ -29,24 +29,6 @@ #define PWR_CMD_DELAY 0x03 #define PWR_CMD_END 0x04 -/* The value of base: 4 bits */ -/* define the base address of each block */ -#define PWR_BASEADDR_MAC 0x00 -#define PWR_BASEADDR_USB 0x01 -#define PWR_BASEADDR_PCIE 0x02 -#define PWR_BASEADDR_SDIO 0x03 - -/* The value of interface_msk: 4 bits */ -#define PWR_INTF_SDIO_MSK BIT(0) -#define PWR_INTF_USB_MSK BIT(1) -#define PWR_INTF_PCI_MSK BIT(2) -#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3)) - -/* The value of fab_msk: 4 bits */ -#define PWR_FAB_TSMC_MSK BIT(0) -#define PWR_FAB_UMC_MSK BIT(1) -#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3)) - /* The value of cut_msk: 8 bits */ #define PWR_CUT_TESTCHIP_MSK BIT(0) #define PWR_CUT_A_MSK BIT(1) @@ -67,9 +49,6 @@ enum pwrseq_cmd_delat_unit { struct wl_pwr_cfg { u16 offset; u8 cut_msk; - u8 fab_msk:4; - u8 interface_msk:4; - u8 base:4; u8 cmd:4; u8 msk; u8 value; @@ -77,14 +56,11 @@ struct wl_pwr_cfg { #define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset #define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk -#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk -#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk -#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base #define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd #define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk #define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value -u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers, - u8 ifacetype, struct wl_pwr_cfg pwrcfgCmd[]); +u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, + struct wl_pwr_cfg pwrcfgCmd[]); #endif diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h index cbad364f189c..9f5050e6f6ab 100644 --- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h +++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h @@ -191,8 +191,6 @@ struct txpowerinfo24g { struct hal_data_8188e { struct HAL_VERSION VersionID; u16 CustomerID; - u8 *pfirmware; - u32 fwsize; u16 FirmwareVersion; u16 FirmwareVersionRev; u16 FirmwareSubVersion; diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h index 9093a5f94d32..44711332b90c 100644 --- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h +++ 
b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h @@ -348,7 +348,7 @@ struct mlme_ext_info { u8 candidate_tid_bitmap; u8 dialogToken; /* Accept ADDBA Request */ - bool bAcceptAddbaReq; + bool accept_addba_req; u8 bwmode_updated; u8 hidden_ssid_mode; diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h index 2df88370de59..35f61be12acd 100644 --- a/drivers/staging/rtl8188eu/include/rtw_rf.h +++ b/drivers/staging/rtl8188eu/include/rtw_rf.h @@ -140,7 +140,6 @@ enum rt_rf_type_def { }; u32 rtw_ch2freq(u32 ch); -u32 rtw_freq2ch(u32 freq); #endif /* _RTL8711_RF_H_ */ diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index a076ede50b22..911980495fb2 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -403,7 +403,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; - wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial); + wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial); pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len); if (pwep == NULL) { RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n")); @@ -1677,7 +1677,7 @@ static int rtw_wx_set_enc(struct net_device *dev, if (erq->length > 0) { wep.KeyLength = erq->length <= 5 ? 5 : 13; - wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial); + wep.Length = wep.KeyLength + offsetof(struct ndis_802_11_wep, KeyMaterial); } else { wep.KeyLength = 0; @@ -1907,7 +1907,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev, memset(param, 0, param_len); param->cmd = IEEE_CMD_SET_ENCRYPTION; - memset(param->sta_addr, 0xff, ETH_ALEN); + eth_broadcast_addr(param->sta_addr); switch (pext->alg) { case IW_ENCODE_ALG_NONE: @@ -2277,7 +2277,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; - wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial); + wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial); pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len); if (pwep == NULL) { DBG_88E(" r871x_set_encryption: pwep allocate fail !!!\n"); @@ -3095,7 +3095,6 @@ struct iw_handler_def rtw_handlers_def = { .get_wireless_stats = rtw_get_wireless_stats, }; -#include <rtw_android.h> int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct iwreq *wrq = (struct iwreq *)rq; diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index 9201b94d017c..7986e678521a 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c @@ -101,8 +101,6 @@ static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */ static int rtw_low_power; static int rtw_wifi_spec; static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX; -/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. 
*/ -static int rtw_AcceptAddbaReq = true; static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */ @@ -593,7 +591,7 @@ static void loadparam(struct adapter *padapter, struct net_device *pnetdev) registry_par->low_power = (u8)rtw_low_power; registry_par->wifi_spec = (u8)rtw_wifi_spec; registry_par->channel_plan = (u8)rtw_channel_plan; - registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq; + registry_par->accept_addba_req = true; registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg; registry_par->antdiv_type = (u8)rtw_antdiv_type; registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode; @@ -1157,7 +1155,6 @@ int pm_netdev_open(struct net_device *pnetdev, u8 bnormal) static int netdev_close(struct net_device *pnetdev) { struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev); - struct hal_data_8188e *rtlhal = GET_HAL_DATA(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n")); @@ -1190,9 +1187,6 @@ static int netdev_close(struct net_device *pnetdev) rtw_led_control(padapter, LED_CTL_POWER_OFF); } - kfree(rtlhal->pfirmware); - rtlhal->pfirmware = NULL; - RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n")); DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup); return 0; diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c index d87b54711c0d..f090bef59594 100644 --- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c +++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c @@ -41,10 +41,7 @@ inline int RTW_STATUS_CODE(int error_code) u8 *_rtw_malloc(u32 sz) { - u8 *pbuf = NULL; - - pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); - return pbuf; + return kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); } void *rtw_malloc2d(int h, int w, int size) @@ -67,8 +64,7 @@ u32 _rtw_down_sema(struct semaphore *sema) { if (down_interruptible(sema)) return _FAIL; - else - return _SUCCESS; + return _SUCCESS; } void _rtw_init_queue(struct __queue *pqueue) diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 01d50f7c1667..794cc114348c 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -360,7 +360,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj, padapter->bDriverStopped = true; mutex_init(&padapter->hw_init_mutex); - padapter->chip_type = RTL8188E; pnetdev = rtw_init_netdev(padapter); if (pnetdev == NULL) @@ -442,7 +441,7 @@ free_adapter: if (status != _SUCCESS) { if (pnetdev) rtw_free_netdev(pnetdev); - else if (padapter) + else vfree(padapter); padapter = NULL; } @@ -474,8 +473,7 @@ static void rtw_usb_if1_deinit(struct adapter *if1) pr_debug("+r871xu_dev_remove, hw_init_completed=%d\n", if1->hw_init_completed); rtw_free_drv_sw(if1); - if (pnetdev) - rtw_free_netdev(pnetdev); + rtw_free_netdev(pnetdev); } static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid) @@ -483,24 +481,20 @@ static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device struct adapter *if1 = NULL; struct dvobj_priv *dvobj; - RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_init\n")); - /* Initialize dvobj_priv */ dvobj = usb_dvobj_init(pusb_intf); - if (dvobj == NULL) { + if (!dvobj) { RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("initialize device object priv Failed!\n")); goto exit; } if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid); - if (if1 == NULL) { + if (!if1) { pr_debug("rtw_init_primarystruct 
adapter Failed!\n"); goto free_dvobj; } - RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-871x_drv - drv_init, success!\n")); - return 0; free_dvobj: diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h index 2c19054cf027..735a199ebdcf 100644 --- a/drivers/staging/rtl8192e/dot11d.h +++ b/drivers/staging/rtl8192e/dot11d.h @@ -17,8 +17,6 @@ #include "rtllib.h" - - struct chnl_txpow_triple { u8 FirstChnl; u8 NumChnls; @@ -42,7 +40,6 @@ enum dot11d_state { */ struct rt_dot11d_info { - bool bEnabled; u16 CountryIeLen; diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c index e9c4f973bba9..ba64a4f1b3a8 100644 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c @@ -680,7 +680,7 @@ static void _rtl92e_hwconfig(struct net_device *dev) rtl92e_writeb(dev, BW_OPMODE, regBwOpMode); { - u32 ratr_value = 0; + u32 ratr_value; ratr_value = regRATR; if (priv->rf_type == RF_1T2R) @@ -1000,7 +1000,7 @@ void rtl92e_link_change(struct net_device *dev) _rtl92e_update_msr(dev); if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) { - u32 reg = 0; + u32 reg; reg = rtl92e_readl(dev, RCR); if (priv->rtllib->state == RTLLIB_LINKED) { @@ -1186,7 +1186,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, struct r8192_priv *priv = rtllib_priv(dev); dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - struct tx_fwinfo_8190pci *pTxFwInfo = NULL; + struct tx_fwinfo_8190pci *pTxFwInfo; pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci)); @@ -2235,7 +2235,7 @@ void rtl92e_disable_irq(struct net_device *dev) void rtl92e_clear_irq(struct net_device *dev) { - u32 tmp = 0; + u32 tmp; tmp = rtl92e_readl(dev, ISR); rtl92e_writel(dev, ISR, tmp); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index 8f989a95a019..9b7cc7dc7cb8 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -249,7 +249,7 @@ bool rtl92e_set_rf_state(struct net_device *dev, if (StateToSet == eRfOn) { if (bConnectBySSID && priv->blinked_ingpio) { - queue_delayed_work_rsl(ieee->wq, + schedule_delayed_work( &ieee->associate_procedure_wq, 0); priv->blinked_ingpio = false; } @@ -288,7 +288,7 @@ static void _rtl92e_tx_timeout(struct net_device *dev) void rtl92e_irq_enable(struct net_device *dev) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); priv->irq_enabled = 1; @@ -297,7 +297,7 @@ void rtl92e_irq_enable(struct net_device *dev) void rtl92e_irq_disable(struct net_device *dev) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); priv->ops->irq_disable(dev); @@ -306,7 +306,7 @@ void rtl92e_irq_disable(struct net_device *dev) static void _rtl92e_set_chan(struct net_device *dev, short ch) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch); if (priv->chan_forced) @@ -437,7 +437,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv, network->qos_data.old_param_count = network->qos_data.param_count; priv->rtllib->wmm_acm = network->qos_data.wmm_acm; - queue_work_rsl(priv->priv_wq, &priv->qos_activate); + 
schedule_work(&priv->qos_activate); RT_TRACE(COMP_QOS, "QoS parameters change call qos_activate\n"); } @@ -446,7 +446,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv, &def_qos_parameters, size); if ((network->qos_data.active == 1) && (active_network == 1)) { - queue_work_rsl(priv->priv_wq, &priv->qos_activate); + schedule_work(&priv->qos_activate); RT_TRACE(COMP_QOS, "QoS was disabled call qos_activate\n"); } @@ -465,7 +465,7 @@ static int _rtl92e_handle_beacon(struct net_device *dev, _rtl92e_qos_handle_probe_response(priv, 1, network); - queue_delayed_work_rsl(priv->priv_wq, &priv->update_beacon_wq, 0); + schedule_delayed_work(&priv->update_beacon_wq, 0); return 0; } @@ -512,7 +512,7 @@ static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv, network->flags, priv->rtllib->current_network.qos_data.active); if (set_qos_param == 1) { rtl92e_dm_init_edca_turbo(priv->rtllib->dev); - queue_work_rsl(priv->priv_wq, &priv->qos_activate); + schedule_work(&priv->qos_activate); } return 0; } @@ -1002,7 +1002,6 @@ static void _rtl92e_init_priv_task(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - priv->priv_wq = create_workqueue(DRV_NAME); INIT_WORK_RSL(&priv->reset_wq, (void *)_rtl92e_restart, dev); INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)rtl92e_ips_leave_wq, dev); @@ -1327,7 +1326,7 @@ RESET_START: ieee->set_chan(ieee->dev, ieee->current_network.channel); - queue_work_rsl(ieee->wq, &ieee->associate_complete_wq); + schedule_work(&ieee->associate_complete_wq); } else if (ieee->state == RTLLIB_LINKED && ieee->iw_mode == IW_MODE_ADHOC) { @@ -1499,7 +1498,7 @@ static void _rtl92e_watchdog_wq_cb(void *data) if (!(ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_CCMP|SEC_ALG_TKIP))) - queue_delayed_work_rsl(ieee->wq, + schedule_delayed_work( &ieee->associate_procedure_wq, 0); priv->check_roaming_cnt = 0; @@ -1536,7 +1535,7 @@ static void _rtl92e_watchdog_timer_cb(unsigned long data) { struct r8192_priv *priv = rtllib_priv((struct net_device *)data); - queue_delayed_work_rsl(priv->priv_wq, &priv->watch_dog_wq, 0); + schedule_delayed_work(&priv->watch_dog_wq, 0); mod_timer(&priv->watch_dog_timer, jiffies + msecs_to_jiffies(RTLLIB_WATCH_DOG_TIME)); } @@ -1546,14 +1545,14 @@ static void _rtl92e_watchdog_timer_cb(unsigned long data) *****************************************************************************/ void rtl92e_rx_enable(struct net_device *dev) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); priv->ops->rx_enable(dev); } void rtl92e_tx_enable(struct net_device *dev) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); priv->ops->tx_enable(dev); @@ -1612,7 +1611,7 @@ static void _rtl92e_free_tx_ring(struct net_device *dev, unsigned int prio) static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rate) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); int ret; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); @@ -1643,7 +1642,7 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); int ret; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); @@ -1676,7 +1675,7 @@ 
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) static void _rtl92e_tx_isr(struct net_device *dev, int prio) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); struct rtl8192_tx_ring *ring = &priv->tx_ring[prio]; @@ -1850,7 +1849,7 @@ static short _rtl92e_alloc_rx_ring(struct net_device *dev) static int _rtl92e_alloc_tx_ring(struct net_device *dev, unsigned int prio, unsigned int entries) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); struct tx_desc *ring; dma_addr_t dma; int i; @@ -1944,7 +1943,7 @@ void rtl92e_reset_desc_ring(struct net_device *dev) void rtl92e_update_rx_pkt_timestamp(struct net_device *dev, struct rtllib_rx_stats *stats) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); if (stats->bIsAMPDU && !stats->bFirstMPDU) stats->mac_time = priv->LastRxDescTSF; @@ -2022,7 +2021,7 @@ void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats, static void _rtl92e_rx_normal(struct net_device *dev) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_hdr_1addr *rtllib_hdr = NULL; bool unicast_packet = false; bool bLedBlinking = true; @@ -2128,7 +2127,7 @@ done: static void _rtl92e_tx_resume(struct net_device *dev) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; struct sk_buff *skb; int queue_index; @@ -2161,8 +2160,8 @@ static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv) *****************************************************************************/ static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv) { - cancel_delayed_work(&priv->watch_dog_wq); - cancel_delayed_work(&priv->update_beacon_wq); + cancel_delayed_work_sync(&priv->watch_dog_wq); + cancel_delayed_work_sync(&priv->update_beacon_wq); cancel_delayed_work(&priv->rtllib->hw_sleep_wq); cancel_work_sync(&priv->reset_wq); cancel_work_sync(&priv->qos_activate); @@ -2279,7 +2278,7 @@ static int _rtl92e_set_mac_adr(struct net_device *dev, void *mac) /* based on ipw2200 driver */ static int _rtl92e_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct r8192_priv *priv = rtllib_priv(dev); struct iwreq *wrq = (struct iwreq *)rq; int ret = -1; struct rtllib_device *ieee = priv->rtllib; @@ -2402,8 +2401,8 @@ out: static irqreturn_t _rtl92e_irq(int irq, void *netdev) { - struct net_device *dev = (struct net_device *) netdev; - struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); + struct net_device *dev = netdev; + struct r8192_priv *priv = rtllib_priv(dev); unsigned long flags; u32 inta; u32 intb; @@ -2693,7 +2692,7 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev) priv = rtllib_priv(dev); del_timer_sync(&priv->gpio_polling_timer); - cancel_delayed_work(&priv->gpio_change_rf_wq); + cancel_delayed_work_sync(&priv->gpio_change_rf_wq); priv->polling_timer_on = 0; _rtl92e_down(dev, true); rtl92e_dm_deinit(dev); @@ -2701,7 +2700,6 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev) vfree(priv->pFirmware); priv->pFirmware = NULL; } - destroy_workqueue(priv->priv_wq); _rtl92e_free_rx_ring(dev); for (i = 0; i < MAX_TX_QUEUE_COUNT; i++) _rtl92e_free_tx_ring(dev, i); @@ -2783,7 +2781,7 @@ void 
rtl92e_check_rfctrl_gpio_timer(unsigned long data) priv->polling_timer_on = 1; - queue_delayed_work_rsl(priv->priv_wq, &priv->gpio_change_rf_wq, 0); + schedule_delayed_work(&priv->gpio_change_rf_wq, 0); mod_timer(&priv->gpio_polling_timer, jiffies + msecs_to_jiffies(RTLLIB_WATCH_DOG_TIME)); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h index a7777a319c02..f627fdc15a58 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h @@ -338,8 +338,6 @@ struct r8192_priv { struct delayed_work rfpath_check_wq; struct delayed_work gpio_change_rf_wq; - struct workqueue_struct *priv_wq; - struct channel_access_setting ChannelAccessSetting; struct rtl819x_ops *ops; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c index ef03242113be..9bc284812c30 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c @@ -994,8 +994,7 @@ static void _rtl92e_dm_check_tx_power_tracking_tssi(struct net_device *dev) if (tx_power_track_counter >= 180) { - queue_delayed_work_rsl(priv->priv_wq, - &priv->txpower_tracking_wq, 0); + schedule_delayed_work(&priv->txpower_tracking_wq, 0); tx_power_track_counter = 0; } @@ -1028,7 +1027,7 @@ static void _rtl92e_dm_check_tx_power_tracking_thermal(struct net_device *dev) return; } netdev_info(dev, "===============>Schedule TxPowerTrackingWorkItem\n"); - queue_delayed_work_rsl(priv->priv_wq, &priv->txpower_tracking_wq, 0); + schedule_delayed_work(&priv->txpower_tracking_wq, 0); TM_Trigger = 0; } @@ -1875,7 +1874,7 @@ void rtl92e_dm_rf_pathcheck_wq(void *data) struct r8192_priv, rfpath_check_wq); struct net_device *dev = priv->rtllib->dev; - u8 rfpath = 0, i; + u8 rfpath, i; rfpath = rtl92e_readb(dev, 0xc04); @@ -2121,7 +2120,7 @@ static void _rtl92e_dm_check_rx_path_selection(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); - queue_delayed_work_rsl(priv->priv_wq, &priv->rfpath_check_wq, 0); + schedule_delayed_work(&priv->rfpath_check_wq, 0); } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c index b2b5ada69e73..9e04dc29fbbb 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c @@ -27,7 +27,7 @@ int rtl92e_suspend(struct pci_dev *pdev, pm_message_t state) netdev_info(dev, "============> r8192E suspend call.\n"); del_timer_sync(&priv->gpio_polling_timer); - cancel_delayed_work(&priv->gpio_change_rf_wq); + cancel_delayed_work_sync(&priv->gpio_change_rf_wq); priv->polling_timer_on = 0; if (!netif_running(dev)) { diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c index 9a4d1bcb881d..98e4d88d0e73 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c @@ -63,9 +63,8 @@ void rtl92e_hw_wakeup(struct net_device *dev) spin_unlock_irqrestore(&priv->rf_ps_lock, flags); RT_TRACE(COMP_DBG, "rtl92e_hw_wakeup(): RF Change in progress!\n"); - queue_delayed_work_rsl(priv->rtllib->wq, - &priv->rtllib->hw_wakeup_wq, - msecs_to_jiffies(10)); + schedule_delayed_work(&priv->rtllib->hw_wakeup_wq, + msecs_to_jiffies(10)); return; } spin_unlock_irqrestore(&priv->rf_ps_lock, flags); @@ -111,10 +110,8 @@ void rtl92e_enter_sleep(struct net_device *dev, u64 time) return; } tmp = time - jiffies; - queue_delayed_work_rsl(priv->rtllib->wq, - &priv->rtllib->hw_wakeup_wq, tmp); - 
queue_delayed_work_rsl(priv->rtllib->wq, - (void *)&priv->rtllib->hw_sleep_wq, 0); + schedule_delayed_work(&priv->rtllib->hw_wakeup_wq, tmp); + schedule_delayed_work(&priv->rtllib->hw_sleep_wq, 0); spin_unlock_irqrestore(&priv->ps_lock, flags); } @@ -203,8 +200,7 @@ void rtl92e_rtllib_ips_leave_wq(struct net_device *dev) } netdev_info(dev, "=========>%s(): rtl92e_ips_leave\n", __func__); - queue_work_rsl(priv->rtllib->wq, - &priv->rtllib->ips_leave_wq); + schedule_work(&priv->rtllib->ips_leave_wq); } } } diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c index c04a020f6d6c..c7fd1b1653d6 100644 --- a/drivers/staging/rtl8192e/rtl819x_BAProc.c +++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c @@ -189,7 +189,7 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst, static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst, struct ba_record *pBA) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ); @@ -204,7 +204,7 @@ static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst, static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst, struct ba_record *pBA, u16 StatusCode) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = rtllib_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP); if (skb) @@ -217,7 +217,7 @@ static void rtllib_send_DELBA(struct rtllib_device *ieee, u8 *dst, struct ba_record *pBA, enum tr_select TxRxSelect, u16 ReasonCode) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = rtllib_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode); if (skb) diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index 563ac12f0b2c..776e179d5bfd 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -76,7 +76,7 @@ #define container_of_work_rsl(x, y, z) container_of(x, y, z) #define container_of_dwork_rsl(x, y, z) \ - container_of(container_of(x, struct delayed_work, work), y, z) + container_of(to_delayed_work(x), y, z) #define iwe_stream_add_event_rsl(info, start, stop, iwe, len) \ iwe_stream_add_event(info, start, stop, iwe, len) @@ -1728,7 +1728,6 @@ struct rtllib_device { struct delayed_work link_change_wq; struct work_struct wx_sync_scan_wq; - struct workqueue_struct *wq; union { struct rtllib_rxb *RfdArray[REORDER_WIN_SIZE]; struct rtllib_rxb *stats_IndicateArray[REORDER_WIN_SIZE]; diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c index 496de4f6a7bc..bc45cf098b04 100644 --- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c @@ -233,7 +233,7 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); - blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; + blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { @@ -319,7 +319,7 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); xor_block(mic, b, CCMP_MIC_LEN); - blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; + blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c index 8eac7cdd5f3e..ae103b0b7a2a 100644 --- 
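On the rtllib_crypt_ccmp.c hunks above: the open-coded ceiling division (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN is replaced by the kernel's DIV_ROUND_UP() helper, which expands to the same expression. A standalone sketch (the macro is copied locally since this runs outside the kernel):

#include <stdio.h>

#define AES_BLOCK_LEN 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))   /* as in include/linux/kernel.h */

int main(void)
{
	unsigned int data_len;

	for (data_len = 1; data_len <= 64; data_len += 13) {
		unsigned int open_coded = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
		unsigned int helper = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);

		/* both forms always agree */
		printf("len %2u -> %u blocks (open-coded %u)\n", data_len, helper, open_coded);
	}
	return 0;
}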
a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c @@ -367,8 +367,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) if (!tcb_desc->bHwSec) return ret; - else - return 0; + return 0; } diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c index 113fbf7fbb17..f4f318abb299 100644 --- a/drivers/staging/rtl8192e/rtllib_module.c +++ b/drivers/staging/rtl8192e/rtllib_module.c @@ -45,15 +45,11 @@ #include <linux/etherdevice.h> #include <linux/uaccess.h> #include <net/arp.h> - #include "rtllib.h" - u32 rt_global_debug_component = COMP_ERR; EXPORT_SYMBOL(rt_global_debug_component); - - static inline int rtllib_networks_allocate(struct rtllib_device *ieee) { if (ieee->networks) @@ -110,7 +106,6 @@ struct net_device *alloc_rtllib(int sizeof_priv) } rtllib_networks_initialize(ieee); - /* Default fragmentation threshold is maximum payload size */ ieee->fts = DEFAULT_FTS; ieee->scan_age = DEFAULT_MAX_SCAN_AGE; diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 37343ec3b484..c743182b933e 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -467,7 +467,7 @@ static bool AddReorderEntry(struct rx_ts_record *pTS, else if (SN_EQUAL(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)list_entry(pList->next, struct rx_reorder_entry, List))->SeqNum)) - return false; + return false; else break; } @@ -905,7 +905,7 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee, { struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); - size_t hdrlen = 0; + size_t hdrlen; hdrlen = rtllib_get_hdrlen(fc); if (HTCCheck(ieee, skb->data)) { @@ -1829,7 +1829,6 @@ static inline void rtllib_extract_country_ie( if (IS_EQUAL_CIE_SRC(ieee, addr2)) UPDATE_CIE_WATCHDOG(ieee); } - } static void rtllib_parse_mife_generic(struct rtllib_device *ieee, @@ -1902,7 +1901,6 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee, info_element->data, network->bssht.bdHTInfoLen); } - } } diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index d0fedb0ff132..cfab715495ad 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -355,9 +355,9 @@ static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee) req->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_REQ); req->header.duration_id = 0; - memset(req->header.addr1, 0xff, ETH_ALEN); + eth_broadcast_addr(req->header.addr1); ether_addr_copy(req->header.addr2, ieee->dev->dev_addr); - memset(req->header.addr3, 0xff, ETH_ALEN); + eth_broadcast_addr(req->header.addr3); tag = (u8 *) skb_put(skb, len + 2 + rate_len); @@ -615,8 +615,8 @@ static void rtllib_softmac_scan_wq(void *data) if (ieee->active_channel_map[ieee->current_network.channel] == 1) rtllib_send_probe_requests(ieee, 0); - queue_delayed_work_rsl(ieee->wq, &ieee->softmac_scan_wq, - msecs_to_jiffies(RTLLIB_SOFTMAC_SCAN_TIME)); + schedule_delayed_work(&ieee->softmac_scan_wq, + msecs_to_jiffies(RTLLIB_SOFTMAC_SCAN_TIME)); up(&ieee->scan_sem); return; @@ -689,7 +689,7 @@ static void rtllib_softmac_stop_scan(struct rtllib_device *ieee) ieee->scanning_continue = 0; ieee->actscanning = false; - cancel_delayed_work(&ieee->softmac_scan_wq); + cancel_delayed_work_sync(&ieee->softmac_scan_wq); } up(&ieee->scan_sem); @@ -745,8 +745,7 @@ static void 
rtllib_start_scan(struct rtllib_device *ieee) if (ieee->scanning_continue == 0) { ieee->actscanning = true; ieee->scanning_continue = 1; - queue_delayed_work_rsl(ieee->wq, - &ieee->softmac_scan_wq, 0); + schedule_delayed_work(&ieee->softmac_scan_wq, 0); } } else { if (ieee->rtllib_start_hw_scan) @@ -776,7 +775,7 @@ inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon, { struct sk_buff *skb; struct rtllib_authentication *auth; - int len = 0; + int len; len = sizeof(struct rtllib_authentication) + challengelen + ieee->tx_headroom + 4; @@ -1428,8 +1427,8 @@ static void rtllib_associate_abort(struct rtllib_device *ieee) ieee->state = RTLLIB_ASSOCIATING_RETRY; - queue_delayed_work_rsl(ieee->wq, &ieee->associate_retry_wq, - RTLLIB_SOFTMAC_ASSOC_RETRY_TIME); + schedule_delayed_work(&ieee->associate_retry_wq, + RTLLIB_SOFTMAC_ASSOC_RETRY_TIME); spin_unlock_irqrestore(&ieee->lock, flags); } @@ -1580,7 +1579,7 @@ static void rtllib_associate_complete(struct rtllib_device *ieee) ieee->state = RTLLIB_LINKED; rtllib_sta_send_associnfo(ieee); - queue_work_rsl(ieee->wq, &ieee->associate_complete_wq); + schedule_work(&ieee->associate_complete_wq); } static void rtllib_associate_procedure_wq(void *data) @@ -1729,7 +1728,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee, if (ieee->LedControlHandler != NULL) ieee->LedControlHandler(ieee->dev, LED_CTL_START_TO_LINK); - queue_delayed_work_rsl(ieee->wq, + schedule_delayed_work( &ieee->associate_procedure_wq, 0); } else { if (rtllib_is_54g(&ieee->current_network) && @@ -2283,7 +2282,7 @@ inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb, "Association response status code 0x%x\n", errcode); if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) - queue_delayed_work_rsl(ieee->wq, + schedule_delayed_work( &ieee->associate_procedure_wq, 0); else rtllib_associate_abort(ieee); @@ -2393,7 +2392,7 @@ inline int rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb) if (!(ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_CCMP|SEC_ALG_TKIP))) - queue_delayed_work_rsl(ieee->wq, + schedule_delayed_work( &ieee->associate_procedure_wq, 5); } return 0; @@ -2538,12 +2537,6 @@ void rtllib_wake_all_queues(struct rtllib_device *ieee) netif_tx_wake_all_queues(ieee->dev); } -inline void rtllib_randomize_cell(struct rtllib_device *ieee) -{ - - random_ether_addr(ieee->current_network.bssid); -} - /* called in user context only */ static void rtllib_start_master_bss(struct rtllib_device *ieee) { @@ -2634,7 +2627,7 @@ static void rtllib_start_ibss_wq(void *data) netdev_info(ieee->dev, "creating new IBSS cell\n"); ieee->current_network.channel = ieee->IbssStartChnl; if (!ieee->wap_set) - rtllib_randomize_cell(ieee); + eth_random_addr(ieee->current_network.bssid); if (ieee->modulation & RTLLIB_CCK_MODULATION) { @@ -2715,8 +2708,7 @@ static void rtllib_start_ibss_wq(void *data) inline void rtllib_start_ibss(struct rtllib_device *ieee) { - queue_delayed_work_rsl(ieee->wq, &ieee->start_ibss_wq, - msecs_to_jiffies(150)); + schedule_delayed_work(&ieee->start_ibss_wq, msecs_to_jiffies(150)); } /* this is called only in user context, with wx_sem held */ @@ -2770,7 +2762,7 @@ void rtllib_disassociate(struct rtllib_device *ieee) ieee->is_set_key = false; ieee->wap_set = 0; - queue_delayed_work_rsl(ieee->wq, &ieee->link_change_wq, 0); + schedule_delayed_work(&ieee->link_change_wq, 0); notify_wx_assoc_event(ieee); } @@ -2882,9 +2874,9 @@ void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown) 
rtllib_stop_send_beacons(ieee); del_timer_sync(&ieee->associate_timer); - cancel_delayed_work(&ieee->associate_retry_wq); - cancel_delayed_work(&ieee->start_ibss_wq); - cancel_delayed_work(&ieee->link_change_wq); + cancel_delayed_work_sync(&ieee->associate_retry_wq); + cancel_delayed_work_sync(&ieee->start_ibss_wq); + cancel_delayed_work_sync(&ieee->link_change_wq); rtllib_stop_scan(ieee); if (ieee->state <= RTLLIB_ASSOCIATING_AUTHENTICATED) @@ -3027,9 +3019,6 @@ void rtllib_softmac_init(struct rtllib_device *ieee) rtllib_send_beacon_cb, (unsigned long) ieee); - - ieee->wq = create_workqueue(DRV_NAME); - INIT_DELAYED_WORK_RSL(&ieee->link_change_wq, (void *)rtllib_link_change_wq, ieee); INIT_DELAYED_WORK_RSL(&ieee->start_ibss_wq, @@ -3065,8 +3054,16 @@ void rtllib_softmac_free(struct rtllib_device *ieee) ieee->pDot11dInfo = NULL; del_timer_sync(&ieee->associate_timer); - cancel_delayed_work(&ieee->associate_retry_wq); - destroy_workqueue(ieee->wq); + cancel_delayed_work_sync(&ieee->associate_retry_wq); + cancel_delayed_work_sync(&ieee->associate_procedure_wq); + cancel_delayed_work_sync(&ieee->softmac_scan_wq); + cancel_delayed_work_sync(&ieee->start_ibss_wq); + cancel_delayed_work_sync(&ieee->hw_wakeup_wq); + cancel_delayed_work_sync(&ieee->hw_sleep_wq); + cancel_delayed_work_sync(&ieee->link_change_wq); + cancel_work_sync(&ieee->associate_complete_wq); + cancel_work_sync(&ieee->ips_leave_wq); + cancel_work_sync(&ieee->wx_sync_scan_wq); up(&ieee->wx_sem); tasklet_kill(&ieee->ps_task); } @@ -3328,7 +3325,7 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee, goto done; } new_crypt->ops = ops; - if (new_crypt->ops) + if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx); diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c index 86f52ac7d33e..61ed8b0413e4 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c +++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c @@ -243,7 +243,7 @@ int rtllib_wx_get_rate(struct rtllib_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - u32 tmp_rate = 0; + u32 tmp_rate; tmp_rate = TxCountToDataRate(ieee, ieee->softmac_stats.CurrentShowTxate); @@ -429,7 +429,7 @@ int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a, } if (ieee->state == RTLLIB_LINKED) { - queue_work_rsl(ieee->wq, &ieee->wx_sync_scan_wq); + schedule_work(&ieee->wx_sync_scan_wq); /* intentionally forget to up sem */ return 0; } diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c index 80f7a099dff1..84e6272f28cd 100644 --- a/drivers/staging/rtl8192e/rtllib_wx.c +++ b/drivers/staging/rtl8192e/rtllib_wx.c @@ -623,7 +623,7 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee, goto done; } new_crypt->ops = ops; - if (new_crypt->ops) + if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) new_crypt->priv = new_crypt->ops->init(idx); if (new_crypt->priv == NULL) { diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c index 82d60380bb40..00b6052fbbac 100644 --- a/drivers/staging/rtl8192u/ieee80211/dot11d.c +++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c @@ -103,7 +103,7 @@ u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel) PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev); u8 MaxTxPwrInDbm = 255; - if (MAX_CHANNEL_NUMBER < Channel) { + if (Channel > MAX_CHANNEL_NUMBER) { netdev_err(dev->dev, 
"DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n"); return MaxTxPwrInDbm; } @@ -139,7 +139,7 @@ int IsLegalChannel(struct ieee80211_device *dev, u8 channel) { PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev); - if (MAX_CHANNEL_NUMBER < channel) { + if (channel > MAX_CHANNEL_NUMBER) { netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n"); return 0; } @@ -162,7 +162,7 @@ int ToLegalChannel(struct ieee80211_device *dev, u8 channel) } } - if (MAX_CHANNEL_NUMBER < channel) { + if (channel > MAX_CHANNEL_NUMBER) { netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n"); return default_chn; } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h index 967ef9a98fc0..68931e5ecd8f 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h @@ -238,8 +238,6 @@ typedef struct cb_desc { #define ieee80211_tkip_null ieee80211_tkip_null_rsl -#define ieee80211_wep_null ieee80211_wep_null_rsl - #define free_ieee80211 free_ieee80211_rsl #define alloc_ieee80211 alloc_ieee80211_rsl @@ -329,9 +327,6 @@ typedef struct ieee_param { // linux under 2.6.9 release may not support it, so modify it for common use -#define MSECS(t) msecs_to_jiffies(t) -#define msleep_interruptible_rsl msleep_interruptible - #define IEEE80211_DATA_LEN 2304 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section 6.2.1.1.2. @@ -2260,7 +2255,6 @@ void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee); /* ieee80211_crypt_ccmp&tkip&wep.c */ void ieee80211_tkip_null(void); -void ieee80211_wep_null(void); void ieee80211_ccmp_null(void); int ieee80211_crypto_init(void); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c index 3995620b3442..9cf90d040cfe 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c @@ -176,8 +176,7 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name) if (found_alg) return found_alg->ops; - else - return NULL; + return NULL; } diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c index 27ce4817faeb..2dc25cc2c726 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c @@ -242,7 +242,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); - blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; + blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { @@ -331,7 +331,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); xor_block(mic, b, CCMP_MIC_LEN); - blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; + blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c index ababb6de125b..1999bc5cbbc1 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c @@ -288,6 +288,3 @@ void __exit ieee80211_crypto_wep_exit(void) 
ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep); } -void ieee80211_wep_null(void) -{ -} diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c index 425b2ddfc916..30fff6c5696b 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c @@ -177,7 +177,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv) /* These function were added to load crypte module autoly */ ieee80211_tkip_null(); - ieee80211_wep_null(); ieee80211_ccmp_null(); return dev; diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c index 130c852ffa02..f18fc0b6775b 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c @@ -460,10 +460,8 @@ static int is_duplicate_packet(struct ieee80211_device *ieee, // if (memcmp(entry->mac, mac, ETH_ALEN)){ if (p == &ieee->ibss_mac_hash[index]) { entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC); - if (!entry) { - printk(KERN_WARNING "Cannot malloc new mac entry\n"); + if (!entry) return 0; - } memcpy(entry->mac, mac, ETH_ALEN); entry->seq_num[tid] = seq; entry->frag_num[tid] = frag; @@ -594,12 +592,18 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee, { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; PRX_REORDER_ENTRY pReorderEntry = NULL; - struct ieee80211_rxb *prxbIndicateArray[REORDER_WIN_SIZE]; + struct ieee80211_rxb **prxbIndicateArray; u8 WinSize = pHTInfo->RxReorderWinSize; u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096; u8 index = 0; bool bMatchWinStart = false, bPktInBuf = false; IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__func__,SeqNum,pTS->RxIndicateSeq,WinSize); + + prxbIndicateArray = kmalloc(sizeof(struct ieee80211_rxb *) * + REORDER_WIN_SIZE, GFP_KERNEL); + if (!prxbIndicateArray) + return; + /* Rx Reorder initialize condition.*/ if (pTS->RxIndicateSeq == 0xffff) { pTS->RxIndicateSeq = SeqNum; @@ -618,6 +622,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee, kfree(prxb); prxb = NULL; } + + kfree(prxbIndicateArray); return; } @@ -741,6 +747,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee, // Indicate packets if(index>REORDER_WIN_SIZE){ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! 
\n"); + kfree(prxbIndicateArray); return; } ieee80211_indicate_packets(ieee, prxbIndicateArray, index); @@ -752,9 +759,12 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee, pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq; if(timer_pending(&pTS->RxPktPendingTimer)) del_timer_sync(&pTS->RxPktPendingTimer); - pTS->RxPktPendingTimer.expires = jiffies + MSECS(pHTInfo->RxReorderPendingTime); + pTS->RxPktPendingTimer.expires = jiffies + + msecs_to_jiffies(pHTInfo->RxReorderPendingTime); add_timer(&pTS->RxPktPendingTimer); } + + kfree(prxbIndicateArray); } static u8 parse_subframe(struct sk_buff *skb, @@ -897,7 +907,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, //added by amy for reorder #ifdef NOT_YET struct net_device *wds = NULL; - struct sk_buff *skb2 = NULL; struct net_device *wds = NULL; int from_assoc_ap = 0; void *sta = NULL; @@ -1277,11 +1286,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, payload = skb->data + hdrlen; //ethertype = (payload[6] << 8) | payload[7]; rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC); - if (rxb == NULL) - { - IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__func__); + if (!rxb) goto rx_dropped; - } /* to parse amsdu packets */ /* qos data packets & reserved bit is 1 */ if (parse_subframe(skb, rx_stats, rxb, src, dst) == 0) { diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c index 38c3eb78094e..ae1274cfb392 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c @@ -103,12 +103,12 @@ static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; - *tag++ = MFIE_TYPE_GENERIC; //0 + *tag++ = MFIE_TYPE_GENERIC; /* 0 */ *tag++ = 7; *tag++ = 0x00; *tag++ = 0x50; *tag++ = 0xf2; - *tag++ = 0x02;//5 + *tag++ = 0x02; /* 5 */ *tag++ = 0x00; *tag++ = 0x01; #ifdef SUPPORT_USPD @@ -128,12 +128,12 @@ static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p) { u8 *tag = *tag_p; - *tag++ = MFIE_TYPE_GENERIC; //0 + *tag++ = MFIE_TYPE_GENERIC; /* 0 */ *tag++ = 7; *tag++ = 0x00; *tag++ = 0xe0; *tag++ = 0x4c; - *tag++ = 0x01;//5 + *tag++ = 0x01; /* 5 */ *tag++ = 0x02; *tag++ = 0x11; *tag++ = 0x00; @@ -186,14 +186,14 @@ static u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee) PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; u8 rate; - // 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M. + /* 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M. */ if(pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M) rate = 0x0c; else rate = ieee->basic_rate & 0x7f; if (rate == 0) { - // 2005.01.26, by rcnjko. + /* 2005.01.26, by rcnjko. */ if(ieee->mode == IEEE_A|| ieee->mode== IEEE_N_5G|| (ieee->mode== IEEE_N_24G&&!pHTInfo->bCurSuppCCK)) @@ -340,11 +340,11 @@ inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee) req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request)); req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); - req->header.duration_id = 0; //FIXME: is this OK ? + req->header.duration_id = 0; /* FIXME: is this OK? 
*/ - memset(req->header.addr1, 0xff, ETH_ALEN); + eth_broadcast_addr(req->header.addr1); memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN); - memset(req->header.addr3, 0xff, ETH_ALEN); + eth_broadcast_addr(req->header.addr3); tag = (u8 *) skb_put(skb,len+2+rate_len); @@ -380,7 +380,8 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee) if (ieee->beacon_txing && ieee->ieee_up) { // if(!timer_pending(&ieee->beacon_timer)) // add_timer(&ieee->beacon_timer); - mod_timer(&ieee->beacon_timer,jiffies+(MSECS(ieee->current_network.beacon_interval-5))); + mod_timer(&ieee->beacon_timer, + jiffies + msecs_to_jiffies(ieee->current_network.beacon_interval-5)); } //spin_unlock_irqrestore(&ieee->beacon_lock,flags); } @@ -468,7 +469,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee) if (ieee->state >= IEEE80211_LINKED && ieee->sync_scan_hurryup) goto out; - msleep_interruptible_rsl(IEEE80211_SOFTMAC_SCAN_TIME); + msleep_interruptible(IEEE80211_SOFTMAC_SCAN_TIME); } out: @@ -487,7 +488,7 @@ EXPORT_SYMBOL(ieee80211_softmac_scan_syncro); static void ieee80211_softmac_scan_wq(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq); static short watchdog; u8 channel_map[MAX_CHANNEL_NUMBER+1]; @@ -514,7 +515,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work) ieee80211_send_probe_requests(ieee); - queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME); + schedule_delayed_work(&ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME); up(&ieee->scan_sem); return; @@ -613,7 +614,7 @@ static void ieee80211_start_scan(struct ieee80211_device *ieee) if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){ if (ieee->scanning == 0) { ieee->scanning = 1; - queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0); + schedule_delayed_work(&ieee->softmac_scan_wq, 0); } }else ieee->start_scan(ieee->dev); @@ -672,7 +673,7 @@ inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *be else if(ieee->auth_mode == 1) auth->algorithm = cpu_to_le16(WLAN_AUTH_SHARED_KEY); else if(ieee->auth_mode == 2) - auth->algorithm = WLAN_AUTH_OPEN;//0x80; + auth->algorithm = WLAN_AUTH_OPEN; /* 0x80; */ printk("=================>%s():auth->algorithm is %d\n",__func__,auth->algorithm); auth->transaction = cpu_to_le16(ieee->associate_seq); ieee->associate_seq++; @@ -727,7 +728,7 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len)); - //HT ralated element + /* HT ralated element */ tmp_ht_cap_buf =(u8 *) &(ieee->pHTInfo->SelfHTCap); tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap); tmp_ht_info_buf =(u8 *) &(ieee->pHTInfo->SelfHTInfo); @@ -765,13 +766,13 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN); memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN); - beacon_buf->header.duration_id = 0; //FIXME + beacon_buf->header.duration_id = 0; /* FIXME */ beacon_buf->beacon_interval = cpu_to_le16(ieee->current_network.beacon_interval); beacon_buf->capability = cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS); beacon_buf->capability |= - cpu_to_le16(ieee->current_network.capability & 
WLAN_CAPABILITY_SHORT_PREAMBLE); //add short preamble here + cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); /* add short preamble here */ if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT)) beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT); @@ -1012,7 +1013,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco crypt = ieee->crypt[ieee->tx_keyidx]; encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name,"WEP") || wpa_ie_len)); - //Include High Throuput capability && Realtek proprietary + /* Include High Throuput capability && Realtek proprietary */ if (ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT) { ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap); @@ -1044,8 +1045,8 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco #ifdef THOMAS_TURBO len = sizeof(struct ieee80211_assoc_request_frame)+ 2 - + beacon->ssid_len//essid tagged val - + rate_len//rates tagged val + + beacon->ssid_len /* essid tagged val */ + + rate_len /* rates tagged val */ + wpa_ie_len + wmm_info_len + turbo_info_len @@ -1057,8 +1058,8 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco + ieee->tx_headroom; #else len = sizeof(struct ieee80211_assoc_request_frame)+ 2 - + beacon->ssid_len//essid tagged val - + rate_len//rates tagged val + + beacon->ssid_len /* essid tagged val */ + + rate_len /* rates tagged val */ + wpa_ie_len + wmm_info_len + ht_cap_len @@ -1240,7 +1241,7 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee) ieee->state = IEEE80211_ASSOCIATING_RETRY; - queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, \ + schedule_delayed_work(&ieee->associate_retry_wq, \ IEEE80211_SOFTMAC_ASSOC_RETRY_TIME); spin_unlock_irqrestore(&ieee->lock, flags); @@ -1381,7 +1382,7 @@ static void ieee80211_associate_complete(struct ieee80211_device *ieee) ieee->state = IEEE80211_LINKED; //ieee->UpdateHalRATRTableHandler(dev, ieee->dot11HTOperationalRateSet); - queue_work(ieee->wq, &ieee->associate_complete_wq); + schedule_work(&ieee->associate_complete_wq); } static void ieee80211_associate_procedure_wq(struct work_struct *work) @@ -1482,7 +1483,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee } ieee->state = IEEE80211_ASSOCIATING; - queue_work(ieee->wq, &ieee->associate_procedure_wq); + schedule_work(&ieee->associate_procedure_wq); }else{ if(ieee80211_is_54g(&ieee->current_network) && (ieee->modulation & IEEE80211_OFDM_MODULATION)){ @@ -1735,10 +1736,12 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h, if(dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST)& ieee->ps)) return 2; - if(!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout))) + if(!time_after(jiffies, + ieee->dev->trans_start + msecs_to_jiffies(timeout))) return 0; - if(!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout))) + if(!time_after(jiffies, + ieee->last_rx_ps_time + msecs_to_jiffies(timeout))) return 0; if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) && @@ -2041,7 +2044,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb, "Association response status code 0x%x\n", errcode); if(ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) { - queue_work(ieee->wq, &ieee->associate_procedure_wq); + schedule_work(&ieee->associate_procedure_wq); } else { ieee80211_associate_abort(ieee); } @@ -2097,7 +2100,7 @@ 
ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb, notify_wx_assoc_event(ieee); //HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); RemovePeerTS(ieee, header->addr2); - queue_work(ieee->wq, &ieee->associate_procedure_wq); + schedule_work(&ieee->associate_procedure_wq); } break; case IEEE80211_STYPE_MANAGE_ACT: @@ -2284,12 +2287,6 @@ void ieee80211_stop_queue(struct ieee80211_device *ieee) } EXPORT_SYMBOL(ieee80211_stop_queue); -inline void ieee80211_randomize_cell(struct ieee80211_device *ieee) -{ - - random_ether_addr(ieee->current_network.bssid); -} - /* called in user context only */ void ieee80211_start_master_bss(struct ieee80211_device *ieee) { @@ -2330,7 +2327,7 @@ static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee) static void ieee80211_start_ibss_wq(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq); /* iwconfig mode ad-hoc will schedule this and return * on the other hand this will block further iwconfig SET @@ -2379,7 +2376,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work) if (ieee->state == IEEE80211_NOLINK) { printk("creating new IBSS cell\n"); if(!ieee->wap_set) - ieee80211_randomize_cell(ieee); + random_ether_addr(ieee->current_network.bssid); if(ieee->modulation & IEEE80211_CCK_MODULATION){ @@ -2439,7 +2436,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work) inline void ieee80211_start_ibss(struct ieee80211_device *ieee) { - queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 150); + schedule_delayed_work(&ieee->start_ibss_wq, 150); } /* this is called only in user context, with wx_sem held */ @@ -2504,7 +2501,7 @@ EXPORT_SYMBOL(ieee80211_disassociate); static void ieee80211_associate_retry_wq(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq); unsigned long flags; @@ -2722,7 +2719,6 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee) setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb, (unsigned long)ieee); - ieee->wq = create_workqueue(DRV_NAME); INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq); INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq); @@ -2752,7 +2748,6 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee) del_timer_sync(&ieee->associate_timer); cancel_delayed_work(&ieee->associate_retry_wq); - destroy_workqueue(ieee->wq); up(&ieee->wx_sem); } diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c index 3bde744604c2..28737ec65186 100644 --- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c +++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c @@ -19,7 +19,7 @@ static void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 T { pBA->bValid = true; if(Time != 0) - mod_timer(&pBA->Timer, jiffies + MSECS(Time)); + mod_timer(&pBA->Timer, jiffies + msecs_to_jiffies(Time)); } /******************************************************************************************************************** @@ -254,7 +254,7 @@ static struct sk_buff *ieee80211_DELBA( static void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee, u8 
*dst, PBA_RECORD pBA) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = ieee80211_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ); //construct ACT_ADDBAREQ frames so set statuscode zero. if (skb) @@ -282,7 +282,7 @@ static void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee, static void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA, u16 StatusCode) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = ieee80211_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP); //construct ACT_ADDBARSP frames if (skb) { @@ -311,7 +311,7 @@ static void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA, TR_SELECT TxRxSelect, u16 ReasonCode) { - struct sk_buff *skb = NULL; + struct sk_buff *skb; skb = ieee80211_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode); //construct ACT_ADDBARSP frames if (skb) { diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c index f33c74342cf3..148d0d45547b 100644 --- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c +++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c @@ -35,9 +35,7 @@ static void RxPktPendingTimeout(unsigned long data) u8 index = 0; bool bPktInBuf = false; - spin_lock_irqsave(&(ieee->reorder_spinlock), flags); - //PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK); IEEE80211_DEBUG(IEEE80211_DL_REORDER,"==================>%s()\n",__func__); if(pRxTs->RxTimeoutIndicateSeq != 0xffff) { @@ -87,10 +85,10 @@ static void RxPktPendingTimeout(unsigned long data) if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff)) { pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq; - mod_timer(&pRxTs->RxPktPendingTimer, jiffies + MSECS(ieee->pHTInfo->RxReorderPendingTime)); + mod_timer(&pRxTs->RxPktPendingTimer, + jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime)); } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); - //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK); } /******************************************************************************************************************** @@ -212,7 +210,8 @@ static void AdmitTS(struct ieee80211_device *ieee, del_timer_sync(&pTsCommonInfo->InactTimer); if(InactTime!=0) - mod_timer(&pTsCommonInfo->InactTimer, jiffies + MSECS(InactTime)); + mod_timer(&pTsCommonInfo->InactTimer, + jiffies + msecs_to_jiffies(InactTime)); } @@ -469,7 +468,6 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs, while(!list_empty(&pRxTS->RxPendingPktList)) { - // PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK); spin_lock_irqsave(&(ieee->reorder_spinlock), flags); //pRxReorderEntry = list_entry(&pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List); pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List); @@ -489,7 +487,6 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs, prxb = NULL; } list_add_tail(&pRxReorderEntry->List,&ieee->RxReorder_Unused_List); - //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK); spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); } @@ -590,7 +587,8 @@ void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS) if(pTxTS->bAddBaReqDelayed) { IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n"); - mod_timer(&pTxTS->TsAddBaTimer, jiffies + MSECS(TS_ADDBA_DELAY)); + mod_timer(&pTxTS->TsAddBaTimer, + jiffies + msecs_to_jiffies(TS_ADDBA_DELAY)); } else { diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c 
b/drivers/staging/rtl8192u/r8190_rtl8256.c index e00032947e0f..5c3bb3be2720 100644 --- a/drivers/staging/rtl8192u/r8190_rtl8256.c +++ b/drivers/staging/rtl8192u/r8190_rtl8256.c @@ -1,12 +1,11 @@ /* - This is part of the rtl8192 driver - released under the GPL (See file COPYING for details). - - This files contains programming code for the rtl8256 - radio frontend. - - *Many* thanks to Realtek Corp. for their great support! - +* This is part of the rtl8192 driver +* released under the GPL (See file COPYING for details). +* +* This files contains programming code for the rtl8256 +* radio frontend. +* +* *Many* thanks to Realtek Corp. for their great support! */ #include "r8192U.h" @@ -22,7 +21,8 @@ * Output: NONE * Return: NONE * Note: 8226 support both 20M and 40 MHz - *---------------------------------------------------------------------------*/ + *-------------------------------------------------------------------------- + */ void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth) { u8 eRFPath; @@ -83,7 +83,8 @@ void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth) * Input: struct net_device* dev * Output: NONE * Return: NONE - *---------------------------------------------------------------------------*/ + *-------------------------------------------------------------------------- + */ void PHY_RF8256_Config(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); @@ -100,7 +101,8 @@ void PHY_RF8256_Config(struct net_device *dev) * Input: struct net_device* dev * Output: NONE * Return: NONE - *---------------------------------------------------------------------------*/ + *-------------------------------------------------------------------------- + */ void phy_RF8256_Config_ParaFile(struct net_device *dev) { u32 u4RegValue = 0; diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index f4a4eae72aa4..849a95ef723c 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -1092,10 +1092,17 @@ static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) static void rtl8192_tx_isr(struct urb *tx_urb) { struct sk_buff *skb = (struct sk_buff *)tx_urb->context; - struct net_device *dev = (struct net_device *)(skb->cb); + struct net_device *dev; struct r8192_priv *priv = NULL; - cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); - u8 queue_index = tcb_desc->queue_index; + cb_desc *tcb_desc; + u8 queue_index; + + if (!skb) + return; + + dev = (struct net_device *)(skb->cb); + tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); + queue_index = tcb_desc->queue_index; priv = ieee80211_priv(dev); @@ -1113,11 +1120,9 @@ static void rtl8192_tx_isr(struct urb *tx_urb) } /* free skb and tx_urb */ - if (skb != NULL) { - dev_kfree_skb_any(skb); - usb_free_urb(tx_urb); - atomic_dec(&priv->tx_pending[queue_index]); - } + dev_kfree_skb_any(skb); + usb_free_urb(tx_urb); + atomic_dec(&priv->tx_pending[queue_index]); /* * Handle HW Beacon: @@ -1371,7 +1376,7 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb) */ static u8 MapHwQueueToFirmwareQueue(u8 QueueID) { - u8 QueueSelect = 0x0; /* defualt set to */ + u8 QueueSelect = 0x0; /* default set to */ switch (QueueID) { case BE_QUEUE: @@ -1727,7 +1732,7 @@ static short rtl8192_usb_initendpoints(struct net_device *dev) priv->rx_urb = kmalloc(sizeof(struct urb *) * (MAX_RX_URB + 1), GFP_KERNEL); - if (priv->rx_urb == NULL) + if (!priv->rx_urb) return -ENOMEM; #ifndef 
JACKSON_NEW_RX @@ -1957,7 +1962,7 @@ static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv, network->qos_data.param_count)) { network->qos_data.old_param_count = network->qos_data.param_count; - queue_work(priv->priv_wq, &priv->qos_activate); + schedule_work(&priv->qos_activate); RT_TRACE(COMP_QOS, "QoS parameters change call qos_activate\n"); } @@ -1966,7 +1971,7 @@ static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv, &def_qos_parameters, size); if ((network->qos_data.active == 1) && (active_network == 1)) { - queue_work(priv->priv_wq, &priv->qos_activate); + schedule_work(&priv->qos_activate); RT_TRACE(COMP_QOS, "QoS was disabled call qos_activate\n"); } @@ -1985,7 +1990,7 @@ static int rtl8192_handle_beacon(struct net_device *dev, struct r8192_priv *priv = ieee80211_priv(dev); rtl8192_qos_handle_probe_response(priv, 1, network); - queue_delayed_work(priv->priv_wq, &priv->update_beacon_wq, 0); + schedule_delayed_work(&priv->update_beacon_wq, 0); return 0; } @@ -2037,7 +2042,7 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv, network->flags, priv->ieee80211->current_network.qos_data.active); if (set_qos_param == 1) - queue_work(priv->priv_wq, &priv->qos_activate); + schedule_work(&priv->qos_activate); return 0; @@ -2382,7 +2387,6 @@ static void rtl8192_init_priv_task(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); - priv->priv_wq = create_workqueue(DRV_NAME); INIT_WORK(&priv->reset_wq, rtl8192_restart); @@ -3436,8 +3440,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum, static void rtl819x_watchdog_wqcallback(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, - struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct r8192_priv *priv = container_of(dwork, struct r8192_priv, watch_dog_wq); struct net_device *dev = priv->ieee80211->dev; @@ -3514,7 +3517,7 @@ static void watch_dog_timer_callback(unsigned long data) { struct r8192_priv *priv = ieee80211_priv((struct net_device *)data); - queue_delayed_work(priv->priv_wq, &priv->watch_dog_wq, 0); + schedule_delayed_work(&priv->watch_dog_wq, 0); mod_timer(&priv->watch_dog_timer, jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME)); } @@ -4297,7 +4300,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv, if (is_cck_rate) { /* (1)Hardware does not provide RSSI for CCK */ - /* (2)PWDB, Average PWDB cacluated by hardware + /* (2)PWDB, Average PWDB calculated by hardware * (for rate adaptive) */ u8 report; @@ -4398,7 +4401,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv, } - /* (2)PWDB, Average PWDB cacluated by hardware + /* (2)PWDB, Average PWDB calculated by hardware * (for rate adaptive) */ rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106; @@ -5018,7 +5021,6 @@ fail2: kfree(priv->pFirmware); priv->pFirmware = NULL; rtl8192_usb_deleteendpoints(dev); - destroy_workqueue(priv->priv_wq); mdelay(10); fail: free_ieee80211(dev); @@ -5056,7 +5058,6 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf) kfree(priv->pFirmware); priv->pFirmware = NULL; rtl8192_usb_deleteendpoints(dev); - destroy_workqueue(priv->priv_wq); mdelay(10); } free_ieee80211(dev); diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c index 375ec96b9469..1e0e53c9c314 100644 --- a/drivers/staging/rtl8192u/r8192U_dm.c +++ b/drivers/staging/rtl8192u/r8192U_dm.c @@ -767,7 +767,7 @@ static void 
dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev) void dm_txpower_trackingcallback(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct r8192_priv *priv = container_of(dwork, struct r8192_priv, txpower_tracking_wq); struct net_device *dev = priv->ieee80211->dev; @@ -1628,47 +1628,75 @@ static void dm_bb_initialgain_backup(struct net_device *dev) void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type, u32 dm_value) { - if (dm_type == DIG_TYPE_THRESH_HIGH) { + switch (dm_type) { + case DIG_TYPE_THRESH_HIGH: dm_digtable.rssi_high_thresh = dm_value; - } else if (dm_type == DIG_TYPE_THRESH_LOW) { + break; + + case DIG_TYPE_THRESH_LOW: dm_digtable.rssi_low_thresh = dm_value; - } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) { + break; + + case DIG_TYPE_THRESH_HIGHPWR_HIGH: dm_digtable.rssi_high_power_highthresh = dm_value; - } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_LOW) { + break; + + case DIG_TYPE_THRESH_HIGHPWR_LOW: dm_digtable.rssi_high_power_lowthresh = dm_value; - } else if (dm_type == DIG_TYPE_ENABLE) { + break; + + case DIG_TYPE_ENABLE: dm_digtable.dig_state = DM_STA_DIG_MAX; dm_digtable.dig_enable_flag = true; - } else if (dm_type == DIG_TYPE_DISABLE) { + break; + + case DIG_TYPE_DISABLE: dm_digtable.dig_state = DM_STA_DIG_MAX; dm_digtable.dig_enable_flag = false; - } else if (dm_type == DIG_TYPE_DBG_MODE) { + break; + + case DIG_TYPE_DBG_MODE: if (dm_value >= DM_DBG_MAX) dm_value = DM_DBG_OFF; dm_digtable.dbg_mode = (u8)dm_value; - } else if (dm_type == DIG_TYPE_RSSI) { + break; + + case DIG_TYPE_RSSI: if (dm_value > 100) dm_value = 30; dm_digtable.rssi_val = (long)dm_value; - } else if (dm_type == DIG_TYPE_ALGORITHM) { + break; + + case DIG_TYPE_ALGORITHM: if (dm_value >= DIG_ALGO_MAX) dm_value = DIG_ALGO_BY_FALSE_ALARM; if (dm_digtable.dig_algorithm != (u8)dm_value) dm_digtable.dig_algorithm_switch = 1; dm_digtable.dig_algorithm = (u8)dm_value; - } else if (dm_type == DIG_TYPE_BACKOFF) { + break; + + case DIG_TYPE_BACKOFF: if (dm_value > 30) dm_value = 30; dm_digtable.backoff_val = (u8)dm_value; - } else if (dm_type == DIG_TYPE_RX_GAIN_MIN) { + break; + + case DIG_TYPE_RX_GAIN_MIN: if (dm_value == 0) dm_value = 0x1; dm_digtable.rx_gain_range_min = (u8)dm_value; - } else if (dm_type == DIG_TYPE_RX_GAIN_MAX) { + break; + + case DIG_TYPE_RX_GAIN_MAX: if (dm_value > 0x50) dm_value = 0x50; dm_digtable.rx_gain_range_max = (u8)dm_value; + break; + + default: + break; } + } /* DM_ChangeDynamicInitGainThresh */ /*----------------------------------------------------------------------------- @@ -2412,7 +2440,7 @@ static void dm_check_pbc_gpio(struct net_device *dev) *---------------------------------------------------------------------------*/ void dm_rf_pathcheck_workitemcallback(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, work); + struct delayed_work *dwork = to_delayed_work(work); struct r8192_priv *priv = container_of(dwork, struct r8192_priv, rfpath_check_wq); struct net_device *dev = priv->ieee80211->dev; /*bool bactually_set = false;*/ @@ -2769,12 +2797,14 @@ void dm_fsync_timer_callback(unsigned long data) if (bDoubleTimeInterval) { if (timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval); + priv->fsync_timer.expires = jiffies + + 
msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval); add_timer(&priv->fsync_timer); } else { if (timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval); + priv->fsync_timer.expires = jiffies + + msecs_to_jiffies(priv->ieee80211->fsync_time_interval); add_timer(&priv->fsync_timer); } } else { @@ -2847,7 +2877,8 @@ static void dm_StartSWFsync(struct net_device *dev) } if (timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); - priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval); + priv->fsync_timer.expires = jiffies + + msecs_to_jiffies(priv->ieee80211->fsync_time_interval); add_timer(&priv->fsync_timer); write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd); diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c index 4911fef2e2e5..f828e6441f2d 100644 --- a/drivers/staging/rtl8192u/r8192U_wx.c +++ b/drivers/staging/rtl8192u/r8192U_wx.c @@ -1,21 +1,23 @@ -/* - This file contains wireless extension handlers. - - This is part of rtl8180 OpenSource driver. - Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com> - Released under the terms of GPL (General Public Licence) - - Parts of this driver are based on the GPL part - of the official realtek driver. - - Parts of this driver are based on the rtl8180 driver skeleton - from Patric Schenke & Andres Salomon. - - Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver. - - We want to thank the Authors of those projects and the Ndiswrapper - project Authors. -*/ +/****************************************************************************** + * + * This file contains wireless extension handlers. + * + * This is part of rtl8180 OpenSource driver. + * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com> + * Released under the terms of GPL (General Public Licence) + * + * Parts of this driver are based on the GPL part + * of the official realtek driver. + * + * Parts of this driver are based on the rtl8180 driver skeleton + * from Patric Schenke & Andres Salomon. + * + * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver. + * + * We want to thank the Authors of those projects and the Ndiswrapper + * project Authors. 
+ * + *****************************************************************************/ #include <linux/string.h> #include "r8192U.h" diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c index f264d88364a1..696df3440077 100644 --- a/drivers/staging/rtl8192u/r819xU_phy.c +++ b/drivers/staging/rtl8192u/r819xU_phy.c @@ -1683,8 +1683,7 @@ void InitialGain819xUsb(struct net_device *dev, u8 Operation) void InitialGainOperateWorkItemCallBack(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, - work); + struct delayed_work *dwork = to_delayed_work(work); struct r8192_priv *priv = container_of(dwork, struct r8192_priv, initialgain_operate_wq); struct net_device *dev = priv->ieee80211->dev; diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h index 3d64feeb80e7..29e47e1501c5 100644 --- a/drivers/staging/rtl8712/drv_types.h +++ b/drivers/staging/rtl8712/drv_types.h @@ -159,6 +159,7 @@ struct _adapter { struct mp_priv mppriv; s32 bDriverStopped; s32 bSurpriseRemoved; + s32 bSuspended; u32 IsrContent; u32 ImrContent; u8 EepromAddressSize; diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c index 974ca021ccef..d13b4d53c256 100644 --- a/drivers/staging/rtl8712/ieee80211.c +++ b/drivers/staging/rtl8712/ieee80211.c @@ -376,7 +376,7 @@ int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, uint cnt; /*Search required WPA or WPA2 IE and copy to sec_ie[ ]*/ - cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_); + cnt = _TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_; while (cnt < in_len) { authmode = in_ie[cnt]; if ((authmode == _WPA_IE_ID_) && diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h index d374824c4f33..67ab58084e8a 100644 --- a/drivers/staging/rtl8712/ieee80211.h +++ b/drivers/staging/rtl8712/ieee80211.h @@ -12,8 +12,7 @@ * more details. * * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * this program; if not, see <http://www.gnu.org/licenses/>. * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. 
@@ -61,7 +60,6 @@ #define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6 #define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7 - #define IEEE_CRYPT_ALG_NAME_LEN 16 #define WPA_CIPHER_NONE BIT(0) @@ -70,8 +68,6 @@ #define WPA_CIPHER_TKIP BIT(3) #define WPA_CIPHER_CCMP BIT(4) - - #define WPA_SELECTOR_LEN 4 #define RSN_HEADER_LEN 4 @@ -88,7 +84,6 @@ enum NETWORK_TYPE { WIRELESS_11BGN = (WIRELESS_11B | WIRELESS_11G | WIRELESS_11N), }; - struct ieee_param { u32 cmd; u8 sta_addr[ETH_ALEN]; @@ -161,7 +156,6 @@ struct ieee80211_hdr_3addr { u16 seq_ctl; } __packed; - struct ieee80211_hdr_qos { u16 frame_ctl; u16 duration_id; @@ -191,7 +185,6 @@ struct eapol { u16 length; } __packed; - enum eap_type { EAP_PACKET = 0, EAPOL_START, @@ -255,7 +248,6 @@ enum eap_type { #define IEEE80211_STYPE_CFPOLL 0x0060 #define IEEE80211_STYPE_CFACKPOLL 0x0070 #define IEEE80211_QOS_DATAGRP 0x0080 -#define IEEE80211_QoS_DATAGRP IEEE80211_QOS_DATAGRP #define IEEE80211_SCTL_FRAG 0x000F #define IEEE80211_SCTL_SEQ 0xFFF0 @@ -305,15 +297,15 @@ struct ieee80211_snap_hdr { #define WLAN_AUTH_CHALLENGE_LEN 128 -#define WLAN_CAPABILITY_BSS (1<<0) -#define WLAN_CAPABILITY_IBSS (1<<1) -#define WLAN_CAPABILITY_CF_POLLABLE (1<<2) -#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3) -#define WLAN_CAPABILITY_PRIVACY (1<<4) -#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5) -#define WLAN_CAPABILITY_PBCC (1<<6) -#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) -#define WLAN_CAPABILITY_SHORT_SLOT (1<<10) +#define WLAN_CAPABILITY_BSS BIT(0) +#define WLAN_CAPABILITY_IBSS BIT(1) +#define WLAN_CAPABILITY_CF_POLLABLE BIT(2) +#define WLAN_CAPABILITY_CF_POLL_REQUEST BIT(3) +#define WLAN_CAPABILITY_PRIVACY BIT(4) +#define WLAN_CAPABILITY_SHORT_PREAMBLE BIT(5) +#define WLAN_CAPABILITY_PBCC BIT(6) +#define WLAN_CAPABILITY_CHANNEL_AGILITY BIT(7) +#define WLAN_CAPABILITY_SHORT_SLOT BIT(10) /* Information Element IDs */ #define WLAN_EID_SSID 0 @@ -331,24 +323,21 @@ struct ieee80211_snap_hdr { #define IEEE80211_DATA_HDR3_LEN 24 #define IEEE80211_DATA_HDR4_LEN 30 - -#define IEEE80211_STATMASK_SIGNAL (1<<0) -#define IEEE80211_STATMASK_RSSI (1<<1) -#define IEEE80211_STATMASK_NOISE (1<<2) -#define IEEE80211_STATMASK_RATE (1<<3) +#define IEEE80211_STATMASK_SIGNAL BIT(0) +#define IEEE80211_STATMASK_RSSI BIT(1) +#define IEEE80211_STATMASK_NOISE BIT(2) +#define IEEE80211_STATMASK_RATE BIT(3) #define IEEE80211_STATMASK_WEMASK 0x7 +#define IEEE80211_CCK_MODULATION BIT(0) +#define IEEE80211_OFDM_MODULATION BIT(1) -#define IEEE80211_CCK_MODULATION (1<<0) -#define IEEE80211_OFDM_MODULATION (1<<1) - -#define IEEE80211_24GHZ_BAND (1<<0) -#define IEEE80211_52GHZ_BAND (1<<1) +#define IEEE80211_24GHZ_BAND BIT(0) +#define IEEE80211_52GHZ_BAND BIT(1) #define IEEE80211_CCK_RATE_LEN 4 #define IEEE80211_NUM_OFDM_RATESLEN 8 - #define IEEE80211_CCK_RATE_1MB 0x02 #define IEEE80211_CCK_RATE_2MB 0x04 #define IEEE80211_CCK_RATE_5MB 0x0B @@ -364,18 +353,18 @@ struct ieee80211_snap_hdr { #define IEEE80211_OFDM_RATE_54MB 0x6C #define IEEE80211_BASIC_RATE_MASK 0x80 -#define IEEE80211_CCK_RATE_1MB_MASK (1<<0) -#define IEEE80211_CCK_RATE_2MB_MASK (1<<1) -#define IEEE80211_CCK_RATE_5MB_MASK (1<<2) -#define IEEE80211_CCK_RATE_11MB_MASK (1<<3) -#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4) -#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5) -#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6) -#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7) -#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8) -#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9) -#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10) -#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11) +#define 
IEEE80211_CCK_RATE_1MB_MASK BIT(0) +#define IEEE80211_CCK_RATE_2MB_MASK BIT(1) +#define IEEE80211_CCK_RATE_5MB_MASK BIT(2) +#define IEEE80211_CCK_RATE_11MB_MASK BIT(3) +#define IEEE80211_OFDM_RATE_6MB_MASK BIT(4) +#define IEEE80211_OFDM_RATE_9MB_MASK BIT(5) +#define IEEE80211_OFDM_RATE_12MB_MASK BIT(6) +#define IEEE80211_OFDM_RATE_18MB_MASK BIT(7) +#define IEEE80211_OFDM_RATE_24MB_MASK BIT(8) +#define IEEE80211_OFDM_RATE_36MB_MASK BIT(9) +#define IEEE80211_OFDM_RATE_48MB_MASK BIT(10) +#define IEEE80211_OFDM_RATE_54MB_MASK BIT(11) #define IEEE80211_CCK_RATES_MASK 0x0000000F #define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \ @@ -401,9 +390,6 @@ struct ieee80211_snap_hdr { #define IEEE80211_NUM_CCK_RATES 4 #define IEEE80211_OFDM_SHIFT_MASK_A 4 - - - /* NOTE: This data is for statistical purposes; not all hardware provides this * information for frames received. Not setting these will not cause * any adverse affects. @@ -481,15 +467,15 @@ struct ieee80211_softmac_stats { uint swtxawake; }; -#define SEC_KEY_1 (1<<0) -#define SEC_KEY_2 (1<<1) -#define SEC_KEY_3 (1<<2) -#define SEC_KEY_4 (1<<3) -#define SEC_ACTIVE_KEY (1<<4) -#define SEC_AUTH_MODE (1<<5) -#define SEC_UNICAST_GROUP (1<<6) -#define SEC_LEVEL (1<<7) -#define SEC_ENABLED (1<<8) +#define SEC_KEY_1 BIT(0) +#define SEC_KEY_2 BIT(1) +#define SEC_KEY_3 BIT(2) +#define SEC_KEY_4 BIT(3) +#define SEC_ACTIVE_KEY BIT(4) +#define SEC_AUTH_MODE BIT(5) +#define SEC_UNICAST_GROUP BIT(6) +#define SEC_LEVEL BIT(7) +#define SEC_ENABLED BIT(8) #define SEC_LEVEL_0 0 /* None */ #define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */ @@ -645,9 +631,9 @@ struct ieee80211_txb { #define MAX_WPA_IE_LEN 128 -#define NETWORK_EMPTY_ESSID (1<<0) -#define NETWORK_HAS_OFDM (1<<1) -#define NETWORK_HAS_CCK (1<<2) +#define NETWORK_EMPTY_ESSID BIT(0) +#define NETWORK_HAS_OFDM BIT(1) +#define NETWORK_HAS_CCK BIT(2) #define IEEE80211_DTIM_MBCAST 4 #define IEEE80211_DTIM_UCAST 2 @@ -699,15 +685,15 @@ enum ieee80211_state { #define DEFAULT_MAX_SCAN_AGE (15 * HZ) #define DEFAULT_FTS 2346 -#define CFG_IEEE80211_RESERVE_FCS (1<<0) -#define CFG_IEEE80211_COMPUTE_FCS (1<<1) +#define CFG_IEEE80211_RESERVE_FCS BIT(0) +#define CFG_IEEE80211_COMPUTE_FCS BIT(1) #define MAXTID 16 -#define IEEE_A (1<<0) -#define IEEE_B (1<<1) -#define IEEE_G (1<<2) -#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G) +#define IEEE_A BIT(0) +#define IEEE_B BIT(1) +#define IEEE_G BIT(2) +#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G) static inline int ieee80211_is_empty_essid(const char *essid, int essid_len) { @@ -757,7 +743,7 @@ unsigned char *r8712_get_wpa_ie(unsigned char *pie, int *rsn_ie_len, int limit); unsigned char *r8712_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit); int r8712_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, - int *pairwise_cipher); + int *pairwise_cipher); int r8712_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher); int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c index b89e2d3c4fe1..ab19112eae13 100644 --- a/drivers/staging/rtl8712/os_intfs.c +++ b/drivers/staging/rtl8712/os_intfs.c @@ -269,7 +269,6 @@ void r8712_stop_drv_timers(struct _adapter *padapter) static u8 init_default_value(struct _adapter *padapter) { - u8 ret = _SUCCESS; struct registry_priv *pregistrypriv = &padapter->registrypriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ 
-302,7 +301,7 @@ static u8 init_default_value(struct _adapter *padapter) r8712_init_registrypriv_dev_network(padapter); r8712_update_registrypriv_dev_network(padapter); /*misc.*/ - return ret; + return _SUCCESS; } u8 r8712_init_drv_sw(struct _adapter *padapter) diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c index 2f5460dbda8b..735a0eadd98c 100644 --- a/drivers/staging/rtl8712/recv_linux.c +++ b/drivers/staging/rtl8712/recv_linux.c @@ -44,7 +44,8 @@ int r8712_os_recv_resource_alloc(struct _adapter *padapter, union recv_frame *precvframe) { - precvframe->u.hdr.pkt_newalloc = precvframe->u.hdr.pkt = NULL; + precvframe->u.hdr.pkt_newalloc = NULL; + precvframe->u.hdr.pkt = NULL; return _SUCCESS; } @@ -56,7 +57,7 @@ int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter, precvbuf->irp_pending = false; precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL); - if (precvbuf->purb == NULL) + if (!precvbuf->purb) res = _FAIL; precvbuf->pskb = NULL; precvbuf->reuse = false; @@ -114,7 +115,7 @@ void r8712_recv_indicatepkt(struct _adapter *padapter, precvpriv = &(padapter->recvpriv); pfree_recv_queue = &(precvpriv->free_recv_queue); skb = precv_frame->u.hdr.pkt; - if (skb == NULL) + if (!skb) goto _recv_indicatepkt_drop; skb->data = precv_frame->u.hdr.rx_data; skb->len = precv_frame->u.hdr.len; diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c index 9b9160947943..50f400234593 100644 --- a/drivers/staging/rtl8712/rtl8712_cmd.c +++ b/drivers/staging/rtl8712/rtl8712_cmd.c @@ -293,7 +293,7 @@ u8 r8712_fw_cmd(struct _adapter *pAdapter, u32 cmd) r8712_write32(pAdapter, IOCMD_CTRL_REG, cmd); msleep(100); - while ((0 != r8712_read32(pAdapter, IOCMD_CTRL_REG)) && + while ((r8712_read32(pAdapter, IOCMD_CTRL_REG != 0)) && (pollingcnts > 0)) { pollingcnts--; msleep(20); @@ -317,7 +317,7 @@ int r8712_cmd_thread(void *context) unsigned int cmdsz, wr_sz, *pcmdbuf; struct tx_desc *pdesc; void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd); - struct _adapter *padapter = (struct _adapter *)context; + struct _adapter *padapter = context; struct cmd_priv *pcmdpriv = &(padapter->cmdpriv); allow_signal(SIGTERM); diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c index eaa93fbb95a3..76f60ba5ee9b 100644 --- a/drivers/staging/rtl8712/rtl8712_efuse.c +++ b/drivers/staging/rtl8712/rtl8712_efuse.c @@ -161,7 +161,7 @@ static u8 efuse_is_empty(struct _adapter *padapter, u8 *empty) /* read one byte to check if E-Fuse is empty */ if (efuse_one_byte_rw(padapter, true, 0, &value)) { - if (0xFF == value) + if (value == 0xFF) *empty = true; else *empty = false; @@ -345,7 +345,7 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr) ret = false; } else if (pkt.data[i * 2] != value) { ret = false; - if (0xFF == value) /* write again */ + if (value == 0xFF) /* write again */ efuse_one_byte_write(padapter, addr, pkt.data[i * 2]); } @@ -353,7 +353,7 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr) ret = false; } else if (pkt.data[i * 2 + 1] != value) { ret = false; - if (0xFF == value) /* write again */ + if (value == 0xFF) /* write again */ efuse_one_byte_write(padapter, addr + 1, pkt.data[i * 2 + 1]); @@ -420,7 +420,7 @@ u8 r8712_efuse_pg_packet_write(struct _adapter *padapter, const u8 offset, } /* write header fail */ bResult = false; - if (0xFF == efuse_data) + if (efuse_data == 0xFF) return bResult; /* nothing damaged. 
*/ /* call rescue procedure */ if (!fix_header(padapter, efuse_data, efuse_addr)) diff --git a/drivers/staging/rtl8712/rtl8712_io.c b/drivers/staging/rtl8712/rtl8712_io.c index 4148d48ece62..391eff37f573 100644 --- a/drivers/staging/rtl8712/rtl8712_io.c +++ b/drivers/staging/rtl8712/rtl8712_io.c @@ -36,109 +36,76 @@ u8 r8712_read8(struct _adapter *adapter, u32 addr) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); - u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - _read8 = pintfhdl->io_ops._read8; - return _read8(pintfhdl, addr); + return hdl->io_ops._read8(hdl, addr); } u16 r8712_read16(struct _adapter *adapter, u32 addr) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); - u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - _read16 = pintfhdl->io_ops._read16; - return _read16(pintfhdl, addr); + return hdl->io_ops._read16(hdl, addr); } u32 r8712_read32(struct _adapter *adapter, u32 addr) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); - u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - _read32 = pintfhdl->io_ops._read32; - return _read32(pintfhdl, addr); + return hdl->io_ops._read32(hdl, addr); } void r8712_write8(struct _adapter *adapter, u32 addr, u8 val) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); - void (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - _write8 = pintfhdl->io_ops._write8; - _write8(pintfhdl, addr, val); + hdl->io_ops._write8(hdl, addr, val); } void r8712_write16(struct _adapter *adapter, u32 addr, u16 val) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); - void (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - _write16 = pintfhdl->io_ops._write16; - _write16(pintfhdl, addr, val); + hdl->io_ops._write16(hdl, addr, val); } void r8712_write32(struct _adapter *adapter, u32 addr, u32 val) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - void (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val); - - _write32 = pintfhdl->io_ops._write32; - _write32(pintfhdl, addr, val); + hdl->io_ops._write32(hdl, addr, val); } void r8712_read_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, - u8 *pmem); if (adapter->bDriverStopped || adapter->bSurpriseRemoved) return; - _read_mem = pintfhdl->io_ops._read_mem; - _read_mem(pintfhdl, addr, cnt, pmem); + + hdl->io_ops._read_mem(hdl, addr, cnt, pmem); } void r8712_write_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); - void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, - u8 *pmem); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - _write_mem = pintfhdl->io_ops._write_mem; - _write_mem(pintfhdl, addr, cnt, pmem); + 
hdl->io_ops._write_mem(hdl, addr, cnt, pmem); } void r8712_read_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, - u8 *pmem); if (adapter->bDriverStopped || adapter->bSurpriseRemoved) return; - _read_port = pintfhdl->io_ops._read_port; - _read_port(pintfhdl, addr, cnt, pmem); + + hdl->io_ops._read_port(hdl, addr, cnt, pmem); } void r8712_write_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem) { - struct io_queue *pio_queue = adapter->pio_queue; - struct intf_hdl *pintfhdl = &(pio_queue->intf); + struct intf_hdl *hdl = &adapter->pio_queue->intf; - u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, - u8 *pmem); - _write_port = pintfhdl->io_ops._write_port; - _write_port(pintfhdl, addr, cnt, pmem); + hdl->io_ops._write_port(hdl, addr, cnt, pmem); } diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index 562a10203127..86136cc73672 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -136,15 +136,12 @@ static struct cmd_obj *_dequeue_cmd(struct __queue *queue) unsigned long irqL; struct cmd_obj *obj; - spin_lock_irqsave(&(queue->lock), irqL); - if (list_empty(&(queue->queue))) { - obj = NULL; - } else { - obj = LIST_CONTAINOR(queue->queue.next, - struct cmd_obj, list); + spin_lock_irqsave(&queue->lock, irqL); + obj = list_first_entry_or_null(&queue->queue, + struct cmd_obj, list); + if (obj) list_del_init(&obj->list); - } - spin_unlock_irqrestore(&(queue->lock), irqL); + spin_unlock_irqrestore(&queue->lock, irqL); return obj; } @@ -318,27 +315,6 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset) return _SUCCESS; } -/* power tracking mechanism setting */ -u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type) -{ - struct cmd_obj *ph2c; - struct writePTM_parm *pwriteptmparm; - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - - ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); - if (ph2c == NULL) - return _FAIL; - pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC); - if (pwriteptmparm == NULL) { - kfree(ph2c); - return _FAIL; - } - init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetPT)); - pwriteptmparm->type = type; - r8712_enqueue_cmd(pcmdpriv, ph2c); - return _SUCCESS; -} - u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type) { struct cmd_obj *ph2c; @@ -733,32 +709,6 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter, return _SUCCESS; } -u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval) -{ - struct cmd_priv *pcmdpriv = &padapter->cmdpriv; - struct cmd_obj *ph2c; - struct readTSSI_parm *prdtssiparm; - - ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC); - if (ph2c == NULL) - return _FAIL; - prdtssiparm = kmalloc(sizeof(*prdtssiparm), GFP_ATOMIC); - if (prdtssiparm == NULL) { - kfree(ph2c); - return _FAIL; - } - INIT_LIST_HEAD(&ph2c->list); - ph2c->cmdcode = GEN_CMD_CODE(_ReadTSSI); - ph2c->parmbuf = (unsigned char *)prdtssiparm; - ph2c->cmdsz = sizeof(struct readTSSI_parm); - ph2c->rsp = pval; - ph2c->rspsz = sizeof(struct readTSSI_rsp); - - prdtssiparm->offset = offset; - r8712_enqueue_cmd(pcmdpriv, ph2c); - return _SUCCESS; -} - u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr) { struct cmd_priv *pcmdpriv = &padapter->cmdpriv; diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h 
b/drivers/staging/rtl8712/rtl871x_cmd.h index 818cd8807a38..e4a2a50c85de 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.h +++ b/drivers/staging/rtl8712/rtl871x_cmd.h @@ -736,8 +736,6 @@ u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode); u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val); u8 r8712_setrttbl_cmd(struct _adapter *padapter, struct setratable_parm *prate_table); -u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval); -u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type); u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type); u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type); u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid); diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c index fbbc63570eab..3a10940db9b7 100644 --- a/drivers/staging/rtl8712/rtl871x_io.c +++ b/drivers/staging/rtl8712/rtl871x_io.c @@ -113,7 +113,7 @@ uint r8712_alloc_io_queue(struct _adapter *adapter) struct io_req *pio_req; pio_queue = kmalloc(sizeof(*pio_queue), GFP_ATOMIC); - if (pio_queue == NULL) + if (!pio_queue) goto alloc_io_queue_fail; INIT_LIST_HEAD(&pio_queue->free_ioreqs); INIT_LIST_HEAD(&pio_queue->processing); diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index edfc6805e012..1b9e24900477 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -137,7 +137,7 @@ static inline void handle_group_key(struct ieee_param *param, } } -static inline char *translate_scan(struct _adapter *padapter, +static noinline_for_stack char *translate_scan(struct _adapter *padapter, struct iw_request_info *info, struct wlan_network *pnetwork, char *start, char *stop) @@ -398,12 +398,9 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, wep_key_idx = 0; if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 
5 : 13; - pwep = kmalloc((u32)(wep_key_len + - FIELD_OFFSET(struct NDIS_802_11_WEP, - KeyMaterial)), GFP_ATOMIC); + pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC); if (pwep == NULL) return -ENOMEM; - memset(pwep, 0, sizeof(struct NDIS_802_11_WEP)); pwep->KeyLength = wep_key_len; pwep->Length = wep_key_len + FIELD_OFFSET(struct NDIS_802_11_WEP, @@ -1964,7 +1961,7 @@ static int r871x_get_ap_info(struct net_device *dev, struct list_head *plist, *phead; unsigned char *pbuf; u8 bssid[ETH_ALEN]; - char data[32]; + char data[33]; if (padapter->bDriverStopped || (pdata == NULL)) return -EINVAL; @@ -1979,6 +1976,7 @@ static int r871x_get_ap_info(struct net_device *dev, if (pdata->length >= 32) { if (copy_from_user(data, pdata->pointer, 32)) return -EINVAL; + data[32] = 0; } else { return -EINVAL; } diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c index 7c346a405a20..c7f2e5167cb7 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c @@ -49,8 +49,7 @@ uint oid_rt_get_signal_quality_hdl(struct oid_par_priv *poid_par_priv) uint oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; @@ -66,8 +65,7 @@ uint oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv) uint oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; @@ -83,8 +81,7 @@ uint oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv) uint oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; @@ -115,8 +112,7 @@ uint oid_rt_get_rx_retry_hdl(struct oid_par_priv *poid_par_priv) uint oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; @@ -147,8 +143,7 @@ uint oid_rt_get_tx_beacon_err_hdl(struct oid_par_priv *poid_par_priv) uint oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; @@ -172,8 +167,7 @@ uint oid_rt_set_encryption_algorithm_hdl(struct oid_par_priv uint oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; u32 preamblemode = 0; if (poid_par_priv->type_of_oid != QUERY_OID) @@ -202,8 +196,7 @@ uint oid_rt_get_ap_ip_hdl(struct oid_par_priv *poid_par_priv) uint oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct 
_adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; struct eeprom_priv *peeprompriv = &padapter->eeprompriv; if (poid_par_priv->type_of_oid != QUERY_OID) @@ -216,8 +209,7 @@ uint oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv) uint oid_rt_set_channelplan_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; struct eeprom_priv *peeprompriv = &padapter->eeprompriv; if (poid_par_priv->type_of_oid != SET_OID) @@ -229,8 +221,7 @@ uint oid_rt_set_channelplan_hdl(struct oid_par_priv uint oid_rt_set_preamble_mode_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; u32 preamblemode = 0; if (poid_par_priv->type_of_oid != SET_OID) @@ -267,8 +258,7 @@ uint oid_rt_dedicate_probe_hdl(struct oid_par_priv uint oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; @@ -285,8 +275,7 @@ uint oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv uint oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != QUERY_OID) return RNDIS_STATUS_NOT_ACCEPTED; @@ -325,8 +314,7 @@ uint oid_rt_get_enc_key_match_count_hdl(struct oid_par_priv uint oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct NDIS_802_11_CONFIGURATION *pnic_Config; u32 channelnum; @@ -449,8 +437,7 @@ uint oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv* poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; - struct _adapter *Adapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *Adapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != SET_OID) /* QUERY_OID */ return RNDIS_STATUS_NOT_ACCEPTED; @@ -470,8 +457,7 @@ uint oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv* uint oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv) { uint status = RNDIS_STATUS_SUCCESS; - struct _adapter *Adapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *Adapter = poid_par_priv->adapter_context; if (poid_par_priv->type_of_oid != SET_OID) /* QUERY_OID */ return RNDIS_STATUS_NOT_ACCEPTED; @@ -516,8 +502,7 @@ enum _CONNECT_STATE_ { uint oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv) { - struct _adapter *padapter = (struct _adapter *) - (poid_par_priv->adapter_context); + struct _adapter *padapter = poid_par_priv->adapter_context; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); u32 ulInfo; diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c index 04f727fc95ea..62d4ae85af15 100644 --- a/drivers/staging/rtl8712/rtl871x_mlme.c +++ b/drivers/staging/rtl8712/rtl871x_mlme.c @@ -64,7 +64,7 @@ static sint _init_mlme_priv(struct _adapter *padapter) 
memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid)); pbuf = kmalloc_array(MAX_BSS_CNT, sizeof(struct wlan_network), GFP_ATOMIC); - if (pbuf == NULL) + if (!pbuf) return _FAIL; pmlmepriv->free_bss_buf = pbuf; pnetwork = (struct wlan_network *)pbuf; @@ -87,16 +87,15 @@ struct wlan_network *_r8712_alloc_network(struct mlme_priv *pmlmepriv) unsigned long irqL; struct wlan_network *pnetwork; struct __queue *free_queue = &pmlmepriv->free_bss_pool; - struct list_head *plist = NULL; - if (list_empty(&free_queue->queue)) - return NULL; spin_lock_irqsave(&free_queue->lock, irqL); - plist = free_queue->queue.next; - pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); - list_del_init(&pnetwork->list); - pnetwork->last_scanned = jiffies; - pmlmepriv->num_of_scanned++; + pnetwork = list_first_entry_or_null(&free_queue->queue, + struct wlan_network, list); + if (pnetwork) { + list_del_init(&pnetwork->list); + pnetwork->last_scanned = jiffies; + pmlmepriv->num_of_scanned++; + } spin_unlock_irqrestore(&free_queue->lock, irqL); return pnetwork; } @@ -469,8 +468,7 @@ static int is_desired_network(struct _adapter *adapter, pnetwork->network.IELength, wps_ie, &wps_ielen)) return true; - else - return false; + return false; } if ((psecuritypriv->PrivacyAlgrthm != _NO_PRIVACY_) && (pnetwork->network.Privacy == 0)) @@ -1203,7 +1201,7 @@ sint r8712_set_auth(struct _adapter *adapter, struct setauth_parm *psetauthparm; pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC); - if (pcmd == NULL) + if (!pcmd) return _FAIL; psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC); @@ -1233,7 +1231,7 @@ sint r8712_set_key(struct _adapter *adapter, sint ret = _SUCCESS; pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC); - if (pcmd == NULL) + if (!pcmd) return _FAIL; psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_ATOMIC); if (psetkeyparm == NULL) { diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c index 44da4fe89381..5e4fda1890f5 100644 --- a/drivers/staging/rtl8712/rtl871x_mp.c +++ b/drivers/staging/rtl8712/rtl871x_mp.c @@ -235,7 +235,7 @@ static u8 set_bb_reg(struct _adapter *pAdapter, if (bitmask != bMaskDWord) { org_value = r8712_bb_reg_read(pAdapter, offset); bit_shift = bitshift(bitmask); - new_value = ((org_value & (~bitmask)) | (value << bit_shift)); + new_value = (org_value & (~bitmask)) | (value << bit_shift); } else { new_value = value; } @@ -260,7 +260,7 @@ static u8 set_rf_reg(struct _adapter *pAdapter, u8 path, u8 offset, u32 bitmask, if (bitmask != bMaskDWord) { org_value = r8712_rf_reg_read(pAdapter, path, offset); bit_shift = bitshift(bitmask); - new_value = ((org_value & (~bitmask)) | (value << bit_shift)); + new_value = (org_value & (~bitmask)) | (value << bit_shift); } else { new_value = value; } @@ -281,10 +281,10 @@ void r8712_SetChannel(struct _adapter *pAdapter) u16 code = GEN_CMD_CODE(_SetChannel); pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC); - if (pcmd == NULL) + if (!pcmd) return; pparm = kmalloc(sizeof(*pparm), GFP_ATOMIC); - if (pparm == NULL) { + if (!pparm) { kfree(pcmd); return; } @@ -327,10 +327,10 @@ void r8712_SetTxAGCOffset(struct _adapter *pAdapter, u32 ulTxAGCOffset) { u32 TxAGCOffset_B, TxAGCOffset_C, TxAGCOffset_D, tmpAGC; - TxAGCOffset_B = (ulTxAGCOffset & 0x000000ff); + TxAGCOffset_B = ulTxAGCOffset & 0x000000ff; TxAGCOffset_C = (ulTxAGCOffset & 0x0000ff00) >> 8; TxAGCOffset_D = (ulTxAGCOffset & 0x00ff0000) >> 16; - tmpAGC = (TxAGCOffset_D << 8 | TxAGCOffset_C << 4 | TxAGCOffset_B); + tmpAGC = TxAGCOffset_D << 8 | TxAGCOffset_C << 4 | 
TxAGCOffset_B; set_bb_reg(pAdapter, rFPGA0_TxGainStage, (bXBTxAGC | bXCTxAGC | bXDTxAGC), tmpAGC); } diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h index 8e7c7f8b69f9..8dc898024e07 100644 --- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h +++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h @@ -150,90 +150,90 @@ uint oid_rt_get_power_mode_hdl( #ifdef _RTL871X_MP_IOCTL_C_ /* CAUTION!!! */ /* This ifdef _MUST_ be left in!! */ static const struct oid_obj_priv oid_rtl_seg_81_80_00[] = { - {1, &oid_null_function}, /*0x00 OID_RT_PRO_RESET_DUT */ - {1, &oid_rt_pro_set_data_rate_hdl}, /*0x01*/ - {1, &oid_rt_pro_start_test_hdl},/*0x02*/ - {1, &oid_rt_pro_stop_test_hdl}, /*0x03*/ - {1, &oid_null_function}, /*0x04 OID_RT_PRO_SET_PREAMBLE*/ - {1, &oid_null_function}, /*0x05 OID_RT_PRO_SET_SCRAMBLER*/ - {1, &oid_null_function}, /*0x06 OID_RT_PRO_SET_FILTER_BB*/ - {1, &oid_null_function}, /*0x07 + {1, oid_null_function}, /*0x00 OID_RT_PRO_RESET_DUT */ + {1, oid_rt_pro_set_data_rate_hdl}, /*0x01*/ + {1, oid_rt_pro_start_test_hdl}, /*0x02*/ + {1, oid_rt_pro_stop_test_hdl}, /*0x03*/ + {1, oid_null_function}, /*0x04 OID_RT_PRO_SET_PREAMBLE*/ + {1, oid_null_function}, /*0x05 OID_RT_PRO_SET_SCRAMBLER*/ + {1, oid_null_function}, /*0x06 OID_RT_PRO_SET_FILTER_BB*/ + {1, oid_null_function}, /*0x07 * OID_RT_PRO_SET_MANUAL_DIVERS_BB*/ - {1, &oid_rt_pro_set_channel_direct_call_hdl}, /*0x08*/ - {1, &oid_null_function}, /*0x09 + {1, oid_rt_pro_set_channel_direct_call_hdl}, /*0x08*/ + {1, oid_null_function}, /*0x09 * OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL*/ - {1, &oid_null_function}, /*0x0A + {1, oid_null_function}, /*0x0A * OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL*/ - {1, &oid_rt_pro_set_continuous_tx_hdl}, /*0x0B + {1, oid_rt_pro_set_continuous_tx_hdl}, /*0x0B * OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL*/ - {1, &oid_rt_pro_set_single_carrier_tx_hdl}, /*0x0C + {1, oid_rt_pro_set_single_carrier_tx_hdl}, /*0x0C * OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS*/ - {1, &oid_null_function}, /*0x0D + {1, oid_null_function}, /*0x0D * OID_RT_PRO_SET_TX_ANTENNA_BB*/ - {1, &oid_rt_pro_set_antenna_bb_hdl}, /*0x0E*/ - {1, &oid_null_function}, /*0x0F OID_RT_PRO_SET_CR_SCRAMBLER*/ - {1, &oid_null_function}, /*0x10 OID_RT_PRO_SET_CR_NEW_FILTER*/ - {1, &oid_rt_pro_set_tx_power_control_hdl}, /*0x11 + {1, oid_rt_pro_set_antenna_bb_hdl}, /*0x0E*/ + {1, oid_null_function}, /*0x0F OID_RT_PRO_SET_CR_SCRAMBLER*/ + {1, oid_null_function}, /*0x10 OID_RT_PRO_SET_CR_NEW_FILTER*/ + {1, oid_rt_pro_set_tx_power_control_hdl}, /*0x11 * OID_RT_PRO_SET_TX_POWER_CONTROL*/ - {1, &oid_null_function}, /*0x12 OID_RT_PRO_SET_CR_TX_CONFIG*/ - {1, &oid_null_function}, /*0x13 + {1, oid_null_function}, /*0x12 OID_RT_PRO_SET_CR_TX_CONFIG*/ + {1, oid_null_function}, /*0x13 * OID_RT_PRO_GET_TX_POWER_CONTROL*/ - {1, &oid_null_function}, /*0x14 + {1, oid_null_function}, /*0x14 * OID_RT_PRO_GET_CR_SIGNAL_QUALITY*/ - {1, &oid_null_function}, /*0x15 OID_RT_PRO_SET_CR_SETPOINT*/ - {1, &oid_null_function}, /*0x16 OID_RT_PRO_SET_INTEGRATOR*/ - {1, &oid_null_function}, /*0x17 OID_RT_PRO_SET_SIGNAL_QUALITY*/ - {1, &oid_null_function}, /*0x18 OID_RT_PRO_GET_INTEGRATOR*/ - {1, &oid_null_function}, /*0x19 OID_RT_PRO_GET_SIGNAL_QUALITY*/ - {1, &oid_null_function}, /*0x1A OID_RT_PRO_QUERY_EEPROM_TYPE*/ - {1, &oid_null_function}, /*0x1B OID_RT_PRO_WRITE_MAC_ADDRESS*/ - {1, &oid_null_function}, /*0x1C OID_RT_PRO_READ_MAC_ADDRESS*/ - {1, &oid_null_function}, /*0x1D OID_RT_PRO_WRITE_CIS_DATA*/ - {1, &oid_null_function}, /*0x1E 
OID_RT_PRO_READ_CIS_DATA*/ - {1, &oid_null_function} /*0x1F OID_RT_PRO_WRITE_POWER_CONTROL*/ + {1, oid_null_function}, /*0x15 OID_RT_PRO_SET_CR_SETPOINT*/ + {1, oid_null_function}, /*0x16 OID_RT_PRO_SET_INTEGRATOR*/ + {1, oid_null_function}, /*0x17 OID_RT_PRO_SET_SIGNAL_QUALITY*/ + {1, oid_null_function}, /*0x18 OID_RT_PRO_GET_INTEGRATOR*/ + {1, oid_null_function}, /*0x19 OID_RT_PRO_GET_SIGNAL_QUALITY*/ + {1, oid_null_function}, /*0x1A OID_RT_PRO_QUERY_EEPROM_TYPE*/ + {1, oid_null_function}, /*0x1B OID_RT_PRO_WRITE_MAC_ADDRESS*/ + {1, oid_null_function}, /*0x1C OID_RT_PRO_READ_MAC_ADDRESS*/ + {1, oid_null_function}, /*0x1D OID_RT_PRO_WRITE_CIS_DATA*/ + {1, oid_null_function}, /*0x1E OID_RT_PRO_READ_CIS_DATA*/ + {1, oid_null_function} /*0x1F OID_RT_PRO_WRITE_POWER_CONTROL*/ }; static const struct oid_obj_priv oid_rtl_seg_81_80_20[] = { - {1, &oid_null_function}, /*0x20 OID_RT_PRO_READ_POWER_CONTROL*/ - {1, &oid_null_function}, /*0x21 OID_RT_PRO_WRITE_EEPROM*/ - {1, &oid_null_function}, /*0x22 OID_RT_PRO_READ_EEPROM*/ - {1, &oid_rt_pro_reset_tx_packet_sent_hdl}, /*0x23*/ - {1, &oid_rt_pro_query_tx_packet_sent_hdl}, /*0x24*/ - {1, &oid_rt_pro_reset_rx_packet_received_hdl}, /*0x25*/ - {1, &oid_rt_pro_query_rx_packet_received_hdl}, /*0x26*/ - {1, &oid_rt_pro_query_rx_packet_crc32_error_hdl},/*0x27*/ - {1, &oid_null_function}, /*0x28 + {1, oid_null_function}, /*0x20 OID_RT_PRO_READ_POWER_CONTROL*/ + {1, oid_null_function}, /*0x21 OID_RT_PRO_WRITE_EEPROM*/ + {1, oid_null_function}, /*0x22 OID_RT_PRO_READ_EEPROM*/ + {1, oid_rt_pro_reset_tx_packet_sent_hdl}, /*0x23*/ + {1, oid_rt_pro_query_tx_packet_sent_hdl}, /*0x24*/ + {1, oid_rt_pro_reset_rx_packet_received_hdl}, /*0x25*/ + {1, oid_rt_pro_query_rx_packet_received_hdl}, /*0x26*/ + {1, oid_rt_pro_query_rx_packet_crc32_error_hdl},/*0x27*/ + {1, oid_null_function}, /*0x28 *OID_RT_PRO_QUERY_CURRENT_ADDRESS*/ - {1, &oid_null_function}, /*0x29 + {1, oid_null_function}, /*0x29 *OID_RT_PRO_QUERY_PERMANENT_ADDRESS*/ - {1, &oid_null_function}, /*0x2A + {1, oid_null_function}, /*0x2A *OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS*/ - {1, &oid_rt_pro_set_carrier_suppression_tx_hdl},/*0x2B + {1, oid_rt_pro_set_carrier_suppression_tx_hdl},/*0x2B *OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX*/ - {1, &oid_null_function}, /*0x2C OID_RT_PRO_RECEIVE_PACKET*/ - {1, &oid_null_function}, /*0x2D OID_RT_PRO_WRITE_EEPROM_BYTE*/ - {1, &oid_null_function}, /*0x2E OID_RT_PRO_READ_EEPROM_BYTE*/ - {1, &oid_rt_pro_set_modulation_hdl} /*0x2F*/ + {1, oid_null_function}, /*0x2C OID_RT_PRO_RECEIVE_PACKET*/ + {1, oid_null_function}, /*0x2D OID_RT_PRO_WRITE_EEPROM_BYTE*/ + {1, oid_null_function}, /*0x2E OID_RT_PRO_READ_EEPROM_BYTE*/ + {1, oid_rt_pro_set_modulation_hdl} /*0x2F*/ }; static const struct oid_obj_priv oid_rtl_seg_81_80_40[] = { - {1, &oid_null_function}, /*0x40*/ - {1, &oid_null_function}, /*0x41*/ - {1, &oid_null_function}, /*0x42*/ - {1, &oid_rt_pro_set_single_tone_tx_hdl}, /*0x43*/ - {1, &oid_null_function}, /*0x44*/ - {1, &oid_null_function} /*0x45*/ + {1, oid_null_function}, /*0x40*/ + {1, oid_null_function}, /*0x41*/ + {1, oid_null_function}, /*0x42*/ + {1, oid_rt_pro_set_single_tone_tx_hdl}, /*0x43*/ + {1, oid_null_function}, /*0x44*/ + {1, oid_null_function} /*0x45*/ }; static const struct oid_obj_priv oid_rtl_seg_81_80_80[] = { - {1, &oid_null_function}, /*0x80 OID_RT_DRIVER_OPTION*/ - {1, &oid_null_function}, /*0x81 OID_RT_RF_OFF*/ - {1, &oid_null_function} /*0x82 OID_RT_AUTH_STATUS*/ + {1, oid_null_function}, /*0x80 OID_RT_DRIVER_OPTION*/ + {1, oid_null_function}, /*0x81 
OID_RT_RF_OFF*/ + {1, oid_null_function} /*0x82 OID_RT_AUTH_STATUS*/ }; static const struct oid_obj_priv oid_rtl_seg_81_85[] = { - {1, &oid_rt_wireless_mode_hdl} /*0x00 OID_RT_WIRELESS_MODE*/ + {1, oid_rt_wireless_mode_hdl} /*0x00 OID_RT_WIRELESS_MODE*/ }; #else /* _RTL871X_MP_IOCTL_C_ */ @@ -384,7 +384,7 @@ static struct mp_ioctl_handler mp_ioctl_hdl[] = { oid_rt_pro_write_rf_reg_hdl, OID_RT_PRO_RF_WRITE_REGISTRY}, {sizeof(struct rfintfs_parm), NULL, 0}, - {0, &mp_ioctl_xmit_packet_hdl, 0},/*12*/ + {0, mp_ioctl_xmit_packet_hdl, 0},/*12*/ {sizeof(struct psmode_param), NULL, 0},/*13*/ {sizeof(struct eeprom_rw_param), NULL, 0},/*14*/ {sizeof(struct eeprom_rw_param), NULL, 0},/*15*/ diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c index 4ff530155187..616ca3965919 100644 --- a/drivers/staging/rtl8712/rtl871x_recv.c +++ b/drivers/staging/rtl8712/rtl871x_recv.c @@ -72,14 +72,12 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv, _init_queue(&precvpriv->recv_pending_queue); precvpriv->adapter = padapter; precvpriv->free_recvframe_cnt = NR_RECVFRAME; - precvpriv->pallocated_frame_buf = kmalloc(NR_RECVFRAME * + precvpriv->pallocated_frame_buf = kzalloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ, GFP_ATOMIC); if (precvpriv->pallocated_frame_buf == NULL) return _FAIL; kmemleak_not_leak(precvpriv->pallocated_frame_buf); - memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME * - sizeof(union recv_frame) + RXFRAME_ALIGN_SZ); precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf + RXFRAME_ALIGN_SZ - ((addr_t)(precvpriv->pallocated_frame_buf) & @@ -103,21 +101,17 @@ void _r8712_free_recv_priv(struct recv_priv *precvpriv) r8712_free_recv_priv(precvpriv); } -union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue) +union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue) { unsigned long irqL; union recv_frame *precvframe; - struct list_head *plist, *phead; struct _adapter *padapter; struct recv_priv *precvpriv; spin_lock_irqsave(&pfree_recv_queue->lock, irqL); - if (list_empty(&pfree_recv_queue->queue)) { - precvframe = NULL; - } else { - phead = &pfree_recv_queue->queue; - plist = phead->next; - precvframe = LIST_CONTAINOR(plist, union recv_frame, u); + precvframe = list_first_entry_or_null(&pfree_recv_queue->queue, + union recv_frame, u.hdr.list); + if (precvframe) { list_del_init(&precvframe->u.hdr.list); padapter = precvframe->u.hdr.adapter; if (padapter != NULL) { diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c index 162e61c6ea06..e90c00de7499 100644 --- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c +++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c @@ -53,7 +53,7 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv) pstapriv->pallocated_stainfo_buf = kmalloc(sizeof(struct sta_info) * NUM_STA + 4, GFP_ATOMIC); - if (pstapriv->pallocated_stainfo_buf == NULL) + if (!pstapriv->pallocated_stainfo_buf) return _FAIL; pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 - ((addr_t)(pstapriv->pallocated_stainfo_buf) & 3); @@ -89,16 +89,11 @@ static void mfree_all_stainfo(struct sta_priv *pstapriv) spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL); } - -static void mfree_sta_priv_lock(struct sta_priv *pstapriv) -{ - mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */ -} - u32 _r8712_free_sta_priv(struct sta_priv *pstapriv) { if (pstapriv) { - mfree_sta_priv_lock(pstapriv); + /* be done before free sta_hash_lock */ + 
mfree_all_stainfo(pstapriv); kfree(pstapriv->pallocated_stainfo_buf); } return _SUCCESS; @@ -116,13 +111,11 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) unsigned long flags; pfree_sta_queue = &pstapriv->free_sta_queue; - spin_lock_irqsave(&(pfree_sta_queue->lock), flags); - if (list_empty(&pfree_sta_queue->queue)) { - psta = NULL; - } else { - psta = LIST_CONTAINOR(pfree_sta_queue->queue.next, - struct sta_info, list); - list_del_init(&(psta->list)); + spin_lock_irqsave(&pfree_sta_queue->lock, flags); + psta = list_first_entry_or_null(&pfree_sta_queue->queue, + struct sta_info, list); + if (psta) { + list_del_init(&psta->list); _init_stainfo(psta); memcpy(psta->hwaddr, hwaddr, ETH_ALEN); index = wifi_mac_hash(hwaddr); @@ -130,7 +123,7 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) psta = NULL; goto exit; } - phash_list = &(pstapriv->sta_hash[index]); + phash_list = &pstapriv->sta_hash[index]; list_add_tail(&psta->hash_list, phash_list); pstapriv->asoc_sta_count++; @@ -154,7 +147,7 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr) } } exit: - spin_unlock_irqrestore(&(pfree_sta_queue->lock), flags); + spin_unlock_irqrestore(&pfree_sta_queue->lock, flags); return psta; } diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c index 68d65d230fe3..c6d952f5d8f9 100644 --- a/drivers/staging/rtl8712/rtl871x_xmit.c +++ b/drivers/staging/rtl8712/rtl871x_xmit.c @@ -89,7 +89,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, */ pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4, GFP_ATOMIC); - if (pxmitpriv->pallocated_frame_buf == NULL) { + if (!pxmitpriv->pallocated_frame_buf) { pxmitpriv->pxmit_frame_buf = NULL; return _FAIL; } @@ -128,7 +128,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, _init_queue(&pxmitpriv->pending_xmitbuf_queue); pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, GFP_ATOMIC); - if (pxmitpriv->pallocated_xmitbuf == NULL) + if (!pxmitpriv->pallocated_xmitbuf) return _FAIL; pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3); @@ -137,7 +137,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, INIT_LIST_HEAD(&pxmitbuf->list); pxmitbuf->pallocated_buf = kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC); - if (pxmitbuf->pallocated_buf == NULL) + if (!pxmitbuf->pallocated_buf) return _FAIL; pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ - ((addr_t) (pxmitbuf->pallocated_buf) & @@ -241,7 +241,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt, } /* r8712_xmitframe_coalesce() overwrite this!*/ pattrib->pktlen = pktfile.pkt_len; - if (ETH_P_IP == pattrib->ether_type) { + if (pattrib->ether_type == ETH_P_IP) { /* The following is for DHCP and ARP packet, we use cck1M to * tx these packets and let LPS awake some time * to prevent DHCP protocol fail */ @@ -250,7 +250,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt, _r8712_pktfile_read(&pktfile, &tmp[0], 24); pattrib->dhcp_pkt = 0; if (pktfile.pkt_len > 282) {/*MINIMUM_DHCP_PACKET_SIZE)*/ - if (ETH_P_IP == pattrib->ether_type) {/* IP header*/ + if (pattrib->ether_type == ETH_P_IP) {/* IP header*/ if (((tmp[21] == 68) && (tmp[23] == 67)) || ((tmp[21] == 67) && (tmp[23] == 68))) { /* 68 : UDP BOOTP client @@ -741,21 +741,16 @@ void r8712_update_protection(struct _adapter *padapter, u8 *ie, uint ie_len) struct 
xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv) { unsigned long irqL; - struct xmit_buf *pxmitbuf = NULL; - struct list_head *plist, *phead; + struct xmit_buf *pxmitbuf; struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue; spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL); - if (list_empty(&pfree_xmitbuf_queue->queue)) { - pxmitbuf = NULL; - } else { - phead = &pfree_xmitbuf_queue->queue; - plist = phead->next; - pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list); - list_del_init(&(pxmitbuf->list)); - } - if (pxmitbuf != NULL) + pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue, + struct xmit_buf, list); + if (pxmitbuf) { + list_del_init(&pxmitbuf->list); pxmitpriv->free_xmitbuf_cnt--; + } spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL); return pxmitbuf; } @@ -795,20 +790,14 @@ struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv) pfree_xmit_queue */ unsigned long irqL; - struct xmit_frame *pxframe = NULL; - struct list_head *plist, *phead; + struct xmit_frame *pxframe; struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; spin_lock_irqsave(&pfree_xmit_queue->lock, irqL); - if (list_empty(&pfree_xmit_queue->queue)) { - pxframe = NULL; - } else { - phead = &pfree_xmit_queue->queue; - plist = phead->next; - pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list); - list_del_init(&(pxframe->list)); - } - if (pxframe != NULL) { + pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue, + struct xmit_frame, list); + if (pxframe) { + list_del_init(&pxframe->list); pxmitpriv->free_xmitframe_cnt--; pxframe->buf_addr = NULL; pxframe->pxmitbuf = NULL; @@ -954,7 +943,7 @@ static void alloc_hwxmits(struct _adapter *padapter) pxmitpriv->hwxmit_entry = HWXMIT_ENTRY; pxmitpriv->hwxmits = kmalloc_array(pxmitpriv->hwxmit_entry, sizeof(struct hw_xmit), GFP_ATOMIC); - if (pxmitpriv->hwxmits == NULL) + if (!pxmitpriv->hwxmits) return; hwxmits = pxmitpriv->hwxmits; if (pxmitpriv->hwxmit_entry == 5) { diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index c71333fbe823..c1a0ca490546 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -205,12 +205,15 @@ struct drv_priv { static int r871x_suspend(struct usb_interface *pusb_intf, pm_message_t state) { struct net_device *pnetdev = usb_get_intfdata(pusb_intf); + struct _adapter *padapter = netdev_priv(pnetdev); netdev_info(pnetdev, "Suspending...\n"); if (!pnetdev || !netif_running(pnetdev)) { netdev_info(pnetdev, "Unable to suspend\n"); return 0; } + padapter->bSuspended = true; + rtl871x_intf_stop(padapter); if (pnetdev->netdev_ops->ndo_stop) pnetdev->netdev_ops->ndo_stop(pnetdev); mdelay(10); @@ -218,9 +221,16 @@ static int r871x_suspend(struct usb_interface *pusb_intf, pm_message_t state) return 0; } +static void rtl871x_intf_resume(struct _adapter *padapter) +{ + if (padapter->dvobjpriv.inirp_init) + padapter->dvobjpriv.inirp_init(padapter); +} + static int r871x_resume(struct usb_interface *pusb_intf) { struct net_device *pnetdev = usb_get_intfdata(pusb_intf); + struct _adapter *padapter = netdev_priv(pnetdev); netdev_info(pnetdev, "Resuming...\n"); if (!pnetdev || !netif_running(pnetdev)) { @@ -230,6 +240,8 @@ static int r871x_resume(struct usb_interface *pusb_intf) netif_device_attach(pnetdev); if (pnetdev->netdev_ops->ndo_open) pnetdev->netdev_ops->ndo_open(pnetdev); + padapter->bSuspended = false; + rtl871x_intf_resume(padapter); return 0; } @@ -387,11 +399,11 @@ static int 
r871xu_drv_init(struct usb_interface *pusb_intf, SET_NETDEV_DEV(pnetdev, &pusb_intf->dev); pnetdev->dev.type = &wlan_type; /* step 2. */ - padapter->dvobj_init = &r8712_usb_dvobj_init; - padapter->dvobj_deinit = &r8712_usb_dvobj_deinit; - padapter->halpriv.hal_bus_init = &r8712_usb_hal_bus_init; - padapter->dvobjpriv.inirp_init = &r8712_usb_inirp_init; - padapter->dvobjpriv.inirp_deinit = &r8712_usb_inirp_deinit; + padapter->dvobj_init = r8712_usb_dvobj_init; + padapter->dvobj_deinit = r8712_usb_dvobj_deinit; + padapter->halpriv.hal_bus_init = r8712_usb_hal_bus_init; + padapter->dvobjpriv.inirp_init = r8712_usb_inirp_init; + padapter->dvobjpriv.inirp_deinit = r8712_usb_inirp_deinit; /* step 3. * initialize the dvobj_priv */ diff --git a/drivers/staging/rtl8712/usb_ops.c b/drivers/staging/rtl8712/usb_ops.c index 856f257bb77e..9172400efe9a 100644 --- a/drivers/staging/rtl8712/usb_ops.c +++ b/drivers/staging/rtl8712/usb_ops.c @@ -179,22 +179,22 @@ static void usb_intf_hdl_close(u8 *priv) void r8712_usb_set_intf_funs(struct intf_hdl *pintf_hdl) { - pintf_hdl->intf_hdl_init = &usb_intf_hdl_init; - pintf_hdl->intf_hdl_unload = &usb_intf_hdl_unload; - pintf_hdl->intf_hdl_open = &usb_intf_hdl_open; - pintf_hdl->intf_hdl_close = &usb_intf_hdl_close; + pintf_hdl->intf_hdl_init = usb_intf_hdl_init; + pintf_hdl->intf_hdl_unload = usb_intf_hdl_unload; + pintf_hdl->intf_hdl_open = usb_intf_hdl_open; + pintf_hdl->intf_hdl_close = usb_intf_hdl_close; } void r8712_usb_set_intf_ops(struct _io_ops *pops) { memset((u8 *)pops, 0, sizeof(struct _io_ops)); - pops->_read8 = &usb_read8; - pops->_read16 = &usb_read16; - pops->_read32 = &usb_read32; - pops->_read_port = &r8712_usb_read_port; - pops->_write8 = &usb_write8; - pops->_write16 = &usb_write16; - pops->_write32 = &usb_write32; - pops->_write_mem = &r8712_usb_write_mem; - pops->_write_port = &r8712_usb_write_port; + pops->_read8 = usb_read8; + pops->_read16 = usb_read16; + pops->_read32 = usb_read32; + pops->_read_port = r8712_usb_read_port; + pops->_write8 = usb_write8; + pops->_write16 = usb_write16; + pops->_write32 = usb_write32; + pops->_write_mem = r8712_usb_write_mem; + pops->_write_port = r8712_usb_write_port; } diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c index 489a9e6d52fc..454cdf6c7fa1 100644 --- a/drivers/staging/rtl8712/usb_ops_linux.c +++ b/drivers/staging/rtl8712/usb_ops_linux.c @@ -232,9 +232,14 @@ static void r8712_usb_read_port_complete(struct urb *purb) case -EPIPE: case -ENODEV: case -ESHUTDOWN: - case -ENOENT: padapter->bDriverStopped = true; break; + case -ENOENT: + if (!padapter->bSuspended) { + padapter->bDriverStopped = true; + break; + } + /* Fall through. 
*/ case -EPROTO: precvbuf->reuse = true; r8712_read_port(padapter, precvpriv->ff_hwaddr, 0, @@ -329,7 +334,7 @@ void r8712_usb_read_port_cancel(struct _adapter *padapter) void r8712_xmit_bh(void *priv) { int ret = false; - struct _adapter *padapter = (struct _adapter *)priv; + struct _adapter *padapter = priv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; if (padapter->bDriverStopped || diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c index d3981836ce26..695f9b9fc749 100644 --- a/drivers/staging/rtl8712/xmit_linux.c +++ b/drivers/staging/rtl8712/xmit_linux.c @@ -70,10 +70,7 @@ uint _r8712_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen) sint r8712_endofpktfile(struct pkt_file *pfile) { - if (pfile->pkt_len == 0) - return true; - else - return false; + return (pfile->pkt_len == 0); } @@ -131,7 +128,7 @@ int r8712_xmit_resource_alloc(struct _adapter *padapter, for (i = 0; i < 8; i++) { pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL); - if (pxmitbuf->pxmit_urb[i] == NULL) { + if (!pxmitbuf->pxmit_urb[i]) { netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n"); return _FAIL; } @@ -164,19 +161,15 @@ int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev) struct xmit_frame *pxmitframe = NULL; struct _adapter *padapter = netdev_priv(pnetdev); struct xmit_priv *pxmitpriv = &(padapter->xmitpriv); - int ret = 0; if (!r8712_if_up(padapter)) { - ret = 0; goto _xmit_entry_drop; } pxmitframe = r8712_alloc_xmitframe(pxmitpriv); - if (pxmitframe == NULL) { - ret = 0; + if (!pxmitframe) { goto _xmit_entry_drop; } if ((!r8712_update_attrib(padapter, pkt, &pxmitframe->attrib))) { - ret = 0; goto _xmit_entry_drop; } padapter->ledpriv.LedControlHandler(padapter, LED_CTL_TX); @@ -188,11 +181,11 @@ int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev) } pxmitpriv->tx_pkts++; pxmitpriv->tx_bytes += pxmitframe->attrib.last_txcmdsz; - return ret; + return 0; _xmit_entry_drop: if (pxmitframe) r8712_free_xmitframe(pxmitpriv, pxmitframe); pxmitpriv->tx_drop++; dev_kfree_skb_any(pkt); - return ret; + return 0; } diff --git a/drivers/staging/rtl8723au/TODO b/drivers/staging/rtl8723au/TODO index 175a0ceb7421..f5d57d32fae6 100644 --- a/drivers/staging/rtl8723au/TODO +++ b/drivers/staging/rtl8723au/TODO @@ -9,5 +9,5 @@ TODO: - merge Realtek's bugfixes and new features into the driver - switch to use MAC80211 -Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>, +Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Jes Sorensen <Jes.Sorensen@redhat.com>, and Larry Finger <Larry.Finger@lwfinger.net>. 
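Many of the rtl8712 hunks above (rtl871x_cmd.c, rtl871x_mlme.c, rtl871x_recv.c, rtl871x_sta_mgt.c, rtl871x_xmit.c) replace the open-coded list_empty()/LIST_CONTAINOR() dequeue with list_first_entry_or_null() under the queue lock. The following is a minimal sketch of the resulting pattern, not driver code: struct item, its embedded list member, struct my_queue and dequeue_item() are hypothetical stand-ins, and the locking simply mirrors the spin_lock_irqsave() usage seen in those hunks.

/* Hypothetical illustration of the dequeue pattern used in the hunks above. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct item {
	struct list_head list;	/* linkage inside the free queue */
	int payload;
};

struct my_queue {
	struct list_head queue;	/* list of free items */
	spinlock_t lock;	/* protects the list */
};

static struct item *dequeue_item(struct my_queue *q)
{
	struct item *it;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	/* Returns NULL when the list is empty, otherwise the first entry. */
	it = list_first_entry_or_null(&q->queue, struct item, list);
	if (it)
		list_del_init(&it->list);
	spin_unlock_irqrestore(&q->lock, flags);
	return it;
}

Compared with the old list_empty()/LIST_CONTAINOR() form, the helper folds the emptiness check and the container_of() lookup into one call, so the branch structure stays flat and the lock is held for the same span.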
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c index 1aa9b267c30e..f68e2770255d 100644 --- a/drivers/staging/rtl8723au/core/rtw_ap.c +++ b/drivers/staging/rtl8723au/core/rtw_ap.c @@ -171,24 +171,20 @@ static u8 chk_sta_is_alive(struct sta_info *psta) return ret; } -void expire_timeout_chk23a(struct rtw_adapter *padapter) +void expire_timeout_chk23a(struct rtw_adapter *padapter) { - struct list_head *phead, *plist, *ptmp; + struct list_head *phead; u8 updated = 0; - struct sta_info *psta; + struct sta_info *psta, *ptmp; struct sta_priv *pstapriv = &padapter->stapriv; u8 chk_alive_num = 0; struct sta_info *chk_alive_list[NUM_STA]; int i; spin_lock_bh(&pstapriv->auth_list_lock); - phead = &pstapriv->auth_list; - /* check auth_queue */ - list_for_each_safe(plist, ptmp, phead) { - psta = container_of(plist, struct sta_info, auth_list); - + list_for_each_entry_safe(psta, ptmp, phead, auth_list) { if (psta->expire_to > 0) { psta->expire_to--; if (psta->expire_to == 0) { @@ -206,19 +202,13 @@ void expire_timeout_chk23a(struct rtw_adapter *padapter) spin_lock_bh(&pstapriv->auth_list_lock); } } - } - spin_unlock_bh(&pstapriv->auth_list_lock); spin_lock_bh(&pstapriv->asoc_list_lock); - phead = &pstapriv->asoc_list; - /* check asoc_queue */ - list_for_each_safe(plist, ptmp, phead) { - psta = container_of(plist, struct sta_info, asoc_list); - + list_for_each_entry_safe(psta, ptmp, phead, asoc_list) { if (chk_sta_is_alive(psta) || !psta->expire_to) { psta->expire_to = pstapriv->expire_to; psta->keep_alive_trycnt = 0; @@ -283,7 +273,6 @@ void expire_timeout_chk23a(struct rtw_adapter *padapter) } } } - spin_unlock_bh(&pstapriv->asoc_list_lock); if (chk_alive_num) { @@ -1057,103 +1046,6 @@ void rtw_set_macaddr_acl23a(struct rtw_adapter *padapter, int mode) pacl_list->mode = mode; } -int rtw_acl_add_sta23a(struct rtw_adapter *padapter, u8 *addr) -{ - struct list_head *plist, *phead; - u8 added = false; - int i, ret = 0; - struct rtw_wlan_acl_node *paclnode; - struct sta_priv *pstapriv = &padapter->stapriv; - struct wlan_acl_pool *pacl_list = &pstapriv->acl_list; - struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q; - - DBG_8723A("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, addr); - - if ((NUM_ACL-1) < pacl_list->num) - return -1; - - spin_lock_bh(&pacl_node_q->lock); - - phead = get_list_head(pacl_node_q); - - list_for_each(plist, phead) { - paclnode = container_of(plist, struct rtw_wlan_acl_node, list); - - if (!memcmp(paclnode->addr, addr, ETH_ALEN)) { - if (paclnode->valid == true) { - added = true; - DBG_8723A("%s, sta has been added\n", __func__); - break; - } - } - } - - spin_unlock_bh(&pacl_node_q->lock); - - if (added) - return ret; - - spin_lock_bh(&pacl_node_q->lock); - - for (i = 0; i < NUM_ACL; i++) { - paclnode = &pacl_list->aclnode[i]; - - if (!paclnode->valid) { - INIT_LIST_HEAD(&paclnode->list); - - memcpy(paclnode->addr, addr, ETH_ALEN); - - paclnode->valid = true; - - list_add_tail(&paclnode->list, get_list_head(pacl_node_q)); - - pacl_list->num++; - - break; - } - } - - DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num); - - spin_unlock_bh(&pacl_node_q->lock); - return ret; -} - -int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr) -{ - struct list_head *plist, *phead, *ptmp; - struct rtw_wlan_acl_node *paclnode; - struct sta_priv *pstapriv = &padapter->stapriv; - struct wlan_acl_pool *pacl_list = &pstapriv->acl_list; - struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q; - - DBG_8723A("%s(acl_num =%d) = 
%pM\n", __func__, pacl_list->num, addr); - - spin_lock_bh(&pacl_node_q->lock); - - phead = get_list_head(pacl_node_q); - - list_for_each_safe(plist, ptmp, phead) { - paclnode = container_of(plist, struct rtw_wlan_acl_node, list); - - if (!memcmp(paclnode->addr, addr, ETH_ALEN)) { - if (paclnode->valid) { - paclnode->valid = false; - - list_del_init(&paclnode->list); - - pacl_list->num--; - } - } - } - - spin_unlock_bh(&pacl_node_q->lock); - - DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num); - - return 0; -} - static void update_bcn_erpinfo_ie(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ -1354,20 +1246,14 @@ void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated) { /* update associated stations cap. */ if (updated == true) { - struct list_head *phead, *plist, *ptmp; - struct sta_info *psta; + struct list_head *phead; + struct sta_info *psta, *ptmp; struct sta_priv *pstapriv = &padapter->stapriv; spin_lock_bh(&pstapriv->asoc_list_lock); - phead = &pstapriv->asoc_list; - - list_for_each_safe(plist, ptmp, phead) { - psta = container_of(plist, struct sta_info, asoc_list); - + list_for_each_entry_safe(psta, ptmp, phead, asoc_list) VCS_update23a(padapter, psta); - } - spin_unlock_bh(&pstapriv->asoc_list_lock); } } @@ -1625,41 +1511,10 @@ u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool acti return beacon_updated; } -int rtw_ap_inform_ch_switch23a(struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset) -{ - struct list_head *phead, *plist; - struct sta_info *psta = NULL; - struct sta_priv *pstapriv = &padapter->stapriv; - struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; - struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; - u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - - if ((pmlmeinfo->state&0x03) != MSR_AP) - return 0; - - DBG_8723A("%s(%s): with ch:%u, offset:%u\n", __func__, - padapter->pnetdev->name, new_ch, ch_offset); - - spin_lock_bh(&pstapriv->asoc_list_lock); - phead = &pstapriv->asoc_list; - - list_for_each(plist, phead) { - psta = container_of(plist, struct sta_info, asoc_list); - - issue_action_spct_ch_switch23a(padapter, psta->hwaddr, new_ch, ch_offset); - psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 
5 : (pstapriv->expire_to * 2); - } - spin_unlock_bh(&pstapriv->asoc_list_lock); - - issue_action_spct_ch_switch23a(padapter, bc_addr, new_ch, ch_offset); - - return 0; -} - int rtw_sta_flush23a(struct rtw_adapter *padapter) { - struct list_head *phead, *plist, *ptmp; - struct sta_info *psta; + struct list_head *phead; + struct sta_info *psta, *ptmp; struct sta_priv *pstapriv = &padapter->stapriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; @@ -1675,10 +1530,7 @@ int rtw_sta_flush23a(struct rtw_adapter *padapter) spin_lock_bh(&pstapriv->asoc_list_lock); phead = &pstapriv->asoc_list; - - list_for_each_safe(plist, ptmp, phead) { - psta = container_of(plist, struct sta_info, asoc_list); - + list_for_each_entry_safe(psta, ptmp, phead, asoc_list) { /* Remove sta from asoc_list */ list_del_init(&psta->asoc_list); pstapriv->asoc_list_cnt--; @@ -1744,9 +1596,9 @@ void rtw_ap_restore_network(struct rtw_adapter *padapter) struct mlme_priv *mlmepriv = &padapter->mlmepriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct sta_priv *pstapriv = &padapter->stapriv; - struct sta_info *psta; + struct sta_info *psta, *ptmp; struct security_priv *psecuritypriv = &padapter->securitypriv; - struct list_head *phead, *plist, *ptmp; + struct list_head *phead; u8 chk_alive_num = 0; struct sta_info *chk_alive_list[NUM_STA]; int i; @@ -1775,15 +1627,9 @@ void rtw_ap_restore_network(struct rtw_adapter *padapter) } spin_lock_bh(&pstapriv->asoc_list_lock); - phead = &pstapriv->asoc_list; - - list_for_each_safe(plist, ptmp, phead) { - psta = container_of(plist, struct sta_info, asoc_list); - + list_for_each_entry_safe(psta, ptmp, phead, asoc_list) chk_alive_list[chk_alive_num++] = psta; - } - spin_unlock_bh(&pstapriv->asoc_list_lock); for (i = 0; i < chk_alive_num; i++) { @@ -1841,8 +1687,8 @@ void start_ap_mode23a(struct rtw_adapter *padapter) void stop_ap_mode23a(struct rtw_adapter *padapter) { - struct list_head *phead, *plist, *ptmp; - struct rtw_wlan_acl_node *paclnode; + struct list_head *phead; + struct rtw_wlan_acl_node *paclnode, *ptmp; struct sta_info *psta = NULL; struct sta_priv *pstapriv = &padapter->stapriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ -1864,15 +1710,10 @@ void stop_ap_mode23a(struct rtw_adapter *padapter) /* for ACL */ spin_lock_bh(&pacl_node_q->lock); phead = get_list_head(pacl_node_q); - - list_for_each_safe(plist, ptmp, phead) { - paclnode = container_of(plist, struct rtw_wlan_acl_node, list); - + list_for_each_entry_safe(paclnode, ptmp, phead, list) { if (paclnode->valid == true) { paclnode->valid = false; - list_del_init(&paclnode->list); - pacl_list->num--; } } diff --git a/drivers/staging/rtl8723au/core/rtw_cmd.c b/drivers/staging/rtl8723au/core/rtw_cmd.c index 3035bb864c39..cd4e0f05d82f 100644 --- a/drivers/staging/rtl8723au/core/rtw_cmd.c +++ b/drivers/staging/rtl8723au/core/rtw_cmd.c @@ -295,8 +295,7 @@ static void rtw_cmd_work(struct work_struct *work) post_process: /* call callback function for post-processed */ - if (pcmd->cmdcode < (sizeof(rtw_cmd_callback) / - sizeof(struct _cmd_callback))) { + if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) { pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback; if (!pcmd_callback) { RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, diff --git a/drivers/staging/rtl8723au/core/rtw_efuse.c b/drivers/staging/rtl8723au/core/rtw_efuse.c index f174b4d1a018..359ef4197e94 100644 --- a/drivers/staging/rtl8723au/core/rtw_efuse.c +++ 
b/drivers/staging/rtl8723au/core/rtw_efuse.c @@ -269,8 +269,8 @@ u8 EFUSE_Read1Byte23a(struct rtw_adapter *Adapter, u16 Address) } data = rtl8723au_read8(Adapter, EFUSE_CTRL); return data; - } else - return 0xFF; + } + return 0xFF; } /* Read one byte from real Efuse. */ diff --git a/drivers/staging/rtl8723au/core/rtw_mlme.c b/drivers/staging/rtl8723au/core/rtw_mlme.c index 3c09ea9b7348..a786fc4bdb53 100644 --- a/drivers/staging/rtl8723au/core/rtw_mlme.c +++ b/drivers/staging/rtl8723au/core/rtw_mlme.c @@ -171,21 +171,15 @@ exit: void rtw_free_network_queue23a(struct rtw_adapter *padapter) { - struct list_head *phead, *plist, *ptmp; - struct wlan_network *pnetwork; + struct list_head *phead; + struct wlan_network *pnetwork, *ptmp; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct rtw_queue *scanned_queue = &pmlmepriv->scanned_queue; spin_lock_bh(&scanned_queue->lock); - phead = get_list_head(scanned_queue); - - list_for_each_safe(plist, ptmp, phead) { - pnetwork = container_of(plist, struct wlan_network, list); - + list_for_each_entry_safe(pnetwork, ptmp, phead, list) _rtw_free_network23a(pmlmepriv, pnetwork); - } - spin_unlock_bh(&scanned_queue->lock); } @@ -329,15 +323,12 @@ int is_same_network23a(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst) struct wlan_network * rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue) { - struct list_head *plist, *phead; + struct list_head *phead; struct wlan_network *pwlan; struct wlan_network *oldest = NULL; phead = get_list_head(scanned_queue); - - list_for_each(plist, phead) { - pwlan = container_of(plist, struct wlan_network, list); - + list_for_each_entry(pwlan, phead, list) { if (pwlan->fixed != true) { if (!oldest || time_after(oldest->last_scanned, pwlan->last_scanned)) @@ -445,7 +436,6 @@ static void rtw_update_scanned_network(struct rtw_adapter *adapter, spin_lock_bh(&queue->lock); phead = get_list_head(queue); - list_for_each(plist, phead) { pnetwork = container_of(plist, struct wlan_network, list); @@ -710,21 +700,17 @@ rtw_surveydone_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf) static void free_scanqueue(struct mlme_priv *pmlmepriv) { - struct wlan_network *pnetwork; + struct wlan_network *pnetwork, *ptemp; struct rtw_queue *scan_queue = &pmlmepriv->scanned_queue; - struct list_head *plist, *phead, *ptemp; + struct list_head *phead; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, "+free_scanqueue\n"); spin_lock_bh(&scan_queue->lock); - phead = get_list_head(scan_queue); - - list_for_each_safe(plist, ptemp, phead) { - pnetwork = container_of(plist, struct wlan_network, list); + list_for_each_entry_safe(pnetwork, ptemp, phead, list) { pnetwork->fixed = false; _rtw_free_network23a(pmlmepriv, pnetwork); } - spin_unlock_bh(&scan_queue->lock); } @@ -1625,27 +1611,16 @@ exit: static struct wlan_network * rtw_select_candidate_from_queue(struct mlme_priv *pmlmepriv) { - struct wlan_network *pnetwork, *candidate = NULL; + struct wlan_network *pnetwork, *ptmp, *candidate = NULL; struct rtw_queue *queue = &pmlmepriv->scanned_queue; - struct list_head *phead, *plist, *ptmp; + struct list_head *phead; spin_lock_bh(&pmlmepriv->scanned_queue.lock); phead = get_list_head(queue); - - list_for_each_safe(plist, ptmp, phead) { - pnetwork = container_of(plist, struct wlan_network, list); - if (!pnetwork) { - RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, - "%s: return _FAIL:(pnetwork == NULL)\n", - __func__); - goto exit; - } - + list_for_each_entry_safe(pnetwork, ptmp, phead, list) rtw_check_join_candidate(pmlmepriv, 
&candidate, pnetwork); - } - -exit: spin_unlock_bh(&pmlmepriv->scanned_queue.lock); + return candidate; } diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c index d28f29a93810..f4fff385aeb2 100644 --- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c @@ -2154,8 +2154,7 @@ OnAction23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame) category = mgmt->u.action.category; - for (i = 0; - i < sizeof(OnAction23a_tbl) / sizeof(struct action_handler); i++) { + for (i = 0; i < ARRAY_SIZE(OnAction23a_tbl); i++) { ptable = &OnAction23a_tbl[i]; if (category == ptable->num) @@ -2656,8 +2655,6 @@ static void issue_probersp(struct rtw_adapter *padapter, unsigned char *da) pattrib->last_txcmdsz = pattrib->pktlen; dump_mgntframe23a(padapter, pmgntframe); - - return; } static int _issue_probereq(struct rtw_adapter *padapter, @@ -2957,8 +2954,6 @@ static void issue_auth(struct rtw_adapter *padapter, struct sta_info *psta, rtw_wep_encrypt23a(padapter, pmgntframe); DBG_8723A("%s\n", __func__); dump_mgntframe23a(padapter, pmgntframe); - - return; } #ifdef CONFIG_8723AU_AP_MODE @@ -3338,8 +3333,6 @@ exit: } } else kfree(pmlmepriv->assoc_req); - - return; } /* when wait_ack is true, this function should be called at process context */ @@ -4102,8 +4095,6 @@ static void rtw_site_survey(struct rtw_adapter *padapter) pmlmeext->chan_scan_time = SURVEY_TO; pmlmeext->sitesurvey_res.state = SCAN_DISABLE; } - - return; } /* collect bss info from Beacon and Probe request/response frames. */ @@ -4759,8 +4750,6 @@ void report_survey_event23a(struct rtw_adapter *padapter, rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj); pmlmeext->sitesurvey_res.bss_cnt++; - - return; } void report_surveydone_event23a(struct rtw_adapter *padapter) @@ -4802,8 +4791,6 @@ void report_surveydone_event23a(struct rtw_adapter *padapter) DBG_8723A("survey done event(%x)\n", psurveydone_evt->bss_cnt); rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj); - - return; } void report_join_res23a(struct rtw_adapter *padapter, int res) @@ -4850,8 +4837,6 @@ void report_join_res23a(struct rtw_adapter *padapter, int res) rtw_joinbss_event_prehandle23a(padapter, (u8 *)&pjoinbss_evt->network); rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj); - - return; } void report_del_sta_event23a(struct rtw_adapter *padapter, @@ -4906,8 +4891,6 @@ void report_del_sta_event23a(struct rtw_adapter *padapter, DBG_8723A("report_del_sta_event23a: delete STA, mac_id =%d\n", mac_id); rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj); - - return; } void report_add_sta_event23a(struct rtw_adapter *padapter, @@ -4951,8 +4934,6 @@ void report_add_sta_event23a(struct rtw_adapter *padapter, DBG_8723A("report_add_sta_event23a: add STA\n"); rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj); - - return; } /**************************************************************************** @@ -5394,8 +5375,6 @@ static void link_timer_hdl(unsigned long data) issue_assocreq(padapter); set_link_timer(pmlmeext, REASSOC_TO); } - - return; } static void addba_timer_hdl(unsigned long data) @@ -6082,10 +6061,10 @@ int tx_beacon_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf) #ifdef CONFIG_8723AU_AP_MODE else { /* tx bc/mc frames after update TIM */ struct sta_info *psta_bmc; - struct list_head *plist, *phead, *ptmp; - struct xmit_frame *pxmitframe; + struct list_head *phead; + struct xmit_frame *pxmitframe, *ptmp; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - struct sta_priv *pstapriv = &padapter->stapriv; + struct sta_priv 
*pstapriv = &padapter->stapriv; /* for BC/MC Frames */ psta_bmc = rtw_get_bcmc_stainfo23a(padapter); @@ -6099,10 +6078,8 @@ int tx_beacon_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf) phead = get_list_head(&psta_bmc->sleep_q); - list_for_each_safe(plist, ptmp, phead) { - pxmitframe = container_of(plist, - struct xmit_frame, - list); + list_for_each_entry_safe(pxmitframe, ptmp, + phead, list) { list_del_init(&pxmitframe->list); @@ -6119,7 +6096,6 @@ int tx_beacon_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf) rtl8723au_hal_xmitframe_enqueue(padapter, pxmitframe); } - /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */ spin_unlock_bh(&pxmitpriv->lock); } diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c index 404b61898d08..989ed0726817 100644 --- a/drivers/staging/rtl8723au/core/rtw_recv.c +++ b/drivers/staging/rtl8723au/core/rtw_recv.c @@ -85,16 +85,15 @@ int _rtw_init_recv_priv23a(struct recv_priv *precvpriv, return res; } -void _rtw_free_recv_priv23a (struct recv_priv *precvpriv) +void _rtw_free_recv_priv23a(struct recv_priv *precvpriv) { struct rtw_adapter *padapter = precvpriv->adapter; - struct recv_frame *precvframe; - struct list_head *plist, *ptmp; + struct recv_frame *precvframe, *ptmp; rtw_free_uc_swdec_pending_queue23a(padapter); - list_for_each_safe(plist, ptmp, &precvpriv->free_recv_queue.queue) { - precvframe = container_of(plist, struct recv_frame, list); + list_for_each_entry_safe(precvframe, ptmp, + &precvpriv->free_recv_queue.queue, list) { list_del_init(&precvframe->list); kfree(precvframe); } @@ -105,21 +104,14 @@ void _rtw_free_recv_priv23a (struct recv_priv *precvpriv) struct recv_frame *rtw_alloc_recvframe23a(struct rtw_queue *pfree_recv_queue) { struct recv_frame *pframe; - struct list_head *plist, *phead; struct rtw_adapter *padapter; struct recv_priv *precvpriv; spin_lock_bh(&pfree_recv_queue->lock); - if (list_empty(&pfree_recv_queue->queue)) - pframe = NULL; - else { - phead = get_list_head(pfree_recv_queue); - - plist = phead->next; - - pframe = container_of(plist, struct recv_frame, list); - + pframe = list_first_entry_or_null(&pfree_recv_queue->queue, + struct recv_frame, list); + if (pframe) { list_del_init(&pframe->list); padapter = pframe->adapter; if (padapter) { @@ -195,19 +187,13 @@ using spinlock to protect static void rtw_free_recvframe23a_queue(struct rtw_queue *pframequeue) { - struct recv_frame *hdr; - struct list_head *plist, *phead, *ptmp; + struct recv_frame *hdr, *ptmp; + struct list_head *phead; spin_lock(&pframequeue->lock); - phead = get_list_head(pframequeue); - plist = phead->next; - - list_for_each_safe(plist, ptmp, phead) { - hdr = container_of(plist, struct recv_frame, list); + list_for_each_entry_safe(hdr, ptmp, phead, list) rtw_free_recvframe23a(hdr); - } - spin_unlock(&pframequeue->lock); } @@ -254,21 +240,13 @@ struct recv_buf *rtw_dequeue_recvbuf23a (struct rtw_queue *queue) { unsigned long irqL; struct recv_buf *precvbuf; - struct list_head *plist, *phead; spin_lock_irqsave(&queue->lock, irqL); - if (list_empty(&queue->queue)) { - precvbuf = NULL; - } else { - phead = get_list_head(queue); - - plist = phead->next; - - precvbuf = container_of(plist, struct recv_buf, list); - + precvbuf = list_first_entry_or_null(&queue->queue, + struct recv_buf, list); + if (precvbuf) list_del_init(&precvbuf->list); - } spin_unlock_irqrestore(&queue->lock, irqL); @@ -286,7 +264,6 @@ int recvframe_chkmic(struct rtw_adapter *adapter, u8 bmic_err = false, brpt_micerror = true; u8 *pframe, 
*payload, *pframemic; u8 *mickey; - /* u8 *iv, rxdata_key_idx = 0; */ struct sta_info *stainfo; struct rx_pkt_attrib *prxattrib = &precvframe->attrib; struct security_priv *psecuritypriv = &adapter->securitypriv; @@ -361,33 +338,19 @@ int recvframe_chkmic(struct rtw_adapter *adapter, int i; RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, - "*(pframemic-8)-*(pframemic-1) =0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", - *(pframemic - 8), *(pframemic - 7), - *(pframemic - 6), *(pframemic - 5), - *(pframemic - 4), *(pframemic - 3), - *(pframemic - 2), *(pframemic - 1)); + "*(pframemic-8)-*(pframemic-1) =%*phC\n", + 8, pframemic - 8); RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, - "*(pframemic-16)-*(pframemic-9) =0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", - *(pframemic - 16), *(pframemic - 15), - *(pframemic - 14), *(pframemic - 13), - *(pframemic - 12), *(pframemic - 11), - *(pframemic - 10), *(pframemic - 9)); + "*(pframemic-16)-*(pframemic-9) =%*phC\n", + 8, pframemic - 16); RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "====== demp packet (len =%d) ======\n", precvframe->pkt->len); for (i = 0; i < precvframe->pkt->len; i = i + 8) { RT_TRACE(_module_rtl871x_recv_c_, - _drv_err_, - "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", - *(precvframe->pkt->data+i), - *(precvframe->pkt->data+i+1), - *(precvframe->pkt->data+i+2), - *(precvframe->pkt->data+i+3), - *(precvframe->pkt->data+i+4), - *(precvframe->pkt->data+i+5), - *(precvframe->pkt->data+i+6), - *(precvframe->pkt->data+i+7)); + _drv_err_, "%*phC\n", + 8, precvframe->pkt->data + i); } RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "====== demp packet end [len =%d]======\n", @@ -1100,22 +1063,17 @@ static int validate_recv_ctrl_frame(struct rtw_adapter *padapter, if ((psta->state & WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap & CHKBIT(psta->aid))) { - struct list_head *xmitframe_plist, *xmitframe_phead; + struct list_head *xmitframe_phead; struct xmit_frame *pxmitframe; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); - xmitframe_plist = xmitframe_phead->next; - - if (!list_empty(xmitframe_phead)) { - pxmitframe = container_of(xmitframe_plist, - struct xmit_frame, - list); - - xmitframe_plist = xmitframe_plist->next; - + pxmitframe = list_first_entry_or_null(xmitframe_phead, + struct xmit_frame, + list); + if (pxmitframe) { list_del_init(&pxmitframe->list); psta->sleepq_len--; @@ -1127,30 +1085,20 @@ static int validate_recv_ctrl_frame(struct rtw_adapter *padapter, pxmitframe->attrib.triggered = 1; - /* DBG_8723A("handling ps-poll, q_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */ - rtl8723au_hal_xmitframe_enqueue(padapter, pxmitframe); if (psta->sleepq_len == 0) { pstapriv->tim_bitmap &= ~CHKBIT(psta->aid); - - /* DBG_8723A("after handling ps-poll, tim =%x\n", pstapriv->tim_bitmap); */ - - /* update BCN for TIM IE */ - /* update_BCNTIM(padapter); */ update_beacon23a(padapter, WLAN_EID_TIM, NULL, false); } - /* spin_unlock_bh(&psta->sleep_q.lock); */ spin_unlock_bh(&pxmitpriv->lock); } else { - /* spin_unlock_bh(&psta->sleep_q.lock); */ spin_unlock_bh(&pxmitpriv->lock); - /* DBG_8723A("no buffered packets to xmit\n"); */ if (pstapriv->tim_bitmap & CHKBIT(psta->aid)) { if (psta->sleepq_len == 0) { DBG_8723A("no buffered packets " @@ -1169,8 +1117,6 @@ static int validate_recv_ctrl_frame(struct rtw_adapter *padapter, pstapriv->tim_bitmap &= ~CHKBIT(psta->aid); - /* update BCN for TIM IE */ - /* 
update_BCNTIM(padapter); */ update_beacon23a(padapter, WLAN_EID_TIM, NULL, false); } @@ -1190,7 +1136,6 @@ static int validate_recv_mgnt_frame(struct rtw_adapter *padapter, struct sta_info *psta; struct sk_buff *skb; struct ieee80211_hdr *hdr; - /* struct mlme_priv *pmlmepriv = &adapter->mlmepriv; */ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "+validate_recv_mgnt_frame\n"); @@ -1298,8 +1243,6 @@ static int validate_recv_data_frame(struct rtw_adapter *adapter, goto exit; } - /* psta->rssi = prxcmd->rssi; */ - /* psta->signal_quality = prxcmd->sq; */ precv_frame->psta = psta; pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr); @@ -1402,11 +1345,7 @@ static void dump_rx_pkt(struct sk_buff *skb, u16 type, int level) DBG_8723A("#############################\n"); for (i = 0; i < 64; i = i + 8) - DBG_8723A("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", - *(ptr + i), *(ptr + i + 1), *(ptr + i + 2), - *(ptr + i + 3), *(ptr + i + 4), - *(ptr + i + 5), *(ptr + i + 6), - *(ptr + i + 7)); + DBG_8723A("%*phC:\n", 8, ptr + i); DBG_8723A("#############################\n"); } } @@ -1513,7 +1452,6 @@ static int wlanhdr_to_ethhdr (struct recv_frame *precvframe) psnap = ptr + hdrlen; eth_type = (psnap[6] << 8) | psnap[7]; /* convert hdr + possible LLC headers into Ethernet header */ - /* eth_type = (psnap_type[0] << 8) | psnap_type[1]; */ if ((ether_addr_equal(psnap, rfc1042_header) && eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) || ether_addr_equal(psnap, bridge_tunnel_header)) { @@ -1567,22 +1505,19 @@ struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter, struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter, struct rtw_queue *defrag_q) { - struct list_head *plist, *phead, *ptmp; - u8 *data, wlanhdr_offset; - u8 curfragnum; - struct recv_frame *pnfhdr; + struct list_head *phead; + u8 wlanhdr_offset; + u8 curfragnum; + struct recv_frame *pnfhdr, *ptmp; struct recv_frame *prframe, *pnextrframe; - struct rtw_queue *pfree_recv_queue; + struct rtw_queue *pfree_recv_queue; struct sk_buff *skb; - - curfragnum = 0; pfree_recv_queue = &adapter->recvpriv.free_recv_queue; phead = get_list_head(defrag_q); - plist = phead->next; - prframe = container_of(plist, struct recv_frame, list); + prframe = list_first_entry(phead, struct recv_frame, list); list_del_init(&prframe->list); skb = prframe->pkt; @@ -1597,12 +1532,7 @@ struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter, curfragnum++; - phead = get_list_head(defrag_q); - - data = prframe->pkt->data; - - list_for_each_safe(plist, ptmp, phead) { - pnfhdr = container_of(plist, struct recv_frame, list); + list_for_each_entry_safe(pnfhdr, ptmp, phead, list) { pnextrframe = (struct recv_frame *)pnfhdr; /* check the fragment sequence (2nd ~n fragment frame) */ @@ -1644,8 +1574,6 @@ struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter, RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "Performance defrag!!!!!\n"); - - return prframe; } @@ -1844,11 +1772,6 @@ static int enqueue_reorder_recvframe23a(struct recv_reorder_ctrl *preorder_ctrl, struct rx_pkt_attrib *pnextattrib; ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; - /* DbgPrint("+enqueue_reorder_recvframe23a()\n"); */ - - /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */ - /* spin_lock_ex(&ppending_recvframe_queue->lock); */ - phead = get_list_head(ppending_recvframe_queue); list_for_each_safe(plist, ptmp, phead) { @@ -1859,26 +1782,17 @@ static int enqueue_reorder_recvframe23a(struct recv_reorder_ctrl *preorder_ctrl, continue; } else if 
(SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num)) { /* Duplicate entry is found!! Do not insert current entry. */ - - /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */ return false; } else { break; } - /* DbgPrint("enqueue_reorder_recvframe23a():while\n"); */ } - /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */ - /* spin_lock_ex(&ppending_recvframe_queue->lock); */ - list_del_init(&prframe->list); list_add_tail(&prframe->list, plist); - /* spin_unlock_ex(&ppending_recvframe_queue->lock); */ - /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */ - return true; } @@ -1889,30 +1803,21 @@ int recv_indicatepkts_in_order(struct rtw_adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced) { - /* u8 bcancelled; */ struct list_head *phead, *plist; struct recv_frame *prframe; struct rx_pkt_attrib *pattrib; - /* u8 index = 0; */ int bPktInBuf = false; struct recv_priv *precvpriv; struct rtw_queue *ppending_recvframe_queue; precvpriv = &padapter->recvpriv; ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; - /* DbgPrint("+recv_indicatepkts_in_order\n"); */ - - /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */ - /* spin_lock_ex(&ppending_recvframe_queue->lock); */ - phead = get_list_head(ppending_recvframe_queue); plist = phead->next; /* Handling some condition for forced indicate case. */ if (bforced) { if (list_empty(phead)) { - /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */ - /* spin_unlock_ex(&ppending_recvframe_queue->lock); */ return true; } @@ -1962,12 +1867,8 @@ int recv_indicatepkts_in_order(struct rtw_adapter *padapter, break; } - /* DbgPrint("recv_indicatepkts_in_order():while\n"); */ } - /* spin_unlock_ex(&ppending_recvframe_queue->lock); */ - /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */ - return bPktInBuf; } @@ -2083,8 +1984,6 @@ void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext) return; } - /* DBG_8723A("+rtw_reordering_ctrl_timeout_handler23a() =>\n"); */ - spin_lock_bh(&ppending_recvframe_queue->lock); if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true) { @@ -2101,14 +2000,10 @@ int process_recv_indicatepkts(struct rtw_adapter *padapter, struct recv_frame *prframe) { int retval = _SUCCESS; - /* struct recv_priv *precvpriv = &padapter->recvpriv; */ - /* struct rx_pkt_attrib *pattrib = &prframe->attrib; */ struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; if (phtpriv->ht_option == true) { /* B/G/N Mode */ - /* prframe->preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */ - /* including perform A-MPDU Rx Ordering Buffer Control */ if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) { if ((padapter->bDriverStopped == false) && diff --git a/drivers/staging/rtl8723au/core/rtw_security.c b/drivers/staging/rtl8723au/core/rtw_security.c index 038b57b3afe2..5a4cfdf1ebd4 100644 --- a/drivers/staging/rtl8723au/core/rtw_security.c +++ b/drivers/staging/rtl8723au/core/rtw_security.c @@ -634,7 +634,7 @@ int rtw_tkip_encrypt23a(struct rtw_adapter *padapter, &pattrib->ra[0]); } - if (stainfo == NULL) { + if (!stainfo) { RT_TRACE(_module_rtl871x_security_c_, _drv_err_, "%s: stainfo == NULL!!!\n", __func__); DBG_8723A("%s, psta == NUL\n", __func__); @@ -731,7 +731,7 @@ int rtw_tkip_decrypt23a(struct rtw_adapter *padapter, stainfo = rtw_get_stainfo23a(&padapter->stapriv, &prxattrib->ta[0]); - if (stainfo == NULL) { + if (!stainfo) { RT_TRACE(_module_rtl871x_security_c_, _drv_err_, "%s: stainfo 
== NULL!!!\n", __func__); return _FAIL; @@ -1617,9 +1617,9 @@ exit: return res; } -void rtw_use_tkipkey_handler23a(void *FunctionContext) +void rtw_use_tkipkey_handler23a(void *function_context) { - struct rtw_adapter *padapter = (struct rtw_adapter *)FunctionContext; + struct rtw_adapter *padapter = function_context; RT_TRACE(_module_rtl871x_security_c_, _drv_err_, "^^^%s ^^^\n", __func__); diff --git a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c index b06bff74502a..a9b778c45d44 100644 --- a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c @@ -83,8 +83,8 @@ int _rtw_init_sta_priv23a(struct sta_priv *pstapriv) int _rtw_free_sta_priv23a(struct sta_priv *pstapriv) { - struct list_head *phead, *plist, *ptmp; - struct sta_info *psta; + struct list_head *phead; + struct sta_info *psta, *ptmp; struct recv_reorder_ctrl *preorder_ctrl; int index; @@ -93,12 +93,9 @@ int _rtw_free_sta_priv23a(struct sta_priv *pstapriv) spin_lock_bh(&pstapriv->sta_hash_lock); for (index = 0; index < NUM_STA; index++) { phead = &pstapriv->sta_hash[index]; - - list_for_each_safe(plist, ptmp, phead) { + list_for_each_entry_safe(psta, ptmp, phead, hash_list) { int i; - psta = container_of(plist, struct sta_info, - hash_list); for (i = 0; i < 16 ; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); @@ -203,7 +200,7 @@ int rtw_free_stainfo23a(struct rtw_adapter *padapter, struct sta_info *psta) struct hw_xmit *phwxmit; int i; - if (psta == NULL) + if (!psta) goto exit; spin_lock_bh(&psta->lock); @@ -325,8 +322,8 @@ exit: /* free all stainfo which in sta_hash[all] */ void rtw_free_all_stainfo23a(struct rtw_adapter *padapter) { - struct list_head *plist, *phead, *ptmp; - struct sta_info *psta; + struct list_head *phead; + struct sta_info *psta, *ptmp; struct sta_priv *pstapriv = &padapter->stapriv; struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo23a(padapter); s32 index; @@ -335,13 +332,9 @@ void rtw_free_all_stainfo23a(struct rtw_adapter *padapter) return; spin_lock_bh(&pstapriv->sta_hash_lock); - for (index = 0; index < NUM_STA; index++) { phead = &pstapriv->sta_hash[index]; - - list_for_each_safe(plist, ptmp, phead) { - psta = container_of(plist, struct sta_info, hash_list); - + list_for_each_entry_safe(psta, ptmp, phead, hash_list) { if (pbcmc_stainfo != psta) rtw_free_stainfo23a(padapter, psta); } @@ -352,12 +345,12 @@ void rtw_free_all_stainfo23a(struct rtw_adapter *padapter) /* any station allocated can be searched by hash list */ struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr) { - struct list_head *plist, *phead; - struct sta_info *psta = NULL; - u32 index; + struct list_head *phead; + struct sta_info *pos, *psta = NULL; + u32 index; const u8 *addr; - if (hwaddr == NULL) + if (!hwaddr) return NULL; if (is_multicast_ether_addr(hwaddr)) @@ -368,11 +361,9 @@ struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr) index = wifi_mac_hash(addr); spin_lock_bh(&pstapriv->sta_hash_lock); - phead = &pstapriv->sta_hash[index]; - - list_for_each(plist, phead) { - psta = container_of(plist, struct sta_info, hash_list); + list_for_each_entry(pos, phead, hash_list) { + psta = pos; /* if found the matched address */ if (ether_addr_equal(psta->hwaddr, addr)) @@ -392,7 +383,7 @@ int rtw_init_bcmc_stainfo23a(struct rtw_adapter *padapter) int res = _SUCCESS; psta = rtw_alloc_stainfo23a(pstapriv, bc_addr, GFP_KERNEL); - if (psta 
== NULL) { + if (!psta) { res = _FAIL; RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, "rtw_alloc_stainfo23a fail\n"); @@ -418,7 +409,7 @@ bool rtw_access_ctrl23a(struct rtw_adapter *padapter, u8 *mac_addr) { bool res = true; #ifdef CONFIG_8723AU_AP_MODE - struct list_head *plist, *phead; + struct list_head *phead; struct rtw_wlan_acl_node *paclnode; bool match = false; struct sta_priv *pstapriv = &padapter->stapriv; @@ -427,10 +418,7 @@ bool rtw_access_ctrl23a(struct rtw_adapter *padapter, u8 *mac_addr) spin_lock_bh(&pacl_node_q->lock); phead = get_list_head(pacl_node_q); - - list_for_each(plist, phead) { - paclnode = container_of(plist, struct rtw_wlan_acl_node, list); - + list_for_each_entry(paclnode, phead, list) { if (ether_addr_equal(paclnode->addr, mac_addr)) { if (paclnode->valid) { match = true; diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c index a4b6bb6c79a9..3de40cfa5f3b 100644 --- a/drivers/staging/rtl8723au/core/rtw_xmit.c +++ b/drivers/staging/rtl8723au/core/rtw_xmit.c @@ -193,39 +193,38 @@ fail: goto exit; } -void _rtw_free_xmit_priv23a (struct xmit_priv *pxmitpriv) +void _rtw_free_xmit_priv23a(struct xmit_priv *pxmitpriv) { struct rtw_adapter *padapter = pxmitpriv->adapter; - struct xmit_frame *pxframe; - struct xmit_buf *pxmitbuf; - struct list_head *plist, *ptmp; + struct xmit_frame *pxframe, *ptmp; + struct xmit_buf *pxmitbuf, *ptmp2; - list_for_each_safe(plist, ptmp, &pxmitpriv->free_xmit_queue.queue) { - pxframe = container_of(plist, struct xmit_frame, list); + list_for_each_entry_safe(pxframe, ptmp, + &pxmitpriv->free_xmit_queue.queue, list) { list_del_init(&pxframe->list); rtw_os_xmit_complete23a(padapter, pxframe); kfree(pxframe); } - list_for_each_safe(plist, ptmp, &pxmitpriv->xmitbuf_list) { - pxmitbuf = container_of(plist, struct xmit_buf, list2); + list_for_each_entry_safe(pxmitbuf, ptmp2, + &pxmitpriv->xmitbuf_list, list2) { list_del_init(&pxmitbuf->list2); rtw_os_xmit_resource_free23a(padapter, pxmitbuf); kfree(pxmitbuf); } /* free xframe_ext queue, the same count as extbuf */ - list_for_each_safe(plist, ptmp, - &pxmitpriv->free_xframe_ext_queue.queue) { - pxframe = container_of(plist, struct xmit_frame, list); + list_for_each_entry_safe(pxframe, ptmp, + &pxmitpriv->free_xframe_ext_queue.queue, + list) { list_del_init(&pxframe->list); rtw_os_xmit_complete23a(padapter, pxframe); kfree(pxframe); } /* free xmit extension buff */ - list_for_each_safe(plist, ptmp, &pxmitpriv->xmitextbuf_list) { - pxmitbuf = container_of(plist, struct xmit_buf, list2); + list_for_each_entry_safe(pxmitbuf, ptmp2, + &pxmitpriv->xmitextbuf_list, list2) { list_del_init(&pxmitbuf->list2); rtw_os_xmit_resource_free23a(padapter, pxmitbuf); kfree(pxmitbuf); @@ -1444,24 +1443,18 @@ Must be very very cautious... 
*/ static struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv) { - struct xmit_frame *pxframe = NULL; - struct list_head *plist, *phead; + struct xmit_frame *pxframe; struct rtw_queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue; spin_lock_bh(&pfree_xmit_queue->lock); - if (list_empty(&pfree_xmit_queue->queue)) { + pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue, + struct xmit_frame, list); + if (!pxframe) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, "rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt); - pxframe = NULL; } else { - phead = get_list_head(pfree_xmit_queue); - - plist = phead->next; - - pxframe = container_of(plist, struct xmit_frame, list); - list_del_init(&pxframe->list); pxmitpriv->free_xmitframe_cnt--; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, @@ -1478,22 +1471,18 @@ static struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv) struct xmit_frame *rtw_alloc_xmitframe23a_ext(struct xmit_priv *pxmitpriv) { - struct xmit_frame *pxframe = NULL; - struct list_head *plist, *phead; + struct xmit_frame *pxframe; struct rtw_queue *queue = &pxmitpriv->free_xframe_ext_queue; spin_lock_bh(&queue->lock); - if (list_empty(&queue->queue)) { + pxframe = list_first_entry_or_null(&queue->queue, + struct xmit_frame, list); + if (!pxframe) { RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, "rtw_alloc_xmitframe23a_ext:%d\n", pxmitpriv->free_xframe_ext_cnt); - pxframe = NULL; } else { - phead = get_list_head(queue); - plist = phead->next; - pxframe = container_of(plist, struct xmit_frame, list); - list_del_init(&pxframe->list); pxmitpriv->free_xframe_ext_cnt--; RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, @@ -1563,18 +1552,13 @@ exit: void rtw_free_xmitframe_queue23a(struct xmit_priv *pxmitpriv, struct rtw_queue *pframequeue) { - struct list_head *plist, *phead, *ptmp; - struct xmit_frame *pxmitframe; + struct list_head *phead; + struct xmit_frame *pxmitframe, *ptmp; spin_lock_bh(&pframequeue->lock); - phead = get_list_head(pframequeue); - - list_for_each_safe(plist, ptmp, phead) { - pxmitframe = container_of(plist, struct xmit_frame, list); - + list_for_each_entry_safe(pxmitframe, ptmp, phead, list) rtw_free_xmitframe23a(pxmitpriv, pxmitframe); - } spin_unlock_bh(&pframequeue->lock); } @@ -1612,9 +1596,9 @@ struct xmit_frame * rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, int entry) { - struct list_head *sta_plist, *sta_phead, *ptmp; + struct list_head *sta_phead; struct hw_xmit *phwxmit; - struct tx_servq *ptxservq = NULL; + struct tx_servq *ptxservq = NULL, *ptmp; struct rtw_queue *pframe_queue = NULL; struct xmit_frame *pxmitframe = NULL; struct rtw_adapter *padapter = pxmitpriv->adapter; @@ -1638,11 +1622,8 @@ rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, phwxmit = phwxmit_i + inx[i]; sta_phead = get_list_head(phwxmit->sta_queue); - - list_for_each_safe(sta_plist, ptmp, sta_phead) { - ptxservq = container_of(sta_plist, struct tx_servq, - tx_pending); - + list_for_each_entry_safe(ptxservq, ptmp, sta_phead, + tx_pending) { pframe_queue = &ptxservq->sta_pending; pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue); @@ -2052,18 +2033,15 @@ dequeue_xmitframes_to_sleeping_queue(struct rtw_adapter *padapter, struct rtw_queue *pframequeue) { int ret; - struct list_head *plist, *phead, *ptmp; - u8 ac_index; + struct list_head *phead; + u8 ac_index; struct tx_servq *ptxservq; - struct pkt_attrib *pattrib; - struct xmit_frame *pxmitframe; - struct hw_xmit 
*phwxmits = padapter->xmitpriv.hwxmits; + struct pkt_attrib *pattrib; + struct xmit_frame *pxmitframe, *ptmp; + struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; phead = get_list_head(pframequeue); - - list_for_each_safe(plist, ptmp, phead) { - pxmitframe = container_of(plist, struct xmit_frame, list); - + list_for_each_entry_safe(pxmitframe, ptmp, phead, list) { ret = xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe); if (ret == true) { @@ -2124,17 +2102,14 @@ void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta) { u8 update_mask = 0, wmmps_ac = 0; struct sta_info *psta_bmc; - struct list_head *plist, *phead, *ptmp; - struct xmit_frame *pxmitframe = NULL; + struct list_head *phead; + struct xmit_frame *pxmitframe = NULL, *ptmp; struct sta_priv *pstapriv = &padapter->stapriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; spin_lock_bh(&pxmitpriv->lock); - phead = get_list_head(&psta->sleep_q); - - list_for_each_safe(plist, ptmp, phead) { - pxmitframe = container_of(plist, struct xmit_frame, list); + list_for_each_entry_safe(pxmitframe, ptmp, phead, list) { list_del_init(&pxmitframe->list); switch (pxmitframe->attrib.priority) { @@ -2194,7 +2169,6 @@ void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta) pstapriv->sta_dz_bitmap &= ~CHKBIT(psta->aid); } - /* spin_unlock_bh(&psta->sleep_q.lock); */ spin_unlock_bh(&pxmitpriv->lock); @@ -2206,13 +2180,8 @@ void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta) if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) { /* no any sta in ps mode */ spin_lock_bh(&pxmitpriv->lock); - phead = get_list_head(&psta_bmc->sleep_q); - - list_for_each_safe(plist, ptmp, phead) { - pxmitframe = container_of(plist, struct xmit_frame, - list); - + list_for_each_entry_safe(pxmitframe, ptmp, phead, list) { list_del_init(&pxmitframe->list); psta_bmc->sleepq_len--; @@ -2232,7 +2201,6 @@ void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta) /* update_BCNTIM(padapter); */ update_mask |= BIT(1); } - /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */ spin_unlock_bh(&pxmitpriv->lock); } @@ -2245,19 +2213,15 @@ void xmit_delivery_enabled_frames23a(struct rtw_adapter *padapter, struct sta_info *psta) { u8 wmmps_ac = 0; - struct list_head *plist, *phead, *ptmp; - struct xmit_frame *pxmitframe; + struct list_head *phead; + struct xmit_frame *pxmitframe, *ptmp; struct sta_priv *pstapriv = &padapter->stapriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; /* spin_lock_bh(&psta->sleep_q.lock); */ spin_lock_bh(&pxmitpriv->lock); - phead = get_list_head(&psta->sleep_q); - - list_for_each_safe(plist, ptmp, phead) { - pxmitframe = container_of(plist, struct xmit_frame, list); - + list_for_each_entry_safe(pxmitframe, ptmp, phead, list) { switch (pxmitframe->attrib.priority) { case 1: case 2: diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c index e8cab9e97385..8d3ea6c0cbe6 100644 --- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c +++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c @@ -219,7 +219,7 @@ void ODM_ReadAndConfig_AGC_TAB_1T_8723A(struct dm_odm_t *pDM_Odm) u32 i; u8 platform = 0x04; u8 board = pDM_Odm->BoardType; - u32 ArrayLen = sizeof(Array_AGC_TAB_1T_8723A)/sizeof(u32); + u32 ArrayLen = ARRAY_SIZE(Array_AGC_TAB_1T_8723A); u32 *Array = Array_AGC_TAB_1T_8723A; hex = board; @@ -467,7 +467,7 @@ void ODM_ReadAndConfig_PHY_REG_1T_8723A(struct dm_odm_t *pDM_Odm) u32 i = 0; u8 platform = 0x04; u8 board 
= pDM_Odm->BoardType; - u32 ArrayLen = sizeof(Array_PHY_REG_1T_8723A)/sizeof(u32); + u32 ArrayLen = ARRAY_SIZE(Array_PHY_REG_1T_8723A); u32 *Array = Array_PHY_REG_1T_8723A; hex += board; @@ -523,7 +523,7 @@ void ODM_ReadAndConfig_PHY_REG_MP_8723A(struct dm_odm_t *pDM_Odm) u32 i; u8 platform = 0x04; u8 board = pDM_Odm->BoardType; - u32 ArrayLen = sizeof(Array_PHY_REG_MP_8723A)/sizeof(u32); + u32 ArrayLen = ARRAY_SIZE(Array_PHY_REG_MP_8723A); u32 *Array = Array_PHY_REG_MP_8723A; hex += board; diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c index 93b2d183d694..9bf685905e68 100644 --- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c +++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c @@ -145,7 +145,7 @@ void ODM_ReadAndConfig_MAC_REG_8723A(struct dm_odm_t *pDM_Odm) u32 i = 0; u8 platform = 0x04; u8 board = pDM_Odm->BoardType; - u32 ArrayLen = sizeof(Array_MAC_REG_8723A)/sizeof(u32); + u32 ArrayLen = ARRAY_SIZE(Array_MAC_REG_8723A); u32 *Array = Array_MAC_REG_8723A; hex += board; diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c index dbf571e8b908..286f3ea3d263 100644 --- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c +++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c @@ -215,7 +215,7 @@ void ODM_ReadAndConfig_RadioA_1T_8723A(struct dm_odm_t *pDM_Odm) u32 i = 0; u8 platform = 0x04; u8 board = pDM_Odm->BoardType; - u32 ArrayLen = sizeof(Array_RadioA_1T_8723A)/sizeof(u32); + u32 ArrayLen = ARRAY_SIZE(Array_RadioA_1T_8723A); u32 *Array = Array_RadioA_1T_8723A; hex += board; diff --git a/drivers/staging/rtl8723au/hal/hal_com.c b/drivers/staging/rtl8723au/hal/hal_com.c index 530db57e8842..9d7b11b63957 100644 --- a/drivers/staging/rtl8723au/hal/hal_com.c +++ b/drivers/staging/rtl8723au/hal/hal_com.c @@ -328,7 +328,7 @@ int c2h_evt_read23a(struct rtw_adapter *adapter, u8 *buf) if (trigger == C2H_EVT_HOST_CLOSE) goto exit; /* Not ready */ - else if (trigger != C2H_EVT_FW_CLOSE) + if (trigger != C2H_EVT_FW_CLOSE) goto clear_evt; /* Not a valid value */ c2h_evt = (struct c2h_evt_hdr *)buf; diff --git a/drivers/staging/rtl8723au/hal/odm.c b/drivers/staging/rtl8723au/hal/odm.c index 6b9dbeffafcb..e279c34b3fc6 100644 --- a/drivers/staging/rtl8723au/hal/odm.c +++ b/drivers/staging/rtl8723au/hal/odm.c @@ -185,7 +185,6 @@ void odm_CCKPacketDetectionThresh23a(struct dm_odm_t *pDM_Odm); /* START-------BB POWER SAVE----------------------- */ void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm); -void odm_DynamicBBPowerSaving23a(struct dm_odm_t *pDM_Odm); /* END---------BB POWER SAVE----------------------- */ @@ -270,7 +269,6 @@ void ODM_DMWatchdog23a(struct rtw_adapter *adapter) odm_RefreshRateAdaptiveMask(pDM_Odm); - odm_DynamicBBPowerSaving23a(pDM_Odm); odm_EdcaTurboCheck23a(pDM_Odm); } @@ -894,10 +892,6 @@ void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm) pDM_PSTable->initialize = 0; } -void odm_DynamicBBPowerSaving23a(struct dm_odm_t *pDM_Odm) -{ - return; -} void ODM_RF_Saving23a(struct dm_odm_t *pDM_Odm, u8 bForceInNormal) { @@ -1274,7 +1268,7 @@ static void odm_RSSIMonitorCheck(struct dm_odm_t *pDM_Odm) for (i = 0; i < sta_cnt; i++) { if (PWDB_rssi[i] != (0)) - rtl8723a_set_rssi_cmd(Adapter, (u8 *)&PWDB_rssi[i]); + rtl8723a_set_rssi_cmd(Adapter, PWDB_rssi[i]); } pdmpriv->EntryMaxUndecoratedSmoothedPWDB = MaxDB; diff --git a/drivers/staging/rtl8723au/hal/odm_HWConfig.c b/drivers/staging/rtl8723au/hal/odm_HWConfig.c index 7b9799e3dbda..0562f61bd1dc 100644 --- 
a/drivers/staging/rtl8723au/hal/odm_HWConfig.c +++ b/drivers/staging/rtl8723au/hal/odm_HWConfig.c @@ -270,10 +270,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct dm_odm_t *pDM_Odm, } } -void odm_Init_RSSIForDM23a(struct dm_odm_t *pDM_Odm) -{ -} - static void odm_Process_RSSIForDM(struct dm_odm_t *pDM_Odm, struct phy_info *pPhyInfo, struct odm_packet_info *pPktinfo) diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c index d5c48a56d4ac..bfcbd7a349cf 100644 --- a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c +++ b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c @@ -77,12 +77,6 @@ if ((BTCoexDbgLevel == _bt_dbg_on_)) {\ #define PlatformZeroMemory(ptr, sz) memset(ptr, 0, sz) -#define PlatformProcessHCICommands(...) -#define PlatformTxBTQueuedPackets(...) -#define PlatformIndicateBTACLData(...) (RT_STATUS_SUCCESS) -#define PlatformAcquireSpinLock(padapter, type) -#define PlatformReleaseSpinLock(padapter, type) - #define GET_UNDECORATED_AVERAGE_RSSI(padapter) \ (GET_HAL_DATA(padapter)->dmpriv.EntryMinUndecoratedSmoothedPWDB) #define RT_RF_CHANGE_SOURCE u32 @@ -798,11 +792,7 @@ bthci_IndicateEvent( u32 dataLen ) { - enum rt_status rt_status; - - rt_status = PlatformIndicateBTEvent(padapter, pEvntData, dataLen); - - return rt_status; + return PlatformIndicateBTEvent(padapter, pEvntData, dataLen); } static void @@ -1454,21 +1444,11 @@ bthci_StartBeaconAndConnect( } if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_CREATOR) { - snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, "AMP-%02x-%02x-%02x-%02x-%02x-%02x", - padapter->eeprompriv.mac_addr[0], - padapter->eeprompriv.mac_addr[1], - padapter->eeprompriv.mac_addr[2], - padapter->eeprompriv.mac_addr[3], - padapter->eeprompriv.mac_addr[4], - padapter->eeprompriv.mac_addr[5]); + snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, + "AMP-%pMF", padapter->eeprompriv.mac_addr); } else if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_JOINER) { - snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, "AMP-%02x-%02x-%02x-%02x-%02x-%02x", - pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[0], - pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[1], - pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[2], - pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[3], - pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[4], - pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[5]); + snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, + "AMP-%pMF", pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr); } FillOctetString(pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsid, pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 21); @@ -2909,16 +2889,13 @@ bthci_CmdCreatePhysicalLink( struct packet_irp_hcicmd_data *pHciCmd ) { - enum hci_status status; struct bt_30info *pBTInfo = GET_BT_INFO(padapter); struct bt_dgb *pBtDbg = &pBTInfo->BtDbg; pBtDbg->dbgHciInfo.hciCmdCntCreatePhyLink++; - status = bthci_BuildPhysicalLink(padapter, + return bthci_BuildPhysicalLink(padapter, pHciCmd, HCI_CREATE_PHYSICAL_LINK); - - return status; } static enum hci_status @@ -3184,16 +3161,13 @@ static enum hci_status bthci_CmdAcceptPhysicalLink(struct rtw_adapter *padapter, struct packet_irp_hcicmd_data *pHciCmd) { - enum hci_status status; struct bt_30info *pBTInfo = GET_BT_INFO(padapter); struct bt_dgb *pBtDbg = &pBTInfo->BtDbg; pBtDbg->dbgHciInfo.hciCmdCntAcceptPhyLink++; - status = 
bthci_BuildPhysicalLink(padapter, + return bthci_BuildPhysicalLink(padapter, pHciCmd, HCI_ACCEPT_PHYSICAL_LINK); - - return status; } static enum hci_status @@ -9475,10 +9449,8 @@ static void BTDM_Display8723ABtCoexInfo(struct rtw_adapter *padapter) psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm1Ant.curPsTdma; else psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm2Ant.curPsTdma; - snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA(0x3a)", \ - pHalData->bt_coexist.fw3aVal[0], pHalData->bt_coexist.fw3aVal[1], - pHalData->bt_coexist.fw3aVal[2], pHalData->bt_coexist.fw3aVal[3], - pHalData->bt_coexist.fw3aVal[4], psTdmaCase); + snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %*ph case-%d", + "PS TDMA(0x3a)", 5, pHalData->bt_coexist.fw3aVal, psTdmaCase); DCMD_Printf(btCoexDbgBuf); snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "Decrease Bt Power", \ diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c index 1662c03c1323..2230f4c539ec 100644 --- a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c +++ b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c @@ -113,11 +113,11 @@ exit: return ret; } -int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param) +int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u32 param) { - *((u32 *)param) = cpu_to_le32(*((u32 *)param)); + __le32 cmd = cpu_to_le32(param); - FillH2CCmd(padapter, RSSI_SETTING_EID, 3, param); + FillH2CCmd(padapter, RSSI_SETTING_EID, 3, (void *)&cmd); return _SUCCESS; } diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c index ecf54ee47f7c..e81301fcb01d 100644 --- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c +++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c @@ -399,10 +399,8 @@ hal_ReadEFuse_WiFi(struct rtw_adapter *padapter, } efuseTbl = kmalloc(EFUSE_MAP_LEN_8723A, GFP_KERNEL); - if (efuseTbl == NULL) { - DBG_8723A("%s: alloc efuseTbl fail!\n", __func__); + if (!efuseTbl) return; - } /* 0xff will be efuse default value instead of 0x00. */ memset(efuseTbl, 0xFF, EFUSE_MAP_LEN_8723A); @@ -491,10 +489,8 @@ hal_ReadEFuse_BT(struct rtw_adapter *padapter, } efuseTbl = kmalloc(EFUSE_BT_MAP_LEN, GFP_KERNEL); - if (efuseTbl == NULL) { - DBG_8723A("%s: efuseTbl malloc fail!\n", __func__); + if (!efuseTbl) return; - } /* 0xff will be efuse default value instead of 0x00. 
*/ memset(efuseTbl, 0xFF, EFUSE_BT_MAP_LEN); @@ -1044,7 +1040,7 @@ void rtl8723a_InitAntenna_Selection(struct rtw_adapter *padapter) u8 val; val = rtl8723au_read8(padapter, REG_LEDCFG2); - /* Let 8051 take control antenna settting */ + /* Let 8051 take control antenna setting */ val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */ rtl8723au_write8(padapter, REG_LEDCFG2, val); } @@ -1054,7 +1050,7 @@ void rtl8723a_CheckAntenna_Selection(struct rtw_adapter *padapter) u8 val; val = rtl8723au_read8(padapter, REG_LEDCFG2); - /* Let 8051 take control antenna settting */ + /* Let 8051 take control antenna setting */ if (!(val & BIT(7))) { val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */ rtl8723au_write8(padapter, REG_LEDCFG2, val); @@ -1066,7 +1062,7 @@ void rtl8723a_DeinitAntenna_Selection(struct rtw_adapter *padapter) u8 val; val = rtl8723au_read8(padapter, REG_LEDCFG2); - /* Let 8051 take control antenna settting */ + /* Let 8051 take control antenna setting */ val &= ~BIT(7); /* DPDT_SEL_EN, clear 0x4C[23] */ rtl8723au_write8(padapter, REG_LEDCFG2, val); } @@ -1297,7 +1293,7 @@ static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter, /* If we want to SS mode, we can not reset 8051. */ if ((val8 & BIT(1)) && padapter->bFWReady) { /* IF fw in RAM code, do reset */ - /* 2010/08/25 MH Accordign to RD alfred's + /* 2010/08/25 MH According to RD alfred's suggestion, we need to disable other */ /* HRCV INT to influence 8051 reset. */ rtl8723au_write8(padapter, REG_FWIMR, 0x20); diff --git a/drivers/staging/rtl8723au/hal/usb_halinit.c b/drivers/staging/rtl8723au/hal/usb_halinit.c index 9926b0790e75..fa47aebf8b98 100644 --- a/drivers/staging/rtl8723au/hal/usb_halinit.c +++ b/drivers/staging/rtl8723au/hal/usb_halinit.c @@ -736,8 +736,7 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter) rtl8723a_InitHalDm(Adapter); - val8 = (WiFiNavUpperUs + HAL_8723A_NAV_UPPER_UNIT - 1) / - HAL_8723A_NAV_UPPER_UNIT; + val8 = DIV_ROUND_UP(WiFiNavUpperUs, HAL_8723A_NAV_UPPER_UNIT); rtl8723au_write8(Adapter, REG_NAV_UPPER, val8); /* 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, but we need to fin root cause. 
*/ @@ -1021,10 +1020,8 @@ static void Hal_EfuseParseMACAddr_8723AU(struct rtw_adapter *padapter, } RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, - "Hal_EfuseParseMACAddr_8723AU: Permanent Address =%02x:%02x:%02x:%02x:%02x:%02x\n", - pEEPROM->mac_addr[0], pEEPROM->mac_addr[1], - pEEPROM->mac_addr[2], pEEPROM->mac_addr[3], - pEEPROM->mac_addr[4], pEEPROM->mac_addr[5]); + "Hal_EfuseParseMACAddr_8723AU: Permanent Address =%pM\n", + pEEPROM->mac_addr); } static void readAdapterInfo(struct rtw_adapter *padapter) diff --git a/drivers/staging/rtl8723au/hal/usb_ops_linux.c b/drivers/staging/rtl8723au/hal/usb_ops_linux.c index 371e6b373420..5c81ff48252e 100644 --- a/drivers/staging/rtl8723au/hal/usb_ops_linux.c +++ b/drivers/staging/rtl8723au/hal/usb_ops_linux.c @@ -256,12 +256,8 @@ static void usb_read_interrupt_complete(struct urb *purb) c2w = kmalloc(sizeof(struct evt_work), GFP_ATOMIC); - if (!c2w) { - printk(KERN_WARNING "%s: unable to " - "allocate work buffer\n", - __func__); + if (!c2w) goto urb_submit; - } c2w->adapter = padapter; INIT_WORK(&c2w->work, rtw_evt_work); diff --git a/drivers/staging/rtl8723au/include/odm_HWConfig.h b/drivers/staging/rtl8723au/include/odm_HWConfig.h index ce7abe770b5a..c748d5fb47fa 100644 --- a/drivers/staging/rtl8723au/include/odm_HWConfig.h +++ b/drivers/staging/rtl8723au/include/odm_HWConfig.h @@ -142,8 +142,6 @@ struct phy_status_rpt_8195 { }; -void odm_Init_RSSIForDM23a(struct dm_odm_t *pDM_Odm); - void ODM_PhyStatusQuery23a( struct dm_odm_t *pDM_Odm, diff --git a/drivers/staging/rtl8723au/include/osdep_service.h b/drivers/staging/rtl8723au/include/osdep_service.h index dedb41874de5..98250b12e9f2 100644 --- a/drivers/staging/rtl8723au/include/osdep_service.h +++ b/drivers/staging/rtl8723au/include/osdep_service.h @@ -29,10 +29,10 @@ #include <linux/kref.h> #include <linux/netdevice.h> #include <linux/skbuff.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/byteorder.h> -#include <asm/atomic.h> -#include <asm/io.h> +#include <linux/atomic.h> +#include <linux/io.h> #include <linux/semaphore.h> #include <linux/sem.h> #include <linux/sched.h> diff --git a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h index 014c02edded6..f95535a915ab 100644 --- a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h +++ b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h @@ -149,7 +149,7 @@ void rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(struct rtw_adapter *padapter); #else #define rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(padapter) do {} while(0) #endif -int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param); +int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u32 param); int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg); void rtl8723a_add_rateatid(struct rtw_adapter *padapter, u32 bitmap, u8 arg, u8 rssi_level); diff --git a/drivers/staging/rtl8723au/include/rtw_ap.h b/drivers/staging/rtl8723au/include/rtw_ap.h index 9f8d235c992f..55a708f9fc5b 100644 --- a/drivers/staging/rtl8723au/include/rtw_ap.h +++ b/drivers/staging/rtl8723au/include/rtw_ap.h @@ -36,8 +36,6 @@ int rtw_check_beacon_data23a(struct rtw_adapter *padapter, struct ieee80211_mgmt *mgmt, unsigned int len); void rtw_ap_restore_network(struct rtw_adapter *padapter); void rtw_set_macaddr_acl23a(struct rtw_adapter *padapter, int mode); -int rtw_acl_add_sta23a(struct rtw_adapter *padapter, u8 *addr); -int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr); void associated_clients_update23a(struct 
rtw_adapter *padapter, u8 updated); void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info *psta); @@ -46,7 +44,6 @@ void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta); void ap_sta_info_defer_update23a(struct rtw_adapter *padapter, struct sta_info *psta); u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool active, u16 reason); int rtw_sta_flush23a(struct rtw_adapter *padapter); -int rtw_ap_inform_ch_switch23a(struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset); void start_ap_mode23a(struct rtw_adapter *padapter); void stop_ap_mode23a(struct rtw_adapter *padapter); #endif /* end of CONFIG_8723AU_AP_MODE */ diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c index 0ae2180a35b7..12d18440e824 100644 --- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c @@ -458,7 +458,7 @@ static int set_group_key(struct rtw_adapter *padapter, struct key_params *parms, pcmd->cmdcode = _SetKey_CMD_; pcmd->parmbuf = (u8 *) psetkeyparm; - pcmd->cmdsz = (sizeof(struct setkey_parm)); + pcmd->cmdsz = sizeof(struct setkey_parm); pcmd->rsp = NULL; pcmd->rspsz = 0; @@ -543,7 +543,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index, memcpy(psecuritypriv-> dot118021XGrpKey[key_index].skey, keyparms->key, - (key_len > 16 ? 16 : key_len)); + (min(16, key_len))); /* set mic key */ memcpy(psecuritypriv-> @@ -565,7 +565,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index, memcpy(psecuritypriv-> dot118021XGrpKey[key_index].skey, keyparms->key, - (key_len > 16 ? 16 : key_len)); + (min(16, key_len))); } else { DBG_8723A("%s, set group_key, none\n", __func__); @@ -603,7 +603,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index, if (set_tx == 1) { /* pairwise key */ memcpy(psta->dot118021x_UncstKey.skey, - keyparms->key, (key_len > 16 ? 16 : key_len)); + keyparms->key, (min(16, key_len))); if (keyparms->cipher == WLAN_CIPHER_SUITE_WEP40 || keyparms->cipher == WLAN_CIPHER_SUITE_WEP104) { @@ -661,7 +661,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index, memcpy(psecuritypriv-> dot118021XGrpKey[key_index].skey, keyparms->key, - (key_len > 16 ? 16 : key_len)); + (min(16, key_len))); /* set mic key */ memcpy(psecuritypriv-> @@ -679,7 +679,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index, memcpy(psecuritypriv-> dot118021XGrpKey[key_index].skey, keyparms->key, - (key_len > 16 ? 16 : key_len)); + (min(16, key_len))); } else { psecuritypriv->dot118021XGrpPrivacy = 0; } @@ -789,7 +789,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, u8 key_index, memcpy(psta->dot118021x_UncstKey.skey, keyparms->key, - (key_len > 16 ? 16 : key_len)); + (min(16, key_len))); if (keyparms->cipher == WLAN_CIPHER_SUITE_TKIP) { @@ -812,7 +812,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, u8 key_index, memcpy(padapter->securitypriv. dot118021XGrpKey[key_index].skey, keyparms->key, - (key_len > 16 ? 16 : key_len)); + (min(16, key_len))); memcpy(padapter->securitypriv. dot118021XGrptxmickey[key_index]. 
skey, &keyparms->key[16], 8); @@ -1270,18 +1270,14 @@ void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv, void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter) { - struct list_head *plist, *phead, *ptmp; + struct list_head *phead; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct rtw_queue *queue = &pmlmepriv->scanned_queue; - struct wlan_network *pnetwork; + struct wlan_network *pnetwork, *ptmp; spin_lock_bh(&pmlmepriv->scanned_queue.lock); - phead = get_list_head(queue); - - list_for_each_safe(plist, ptmp, phead) { - pnetwork = container_of(plist, struct wlan_network, list); - + list_for_each_entry_safe(pnetwork, ptmp, phead, list) { /* report network only if the current channel set contains the channel to which this network belongs */ if (rtw_ch_set_search_ch23a @@ -1289,7 +1285,6 @@ void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter) pnetwork->network.DSConfig) >= 0) rtw_cfg80211_inform_bss(padapter, pnetwork); } - spin_unlock_bh(&pmlmepriv->scanned_queue.lock); /* call this after other things have been done */ @@ -2202,7 +2197,7 @@ static int cfg80211_rtw_get_txpower(struct wiphy *wiphy, struct wireless_dev *wdev, int *dbm) { DBG_8723A("%s\n", __func__); - *dbm = (12); + *dbm = 12; return 0; } @@ -2615,8 +2610,6 @@ static int rtw_cfg80211_add_monitor_if(struct rtw_adapter *padapter, char *name, /* wdev */ mon_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!mon_wdev) { - DBG_8723A("%s(%s): allocate mon_wdev fail\n", __func__, - padapter->pnetdev->name); ret = -ENOMEM; goto out; } @@ -2850,9 +2843,9 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, { const u8 *mac = params->mac; int ret = 0; - struct list_head *phead, *plist, *ptmp; + struct list_head *phead; u8 updated = 0; - struct sta_info *psta; + struct sta_info *psta, *ptmp; struct rtw_adapter *padapter = netdev_priv(ndev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct sta_priv *pstapriv = &padapter->stapriv; @@ -2881,13 +2874,9 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, return -EINVAL; spin_lock_bh(&pstapriv->asoc_list_lock); - phead = &pstapriv->asoc_list; - /* check asoc_queue */ - list_for_each_safe(plist, ptmp, phead) { - psta = container_of(plist, struct sta_info, asoc_list); - + list_for_each_entry_safe(psta, ptmp, phead, asoc_list) { if (ether_addr_equal(mac, psta->hwaddr)) { if (psta->dot8021xalg == 1 && psta->bpairwise_key_installed == false) { @@ -2912,7 +2901,6 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, } } } - spin_unlock_bh(&pstapriv->asoc_list_lock); associated_clients_update23a(padapter, updated); @@ -3272,7 +3260,6 @@ int rtw_wdev_alloc(struct rtw_adapter *padapter, struct device *dev) /* wdev */ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!wdev) { - DBG_8723A("Couldn't allocate wireless device\n"); ret = -ENOMEM; goto free_wiphy; } diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c index 0cdaef0a8c24..cf4a50618670 100644 --- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c +++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c @@ -210,22 +210,21 @@ exit: void rtl8723au_write_port_cancel(struct rtw_adapter *padapter) { struct xmit_buf *pxmitbuf; - struct list_head *plist; int j; DBG_8723A("%s\n", __func__); padapter->bWritePortCancel = true; - list_for_each(plist, &padapter->xmitpriv.xmitbuf_list) { - pxmitbuf = container_of(plist, struct xmit_buf, list2); + list_for_each_entry(pxmitbuf, 
&padapter->xmitpriv.xmitbuf_list, + list2) { for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); } } - list_for_each(plist, &padapter->xmitpriv.xmitextbuf_list) { - pxmitbuf = container_of(plist, struct xmit_buf, list2); + list_for_each_entry(pxmitbuf, &padapter->xmitpriv.xmitextbuf_list, + list2) { for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); diff --git a/drivers/staging/rtl8723au/os_dep/xmit_linux.c b/drivers/staging/rtl8723au/os_dep/xmit_linux.c index 9a14074ecec0..64be72ac38ee 100644 --- a/drivers/staging/rtl8723au/os_dep/xmit_linux.c +++ b/drivers/staging/rtl8723au/os_dep/xmit_linux.c @@ -37,7 +37,7 @@ int rtw_os_xmit_resource_alloc23a(struct rtw_adapter *padapter, for (i = 0; i < 8; i++) { pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL); - if (pxmitbuf->pxmit_urb[i] == NULL) { + if (!pxmitbuf->pxmit_urb[i]) { DBG_8723A("pxmitbuf->pxmit_urb[i]==NULL"); return _FAIL; } diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c index cdaa1aba50ed..a780185a3754 100644 --- a/drivers/staging/rts5208/ms.c +++ b/drivers/staging/rts5208/ms.c @@ -30,14 +30,14 @@ static inline void ms_set_err_code(struct rtsx_chip *chip, u8 err_code) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; ms_card->err_code = err_code; } static inline int ms_check_err_code(struct rtsx_chip *chip, u8 err_code) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; return (ms_card->err_code == err_code); } @@ -51,7 +51,7 @@ static int ms_parse_err_code(struct rtsx_chip *chip) static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode, u8 tpc, u8 cnt, u8 cfg) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; u8 *ptr; @@ -185,7 +185,7 @@ static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode, static int ms_write_bytes(struct rtsx_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; if (!data || (data_len < cnt)) { @@ -255,7 +255,7 @@ static int ms_write_bytes(struct rtsx_chip *chip, static int ms_read_bytes(struct rtsx_chip *chip, u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 *ptr; @@ -369,7 +369,7 @@ static int ms_send_cmd(struct rtsx_chip *chip, u8 cmd, u8 cfg) static int ms_set_init_para(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; if (CHK_HG8BIT(ms_card)) { @@ -408,7 +408,7 @@ static int ms_set_init_para(struct rtsx_chip *chip) static int ms_switch_clock(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; retval = select_card(chip, MS_CARD); @@ -542,7 +542,7 @@ static int ms_pull_ctl_enable(struct rtsx_chip *chip) static int ms_prepare_reset(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; u8 oc_mask = 0; @@ -653,7 +653,7 @@ static int ms_prepare_reset(struct rtsx_chip *chip) static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 val; @@ -829,7 +829,7 @@ static int 
ms_switch_parallel_bus(struct rtsx_chip *chip) static int ms_switch_8bit_bus(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 data[2]; @@ -873,7 +873,7 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip) static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; for (i = 0; i < 3; i++) { @@ -994,7 +994,7 @@ static int msxc_change_power(struct rtsx_chip *chip, u8 mode) static int ms_read_attribute_info(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 val, *buf, class_code, device_type, sub_class, data[16]; u16 total_blk = 0, blk_size = 0; @@ -1039,7 +1039,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip) } buf = kmalloc(64 * 512, GFP_KERNEL); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return STATUS_ERROR; } @@ -1303,7 +1303,7 @@ static int mg_set_tpc_para_sub(struct rtsx_chip *chip, static int reset_ms_pro(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; #ifdef XC_POWERCLASS u8 change_power_class; @@ -1421,7 +1421,7 @@ static int ms_read_status_reg(struct rtsx_chip *chip) static int ms_read_extra_data(struct rtsx_chip *chip, u16 block_addr, u8 page_num, u8 *buf, int buf_len) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 val, data[10]; @@ -1516,7 +1516,7 @@ static int ms_read_extra_data(struct rtsx_chip *chip, static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr, u8 page_num, u8 *buf, int buf_len) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 val, data[16]; @@ -1585,7 +1585,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip, static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; u8 val, data[6]; @@ -1670,7 +1670,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num) static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; u8 val, data[8], extra[MS_EXTRA_SIZE]; @@ -1741,7 +1741,7 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk) static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i = 0; u8 val, data[6]; @@ -1862,7 +1862,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk, static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, u16 log_blk, u8 start_page, u8 end_page) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; bool uncorrect_flag = false; int retval, rty_cnt; u8 extra[MS_EXTRA_SIZE], val, i, j, data[16]; @@ -2155,7 +2155,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, static int reset_ms(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; u16 i, reg_addr, block_size; u8 val, extra[MS_EXTRA_SIZE], j, *ptr; @@ -2394,7 +2394,7 @@ RE_SEARCH: static int 
ms_init_l2p_tbl(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int size, i, seg_no, retval; u16 defect_block, reg_addr; u8 val1, val2; @@ -2405,7 +2405,7 @@ static int ms_init_l2p_tbl(struct rtsx_chip *chip) size = ms_card->segment_cnt * sizeof(struct zone_entry); ms_card->segment = vzalloc(size); - if (ms_card->segment == NULL) { + if (!ms_card->segment) { rtsx_trace(chip); return STATUS_FAIL; } @@ -2457,20 +2457,18 @@ static int ms_init_l2p_tbl(struct rtsx_chip *chip) return STATUS_SUCCESS; INIT_FAIL: - if (ms_card->segment) { - vfree(ms_card->segment); - ms_card->segment = NULL; - } + vfree(ms_card->segment); + ms_card->segment = NULL; return STATUS_FAIL; } static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; struct zone_entry *segment; - if (ms_card->segment == NULL) + if (!ms_card->segment) return 0xFFFF; segment = &(ms_card->segment[seg_no]); @@ -2484,10 +2482,10 @@ static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off) static void ms_set_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off, u16 phy_blk) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; struct zone_entry *segment; - if (ms_card->segment == NULL) + if (!ms_card->segment) return; segment = &(ms_card->segment[seg_no]); @@ -2497,7 +2495,7 @@ static void ms_set_l2p_tbl(struct rtsx_chip *chip, static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; struct zone_entry *segment; int seg_no; @@ -2513,7 +2511,7 @@ static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk) static u16 ms_get_unused_block(struct rtsx_chip *chip, int seg_no) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; struct zone_entry *segment; u16 phy_blk; @@ -2540,7 +2538,7 @@ static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478, static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk, u16 log_off, u8 us1, u8 us2) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; struct zone_entry *segment; int seg_no; u16 tmp_blk; @@ -2582,7 +2580,7 @@ static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk, static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; struct zone_entry *segment; bool defect_flag; int retval, table_size, disable_cnt, i; @@ -2591,7 +2589,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) dev_dbg(rtsx_dev(chip), "ms_build_l2p_tbl: %d\n", seg_no); - if (ms_card->segment == NULL) { + if (!ms_card->segment) { retval = ms_init_l2p_tbl(chip); if (retval != STATUS_SUCCESS) { rtsx_trace(chip); @@ -2612,18 +2610,18 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) segment = &(ms_card->segment[seg_no]); - if (segment->l2p_table == NULL) { + if (!segment->l2p_table) { segment->l2p_table = vmalloc(table_size * 2); - if (segment->l2p_table == NULL) { + if (!segment->l2p_table) { rtsx_trace(chip); goto BUILD_FAIL; } } memset((u8 *)(segment->l2p_table), 0xff, table_size * 2); - if (segment->free_table == NULL) { + if (!segment->free_table) { segment->free_table = vmalloc(MS_FREE_TABLE_CNT * 2); - if (segment->free_table == NULL) { + if 
(!segment->free_table) { rtsx_trace(chip); goto BUILD_FAIL; } @@ -2803,14 +2801,10 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no) BUILD_FAIL: segment->build_flag = 0; - if (segment->l2p_table) { - vfree(segment->l2p_table); - segment->l2p_table = NULL; - } - if (segment->free_table) { - vfree(segment->free_table); - segment->free_table = NULL; - } + vfree(segment->l2p_table); + segment->l2p_table = NULL; + vfree(segment->free_table); + segment->free_table = NULL; return STATUS_FAIL; } @@ -2818,7 +2812,7 @@ BUILD_FAIL: int reset_ms_card(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; memset(ms_card, 0, sizeof(struct ms_info)); @@ -2905,7 +2899,7 @@ static int mspro_set_rw_cmd(struct rtsx_chip *chip, void mspro_stop_seq_mode(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; if (ms_card->seq_mode) { @@ -2923,7 +2917,7 @@ void mspro_stop_seq_mode(struct rtsx_chip *chip) static inline int ms_auto_tune_clock(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; if (chip->asic_code) { @@ -2949,7 +2943,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, u16 sector_cnt) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; bool mode_2k = false; int retval; u16 count; @@ -3092,7 +3086,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb, static int mspro_read_format_progress(struct rtsx_chip *chip, const int short_data_len) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u32 total_progress, cur_progress; u8 cnt, tmp; @@ -3214,7 +3208,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip, void mspro_polling_format_status(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int i; if (ms_card->pro_under_formatting && @@ -3232,7 +3226,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip) int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip, int short_data_len, bool quick_format) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 buf[8], tmp; u16 para; @@ -3324,7 +3318,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, u8 *buf, unsigned int *index, unsigned int *offset) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 extra[MS_EXTRA_SIZE], page_addr, val, trans_cfg, data[6]; u8 *ptr; @@ -3508,7 +3502,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, u8 end_page, u8 *buf, unsigned int *index, unsigned int *offset) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, i; u8 page_addr, val, data[16]; u8 *ptr; @@ -3729,7 +3723,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, u16 log_blk, u8 page_off) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval, seg_no; retval = ms_copy_page(chip, old_blk, new_blk, log_blk, @@ -3775,7 +3769,7 @@ static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, #ifdef MS_DELAY_WRITE int 
ms_delay_write(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; struct ms_delay_write_tag *delay_write = &(ms_card->delay_write); int retval; @@ -3814,7 +3808,7 @@ static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip) static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, u16 sector_cnt) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; unsigned int lun = SCSI_LUN(srb); int retval, seg_no; unsigned int index = 0, offset = 0; @@ -4075,7 +4069,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, u16 sector_cnt) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; if (CHK_MSPRO(ms_card)) @@ -4091,19 +4085,15 @@ int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, void ms_free_l2p_tbl(struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int i = 0; if (ms_card->segment != NULL) { for (i = 0; i < ms_card->segment_cnt; i++) { - if (ms_card->segment[i].l2p_table != NULL) { - vfree(ms_card->segment[i].l2p_table); - ms_card->segment[i].l2p_table = NULL; - } - if (ms_card->segment[i].free_table != NULL) { - vfree(ms_card->segment[i].free_table); - ms_card->segment[i].free_table = NULL; - } + vfree(ms_card->segment[i].l2p_table); + ms_card->segment[i].l2p_table = NULL; + vfree(ms_card->segment[i].free_table); + ms_card->segment[i].free_table = NULL; } vfree(ms_card->segment); ms_card->segment = NULL; @@ -4351,7 +4341,7 @@ GetEKBFinish: int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; int bufflen; int i; @@ -4435,7 +4425,7 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip) int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; int bufflen; unsigned int lun = SCSI_LUN(srb); @@ -4495,7 +4485,7 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip) int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; int i; int bufflen; @@ -4547,7 +4537,7 @@ int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip) int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; int bufflen; unsigned int lun = SCSI_LUN(srb); @@ -4604,7 +4594,7 @@ GetICVFinish: int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - struct ms_info *ms_card = &(chip->ms_card); + struct ms_info *ms_card = &chip->ms_card; int retval; int bufflen; #ifdef MG_SET_ICV_SLOW diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index 1fe8e3e0a3fb..25d095a5ade7 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -320,7 +320,6 @@ static int rtsx_suspend(struct pci_dev *pci, pm_message_t state) rtsx_do_before_power_down(chip, PM_S3); if (dev->irq >= 0) { - synchronize_irq(dev->irq); free_irq(dev->irq, (void *)dev); dev->irq = -1; } @@ -398,7 +397,6 @@ static void rtsx_shutdown(struct pci_dev *pci) rtsx_do_before_power_down(chip, PM_S1); if 
(dev->irq >= 0) { - synchronize_irq(dev->irq); free_irq(dev->irq, (void *)dev); dev->irq = -1; } @@ -658,9 +656,6 @@ static void rtsx_release_resources(struct rtsx_dev *dev) if (dev->remap_addr) iounmap(dev->remap_addr); - pci_disable_device(dev->pci); - pci_release_regions(dev->pci); - rtsx_release_chip(dev->chip); kfree(dev->chip); } @@ -715,7 +710,7 @@ static void release_everything(struct rtsx_dev *dev) /* Thread to carry out delayed SCSI-device scanning */ static int rtsx_scan_thread(void *__dev) { - struct rtsx_dev *dev = (struct rtsx_dev *)__dev; + struct rtsx_dev *dev = __dev; struct rtsx_chip *chip = dev->chip; /* Wait for the timeout to expire or for a disconnect */ @@ -852,7 +847,7 @@ static int rtsx_probe(struct pci_dev *pci, dev_dbg(&pci->dev, "Realtek PCI-E card reader detected\n"); - err = pci_enable_device(pci); + err = pcim_enable_device(pci); if (err < 0) { dev_err(&pci->dev, "PCI enable device failed!\n"); return err; @@ -862,7 +857,6 @@ static int rtsx_probe(struct pci_dev *pci, if (err < 0) { dev_err(&pci->dev, "PCI request regions for %s failed!\n", CR_DRIVER_NAME); - pci_disable_device(pci); return err; } @@ -873,8 +867,6 @@ static int rtsx_probe(struct pci_dev *pci, host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev)); if (!host) { dev_err(&pci->dev, "Unable to allocate the scsi host\n"); - pci_release_regions(pci); - pci_disable_device(pci); return -ENOMEM; } @@ -882,7 +874,7 @@ static int rtsx_probe(struct pci_dev *pci, memset(dev, 0, sizeof(struct rtsx_dev)); dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL); - if (dev->chip == NULL) { + if (!dev->chip) { err = -ENOMEM; goto errout; } @@ -903,7 +895,7 @@ static int rtsx_probe(struct pci_dev *pci, (unsigned int)pci_resource_len(pci, 0)); dev->addr = pci_resource_start(pci, 0); dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0)); - if (dev->remap_addr == NULL) { + if (!dev->remap_addr) { dev_err(&pci->dev, "ioremap error\n"); err = -ENXIO; goto errout; @@ -918,7 +910,7 @@ static int rtsx_probe(struct pci_dev *pci, dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN, &dev->rtsx_resv_buf_addr, GFP_KERNEL); - if (dev->rtsx_resv_buf == NULL) { + if (!dev->rtsx_resv_buf) { dev_err(&pci->dev, "alloc dma buffer fail\n"); err = -ENXIO; goto errout; @@ -1011,8 +1003,6 @@ static void rtsx_remove(struct pci_dev *pci) quiesce_and_remove_host(dev); release_everything(dev); - - pci_set_drvdata(pci, NULL); } /* PCI IDs */ diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c index 60871f3022b1..d2031044ea34 100644 --- a/drivers/staging/rts5208/rtsx_scsi.c +++ b/drivers/staging/rts5208/rtsx_scsi.c @@ -507,7 +507,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip) } buf = vmalloc(scsi_bufflen(srb)); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -644,7 +644,7 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) } buf = vmalloc(scsi_bufflen(srb)); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -792,7 +792,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip) #endif buf = kmalloc(dataSize, GFP_KERNEL); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -1017,7 +1017,7 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip) buf_len = (scsi_bufflen(srb) > 12) ? 
0x14 : 12; buf = kmalloc(buf_len, GFP_KERNEL); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -1096,7 +1096,7 @@ static int read_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip) } buf = kmalloc(8, GFP_KERNEL); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -1206,7 +1206,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip) len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len); buf = vmalloc(len); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -1315,7 +1315,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip) len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len); buf = vmalloc(len); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -1410,7 +1410,7 @@ static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip) clear = srb->cmnd[2]; buf = vmalloc(scsi_bufflen(srb)); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -1931,20 +1931,15 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip) static int suit_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip) { - int result; - switch (srb->cmnd[3]) { case INIT_BATCHCMD: case ADD_BATCHCMD: case SEND_BATCHCMD: case GET_BATCHRSP: - result = rw_mem_cmd_buf(srb, chip); - break; + return rw_mem_cmd_buf(srb, chip); default: - result = TRANSPORT_ERROR; + return TRANSPORT_ERROR; } - - return result; } static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip) @@ -2035,7 +2030,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip) len); buf = vmalloc(len); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -2191,7 +2186,7 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip) len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len); buf = vmalloc(len); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -2295,7 +2290,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip) len = (u8)min_t(unsigned int, scsi_bufflen(srb), len); buf = vmalloc(len); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c index f27491e802ed..4d8e7c5c26d5 100644 --- a/drivers/staging/rts5208/rtsx_transport.c +++ b/drivers/staging/rts5208/rtsx_transport.c @@ -1,4 +1,5 @@ -/* Driver for Realtek PCI-Express card reader +/* + * Driver for Realtek PCI-Express card reader * * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. * @@ -30,74 +31,76 @@ * Scatter-gather transfer buffer access routines ***********************************************************************/ -/* Copy a buffer of length buflen to/from the srb's transfer buffer. +/* + * Copy a buffer of length buflen to/from the srb's transfer buffer. * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer * points to a list of s-g entries and we ignore srb->request_bufflen. * For non-scatter-gather transfers, srb->request_buffer points to the * transfer buffer itself and srb->request_bufflen is the buffer's length.) * Update the *index and *offset variables so that the next copy will - * pick up from where this one left off. */ + * pick up from where this one left off. 
+ */ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer, - unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index, - unsigned int *offset, enum xfer_buf_dir dir) + unsigned int buflen, + struct scsi_cmnd *srb, + unsigned int *index, + unsigned int *offset, + enum xfer_buf_dir dir) { unsigned int cnt; - /* If not using scatter-gather, just transfer the data directly. - * Make certain it will fit in the available buffer space. */ + /* If not using scatter-gather, just transfer the data directly. */ if (scsi_sg_count(srb) == 0) { + unsigned char *sgbuffer; + if (*offset >= scsi_bufflen(srb)) return 0; cnt = min(buflen, scsi_bufflen(srb) - *offset); + + sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset; + if (dir == TO_XFER_BUF) - memcpy((unsigned char *) scsi_sglist(srb) + *offset, - buffer, cnt); + memcpy(sgbuffer, buffer, cnt); else - memcpy(buffer, (unsigned char *) scsi_sglist(srb) + - *offset, cnt); + memcpy(buffer, sgbuffer, cnt); *offset += cnt; - /* Using scatter-gather. We have to go through the list one entry + /* + * Using scatter-gather. We have to go through the list one entry * at a time. Each s-g entry contains some number of pages, and - * each page has to be kmap()'ed separately. If the page is already - * in kernel-addressable memory then kmap() will return its address. - * If the page is not directly accessible -- such as a user buffer - * located in high memory -- then kmap() will map it to a temporary - * position in the kernel's virtual address space. */ + * each page has to be kmap()'ed separately. + */ } else { struct scatterlist *sg = - (struct scatterlist *) scsi_sglist(srb) + (struct scatterlist *)scsi_sglist(srb) + *index; - /* This loop handles a single s-g list entry, which may + /* + * This loop handles a single s-g list entry, which may * include multiple pages. Find the initial page structure * and the starting offset within the page, and update - * the *offset and *index values for the next loop. */ + * the *offset and *index values for the next loop. + */ cnt = 0; while (cnt < buflen && *index < scsi_sg_count(srb)) { struct page *page = sg_page(sg) + ((sg->offset + *offset) >> PAGE_SHIFT); - unsigned int poff = - (sg->offset + *offset) & (PAGE_SIZE-1); + unsigned int poff = (sg->offset + *offset) & + (PAGE_SIZE - 1); unsigned int sglen = sg->length - *offset; if (sglen > buflen - cnt) { - /* Transfer ends within this s-g entry */ sglen = buflen - cnt; *offset += sglen; } else { - /* Transfer continues to next s-g entry */ *offset = 0; ++*index; ++sg; } - /* Transfer the data for all the pages in this - * s-g entry. For each page: call kmap(), do the - * transfer, and call kunmap() immediately after. */ while (sglen > 0) { unsigned int plen = min(sglen, (unsigned int) PAGE_SIZE - poff); @@ -122,10 +125,12 @@ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer, return cnt; } -/* Store the contents of buffer into srb's transfer buffer and set the -* SCSI residue. */ +/* + * Store the contents of buffer into srb's transfer buffer and set the + * SCSI residue. 
+ */ void rtsx_stor_set_xfer_buf(unsigned char *buffer, - unsigned int buflen, struct scsi_cmnd *srb) + unsigned int buflen, struct scsi_cmnd *srb) { unsigned int index = 0, offset = 0; @@ -136,7 +141,7 @@ void rtsx_stor_set_xfer_buf(unsigned char *buffer, } void rtsx_stor_get_xfer_buf(unsigned char *buffer, - unsigned int buflen, struct scsi_cmnd *srb) + unsigned int buflen, struct scsi_cmnd *srb) { unsigned int index = 0, offset = 0; @@ -146,12 +151,12 @@ void rtsx_stor_get_xfer_buf(unsigned char *buffer, scsi_set_resid(srb, scsi_bufflen(srb) - buflen); } - /*********************************************************************** * Transport routines ***********************************************************************/ -/* Invoke the transport and basic error-handling/recovery methods +/* + * Invoke the transport and basic error-handling/recovery methods * * This is used to send the message to the device and receive the response. */ @@ -161,20 +166,21 @@ void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip) result = rtsx_scsi_handler(srb, chip); - /* if the command gets aborted by the higher layers, we need to - * short-circuit all other processing + /* + * if the command gets aborted by the higher layers, we need to + * short-circuit all other processing. */ if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) { dev_dbg(rtsx_dev(chip), "-- command was aborted\n"); srb->result = DID_ABORT << 16; - goto Handle_Errors; + goto handle_errors; } /* if there is a transport error, reset and don't auto-sense */ if (result == TRANSPORT_ERROR) { dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n"); srb->result = DID_ERROR << 16; - goto Handle_Errors; + goto handle_errors; } srb->result = SAM_STAT_GOOD; @@ -188,21 +194,18 @@ void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip) /* set the result so the higher layers expect this data */ srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, - (unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]), - sizeof(struct sense_data_t)); + (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)], + sizeof(struct sense_data_t)); } return; - /* Error and abort processing: try to resynchronize with the device - * by issuing a port reset. If that fails, try a class-specific - * device reset. 
*/ -Handle_Errors: +handle_errors: return; } void rtsx_add_cmd(struct rtsx_chip *chip, - u8 cmd_type, u16 reg_addr, u8 mask, u8 data) + u8 cmd_type, u16 reg_addr, u8 mask, u8 data) { u32 *cb = (u32 *)(chip->host_cmds_ptr); u32 val = 0; @@ -221,7 +224,7 @@ void rtsx_add_cmd(struct rtsx_chip *chip, void rtsx_send_cmd_no_wait(struct rtsx_chip *chip) { - u32 val = 1 << 31; + u32 val = BIT(31); rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr); @@ -235,7 +238,7 @@ int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout) { struct rtsx_dev *rtsx = chip->rtsx; struct completion trans_done; - u32 val = 1 << 31; + u32 val = BIT(31); long timeleft; int err = 0; @@ -321,9 +324,11 @@ static inline void rtsx_add_sg_tbl( } static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card, - struct scatterlist *sg, int num_sg, unsigned int *index, - unsigned int *offset, int size, - enum dma_data_direction dma_dir, int timeout) + struct scatterlist *sg, int num_sg, + unsigned int *index, + unsigned int *offset, int size, + enum dma_data_direction dma_dir, + int timeout) { struct rtsx_dev *rtsx = chip->rtsx; struct completion trans_done; @@ -334,7 +339,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card, struct scatterlist *sg_ptr; u32 val = TRIG_DMA; - if ((sg == NULL) || (num_sg <= 0) || !offset || !index) + if (!sg || (num_sg <= 0) || !offset || !index) return -EIO; if (dma_dir == DMA_TO_DEVICE) @@ -363,15 +368,16 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card, spin_unlock_irq(&rtsx->reg_lock); - sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); + sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir); resid = size; sg_ptr = sg; chip->sgi = 0; - /* Usually the next entry will be @sg@ + 1, but if this sg element + /* + * Usually the next entry will be @sg@ + 1, but if this sg element * is part of a chained scatterlist, it could jump to the start of * a new scatterlist array. So here we use sg_next to move to - * the proper sg + * the proper sg. 
*/ for (i = 0; i < *index; i++) sg_ptr = sg_next(sg_ptr); @@ -476,7 +482,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card, out: rtsx->done = NULL; rtsx->trans_state = STATE_TRANS_NONE; - dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); + dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir); if (err < 0) rtsx_stop_cmd(chip, card); @@ -485,8 +491,9 @@ out: } static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card, - struct scatterlist *sg, int num_sg, - enum dma_data_direction dma_dir, int timeout) + struct scatterlist *sg, int num_sg, + enum dma_data_direction dma_dir, + int timeout) { struct rtsx_dev *rtsx = chip->rtsx; struct completion trans_done; @@ -496,7 +503,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card, long timeleft; struct scatterlist *sg_ptr; - if ((sg == NULL) || (num_sg <= 0)) + if (!sg || (num_sg <= 0)) return -EIO; if (dma_dir == DMA_TO_DEVICE) @@ -525,7 +532,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card, spin_unlock_irq(&rtsx->reg_lock); - buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); + buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir); sg_ptr = sg; @@ -623,7 +630,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card, out: rtsx->done = NULL; rtsx->trans_state = STATE_TRANS_NONE; - dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); + dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir); if (err < 0) rtsx_stop_cmd(chip, card); @@ -632,17 +639,18 @@ out: } static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, - size_t len, enum dma_data_direction dma_dir, int timeout) + size_t len, enum dma_data_direction dma_dir, + int timeout) { struct rtsx_dev *rtsx = chip->rtsx; struct completion trans_done; dma_addr_t addr; u8 dir; int err = 0; - u32 val = 1 << 31; + u32 val = BIT(31); long timeleft; - if ((buf == NULL) || (len <= 0)) + if (!buf || (len <= 0)) return -EIO; if (dma_dir == DMA_TO_DEVICE) @@ -652,8 +660,8 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, else return -ENXIO; - addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir); - if (!addr) + addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir); + if (dma_mapping_error(&rtsx->pci->dev, addr)) return -ENOMEM; if (card == SD_CARD) @@ -706,7 +714,7 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, out: rtsx->done = NULL; rtsx->trans_state = STATE_TRANS_NONE; - dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir); + dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir); if (err < 0) rtsx_stop_cmd(chip, card); @@ -715,9 +723,9 @@ out: } int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, - void *buf, size_t len, int use_sg, unsigned int *index, - unsigned int *offset, enum dma_data_direction dma_dir, - int timeout) + void *buf, size_t len, int use_sg, + unsigned int *index, unsigned int *offset, + enum dma_data_direction dma_dir, int timeout) { int err = 0; @@ -725,13 +733,16 @@ int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) return -EIO; - if (use_sg) - err = rtsx_transfer_sglist_adma_partial(chip, card, - (struct scatterlist *)buf, use_sg, - index, offset, (int)len, dma_dir, timeout); - else + if (use_sg) { + struct scatterlist *sg = buf; + + err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg, + index, offset, (int)len, + dma_dir, timeout); + } else { err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout); 
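The rtsx_transfer_buf() hunk above also changes how a failed DMA mapping is detected: the old "if (!addr)" test after dma_map_single() is replaced with dma_mapping_error(), which is the check the DMA API defines for this purpose (a zero bus address can be a perfectly valid mapping on some platforms). A minimal sketch of that pattern, with illustrative names (my_dev, my_transfer) that are not part of the driver:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int my_transfer(struct device *my_dev, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	dma_addr_t addr;

	/* Map the buffer for DMA and check for failure the documented way. */
	addr = dma_map_single(my_dev, buf, len, dir);
	if (dma_mapping_error(my_dev, addr))
		return -ENOMEM;

	/* ... hand "addr" to the hardware and wait for completion ... */

	dma_unmap_single(my_dev, addr, len, dir);
	return 0;
}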
+ } if (err < 0) { if (RTSX_TST_DELINK(chip)) { RTSX_CLR_DELINK(chip); @@ -744,7 +755,7 @@ int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, } int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len, - int use_sg, enum dma_data_direction dma_dir, int timeout) + int use_sg, enum dma_data_direction dma_dir, int timeout) { int err = 0; @@ -756,8 +767,8 @@ int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len, if (use_sg) { err = rtsx_transfer_sglist_adma(chip, card, - (struct scatterlist *)buf, - use_sg, dma_dir, timeout); + (struct scatterlist *)buf, + use_sg, dma_dir, timeout); } else { err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout); } diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c index d6c498209b2c..87d697623cba 100644 --- a/drivers/staging/rts5208/sd.c +++ b/drivers/staging/rts5208/sd.c @@ -303,7 +303,7 @@ static int sd_read_data(struct rtsx_chip *chip, if (cmd_len) { dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n", cmd[0] - 0x40); - for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++) + for (i = 0; i < (min(cmd_len, 6)); i++) rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0 + i, 0xFF, cmd[i]); } @@ -383,7 +383,7 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode, if (cmd_len) { dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n", cmd[0] - 0x40); - for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++) { + for (i = 0; i < (min(cmd_len, 6)); i++) { rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0 + i, 0xFF, cmd[i]); } @@ -4260,10 +4260,10 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip) return TRANSPORT_FAILED; } - if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) || - (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) || - (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) || - (0x64 != srb->cmnd[8])) { + if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) || + (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) || + (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) || + (srb->cmnd[8] != 0x64)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -4284,7 +4284,7 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip) return TRANSPORT_FAILED; } - buf[5] = (1 == CHK_SD(sd_card)) ? 0x01 : 0x02; + buf[5] = (CHK_SD(sd_card) == 1) ? 
0x01 : 0x02; if (chip->card_wp & SD_CARD) buf[5] |= 0x80; @@ -4588,7 +4588,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) cmd[4] = srb->cmnd[6]; buf = kmalloc(data_len, GFP_KERNEL); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -4871,7 +4871,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip) u8 *buf; buf = kmalloc(data_len, GFP_KERNEL); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return TRANSPORT_ERROR; } @@ -5176,10 +5176,10 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip) return TRANSPORT_FAILED; } - if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) || - (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) || - (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) || - (0x64 != srb->cmnd[8])) { + if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) || + (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) || + (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) || + (srb->cmnd[8] != 0x64)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); rtsx_trace(chip); return TRANSPORT_FAILED; @@ -5188,7 +5188,7 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip) switch (srb->cmnd[1] & 0x0F) { case 0: #ifdef SUPPORT_SD_LOCK - if (0x64 == srb->cmnd[9]) + if (srb->cmnd[9] == 0x64) sd_card->sd_lock_status |= SD_SDR_RST; #endif retval = reset_sd_card(chip); diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c index e67e7ecc2cbd..26eb2a184f91 100644 --- a/drivers/staging/rts5208/spi.c +++ b/drivers/staging/rts5208/spi.c @@ -420,7 +420,6 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr) return STATUS_SUCCESS; } - int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val) { int retval; @@ -516,7 +515,6 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val) return STATUS_SUCCESS; } - int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct spi_info *spi = &(chip->spi); @@ -664,7 +662,7 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip) } buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL); - if (buf == NULL) { + if (!buf) { rtsx_trace(chip); return STATUS_ERROR; } diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c index 10fea7bb8f30..fc1dfe0991d4 100644 --- a/drivers/staging/rts5208/xd.c +++ b/drivers/staging/rts5208/xd.c @@ -903,14 +903,10 @@ static inline void free_zone(struct zone_entry *zone) zone->set_index = 0; zone->get_index = 0; zone->unused_blk_cnt = 0; - if (zone->l2p_table) { - vfree(zone->l2p_table); - zone->l2p_table = NULL; - } - if (zone->free_table) { - vfree(zone->free_table); - zone->free_table = NULL; - } + vfree(zone->l2p_table); + zone->l2p_table = NULL; + vfree(zone->free_table); + zone->free_table = NULL; } static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk) @@ -1435,7 +1431,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) if (zone->l2p_table == NULL) { zone->l2p_table = vmalloc(2000); - if (zone->l2p_table == NULL) { + if (!zone->l2p_table) { rtsx_trace(chip); goto Build_Fail; } @@ -1444,7 +1440,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) if (zone->free_table == NULL) { zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2); - if (zone->free_table == NULL) { + if (!zone->free_table) { rtsx_trace(chip); goto Build_Fail; } @@ -1588,14 +1584,10 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no) return STATUS_SUCCESS; Build_Fail: - if (zone->l2p_table) { - vfree(zone->l2p_table); - 
zone->l2p_table = NULL; - } - if (zone->free_table) { - vfree(zone->free_table); - zone->free_table = NULL; - } + vfree(zone->l2p_table); + zone->l2p_table = NULL; + vfree(zone->free_table); + zone->free_table = NULL; return STATUS_FAIL; } @@ -2251,14 +2243,10 @@ void xd_free_l2p_tbl(struct rtsx_chip *chip) if (xd_card->zone != NULL) { for (i = 0; i < xd_card->zone_cnt; i++) { - if (xd_card->zone[i].l2p_table != NULL) { - vfree(xd_card->zone[i].l2p_table); - xd_card->zone[i].l2p_table = NULL; - } - if (xd_card->zone[i].free_table != NULL) { - vfree(xd_card->zone[i].free_table); - xd_card->zone[i].free_table = NULL; - } + vfree(xd_card->zone[i].l2p_table); + xd_card->zone[i].l2p_table = NULL; + vfree(xd_card->zone[i].free_table); + xd_card->zone[i].free_table = NULL; } vfree(xd_card->zone); xd_card->zone = NULL; diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c index bd1e15caae4e..e19ac4368651 100644 --- a/drivers/staging/skein/threefish_block.c +++ b/drivers/staging/skein/threefish_block.c @@ -1,3 +1,4 @@ +#include <linux/bitops.h> #include "threefish_api.h" void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, @@ -13,479 +14,479 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input, b1 += k1 + t0; b0 += b1 + k0; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k3; b2 += b3 + k2 + t1; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k2 + t1; b0 += b1 + k1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k4 + 1; b2 += b3 + k3 + t2; - b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2; + b3 = rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; b1 += k3 + t2; b0 += b1 + k2; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k0 + 2; b2 += b3 + k4 + t0; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k4 + t0; b0 += b1 + k3; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k1 + 3; b2 += b3 + k0 + t1; - b3 = ((b3 << 33) | 
(b3 >> (64 - 33))) ^ b2; + b3 = rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; b1 += k0 + t1; b0 += b1 + k4; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k2 + 4; b2 += b3 + k1 + t2; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k1 + t2; b0 += b1 + k0; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k3 + 5; b2 += b3 + k2 + t0; - b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2; + b3 = rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; b1 += k2 + t0; b0 += b1 + k1; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k4 + 6; b2 += b3 + k3 + t1; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k3 + t1; b0 += b1 + k2; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k0 + 7; b2 += b3 + k4 + t2; - b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2; + b3 = rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; b1 += k4 + t2; b0 += b1 + k3; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k1 + 8; b2 += b3 + k0 + t0; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ 
b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k0 + t0; b0 += b1 + k4; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k2 + 9; b2 += b3 + k1 + t1; - b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2; + b3 = rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; b1 += k1 + t1; b0 += b1 + k0; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k3 + 10; b2 += b3 + k2 + t2; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k2 + t2; b0 += b1 + k1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k4 + 11; b2 += b3 + k3 + t0; - b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2; + b3 = rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; b1 += k3 + t0; b0 += b1 + k2; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k0 + 12; b2 += b3 + k4 + t1; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k4 + t1; b0 += b1 + k3; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k1 + 13; b2 += b3 + k0 + t2; - b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2; + b3 = 
rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; b1 += k0 + t2; b0 += b1 + k4; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k2 + 14; b2 += b3 + k1 + t0; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k1 + t0; b0 += b1 + k0; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k3 + 15; b2 += b3 + k2 + t1; - b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2; + b3 = rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; b1 += k2 + t1; b0 += b1 + k1; - b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0; + b1 = rol64(b1, 14) ^ b0; b3 += k4 + 16; b2 += b3 + k3 + t2; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2; + b3 = rol64(b3, 16) ^ b2; b0 += b3; - b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0; + b3 = rol64(b3, 52) ^ b0; b2 += b1; - b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2; + b1 = rol64(b1, 57) ^ b2; b0 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0; + b1 = rol64(b1, 23) ^ b0; b2 += b3; - b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2; + b3 = rol64(b3, 40) ^ b2; b0 += b3; - b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0; + b3 = rol64(b3, 5) ^ b0; b2 += b1; - b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2; + b1 = rol64(b1, 37) ^ b2; b1 += k3 + t2; b0 += b1 + k2; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0; + b1 = rol64(b1, 25) ^ b0; b3 += k0 + 17; b2 += b3 + k4 + t0; - b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2; + b3 = rol64(b3, 33) ^ b2; b0 += b3; - b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0; + b3 = rol64(b3, 46) ^ b0; b2 += b1; - b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2; + b1 = rol64(b1, 12) ^ b2; b0 += b1; - b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0; + b1 = rol64(b1, 58) ^ b0; b2 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2; + b3 = rol64(b3, 22) ^ b2; b0 += b3; - b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0; + b3 = rol64(b3, 32) ^ b0; b2 += b1; - b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2; + b1 = rol64(b1, 32) ^ b2; output[0] = b0 + k3; output[1] = b1 + k4 + t0; @@ -1153,939 +1154,939 @@ void threefish_encrypt_512(struct threefish_key *key_ctx, u64 *input, b1 += k1; b0 += b1 + k0; - b1 = ((b1 << 46) | (b1 >> (64 - 
46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k3; b2 += b3 + k2; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k5 + t0; b4 += b5 + k4; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = rol64(b5, 19) ^ b4; b7 += k7; b6 += b7 + k6 + t1; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k2; b0 += b1 + k1; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k4; b2 += b3 + k3; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 = rol64(b3, 30) ^ b2; b5 += k6 + t1; b4 += b5 + k5; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k8 + 1; b6 += b7 + k7 + t2; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; b1 += k3; b0 += b1 + k2; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k5; b2 += b3 + k4; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k7 + t2; b4 += b5 + k6; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = rol64(b5, 19) ^ b4; b7 += k0 + 2; b6 += b7 + k8 + t0; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ 
b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k4; b0 += b1 + k3; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k6; b2 += b3 + k5; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 = rol64(b3, 30) ^ b2; b5 += k8 + t0; b4 += b5 + k7; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k1 + 3; b6 += b7 + k0 + t1; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; b1 += k5; b0 += b1 + k4; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k7; b2 += b3 + k6; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k0 + t1; b4 += b5 + k8; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = rol64(b5, 19) ^ b4; b7 += k2 + 4; b6 += b7 + k1 + t2; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k6; b0 += b1 + k5; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k8; b2 += b3 + k7; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 
= rol64(b3, 30) ^ b2; b5 += k1 + t2; b4 += b5 + k0; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k3 + 5; b6 += b7 + k2 + t0; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; b1 += k7; b0 += b1 + k6; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k0; b2 += b3 + k8; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k2 + t0; b4 += b5 + k1; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = rol64(b5, 19) ^ b4; b7 += k4 + 6; b6 += b7 + k3 + t1; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k8; b0 += b1 + k7; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k1; b2 += b3 + k0; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 = rol64(b3, 30) ^ b2; b5 += k3 + t1; b4 += b5 + k2; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k5 + 7; b6 += b7 + k4 + t2; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 
39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; b1 += k0; b0 += b1 + k8; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k2; b2 += b3 + k1; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k4 + t2; b4 += b5 + k3; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = rol64(b5, 19) ^ b4; b7 += k6 + 8; b6 += b7 + k5 + t0; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k1; b0 += b1 + k0; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k3; b2 += b3 + k2; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 = rol64(b3, 30) ^ b2; b5 += k5 + t0; b4 += b5 + k4; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k7 + 9; b6 += b7 + k6 + t1; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; b1 += k2; b0 += b1 + k1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k4; b2 += b3 + k3; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k6 + t1; b4 += b5 + k5; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = 
rol64(b5, 19) ^ b4; b7 += k8 + 10; b6 += b7 + k7 + t2; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k3; b0 += b1 + k2; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k5; b2 += b3 + k4; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 = rol64(b3, 30) ^ b2; b5 += k7 + t2; b4 += b5 + k6; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k0 + 11; b6 += b7 + k8 + t0; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; b1 += k4; b0 += b1 + k3; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k6; b2 += b3 + k5; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k8 + t0; b4 += b5 + k7; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = rol64(b5, 19) ^ b4; b7 += k1 + 12; b6 += b7 + k0 + t1; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 
39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k5; b0 += b1 + k4; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k7; b2 += b3 + k6; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 = rol64(b3, 30) ^ b2; b5 += k0 + t1; b4 += b5 + k8; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k2 + 13; b6 += b7 + k1 + t2; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; b1 += k6; b0 += b1 + k5; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k8; b2 += b3 + k7; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k1 + t2; b4 += b5 + k0; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = rol64(b5, 19) ^ b4; b7 += k3 + 14; b6 += b7 + k2 + t0; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k7; b0 += b1 + k6; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k0; b2 += b3 + k8; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 = rol64(b3, 30) ^ b2; b5 += k2 + t0; b4 += b5 + k1; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k4 + 15; b6 += b7 + k3 + t1; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = 
rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; b1 += k8; b0 += b1 + k7; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0; + b1 = rol64(b1, 46) ^ b0; b3 += k1; b2 += b3 + k0; - b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2; + b3 = rol64(b3, 36) ^ b2; b5 += k3 + t1; b4 += b5 + k2; - b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4; + b5 = rol64(b5, 19) ^ b4; b7 += k5 + 16; b6 += b7 + k4 + t2; - b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6; + b7 = rol64(b7, 37) ^ b6; b2 += b1; - b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2; + b1 = rol64(b1, 33) ^ b2; b4 += b7; - b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4; + b7 = rol64(b7, 27) ^ b4; b6 += b5; - b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6; + b5 = rol64(b5, 14) ^ b6; b0 += b3; - b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0; + b3 = rol64(b3, 42) ^ b0; b4 += b1; - b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4; + b1 = rol64(b1, 17) ^ b4; b6 += b3; - b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6; + b3 = rol64(b3, 49) ^ b6; b0 += b5; - b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0; + b5 = rol64(b5, 36) ^ b0; b2 += b7; - b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2; + b7 = rol64(b7, 39) ^ b2; b6 += b1; - b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6; + b1 = rol64(b1, 44) ^ b6; b0 += b7; - b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0; + b7 = rol64(b7, 9) ^ b0; b2 += b5; - b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2; + b5 = rol64(b5, 54) ^ b2; b4 += b3; - b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4; + b3 = rol64(b3, 56) ^ b4; b1 += k0; b0 += b1 + k8; - b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0; + b1 = rol64(b1, 39) ^ b0; b3 += k2; b2 += b3 + k1; - b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2; + b3 = rol64(b3, 30) ^ b2; b5 += k4 + t2; b4 += b5 + k3; - b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4; + b5 = rol64(b5, 34) ^ b4; b7 += k6 + 17; b6 += b7 + k5 + t0; - b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6; + b7 = rol64(b7, 24) ^ b6; b2 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2; + b1 = rol64(b1, 13) ^ b2; b4 += b7; - b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4; + b7 = rol64(b7, 50) ^ b4; b6 += b5; - b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6; + b5 = rol64(b5, 10) ^ b6; b0 += b3; - b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0; + b3 = rol64(b3, 17) ^ b0; b4 += b1; - b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4; + b1 = rol64(b1, 25) ^ b4; b6 += b3; - b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6; + b3 = rol64(b3, 29) ^ b6; b0 += b5; - b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0; + b5 = rol64(b5, 39) ^ b0; b2 += b7; - b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2; + b7 = rol64(b7, 43) ^ b2; b6 += b1; - b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6; + b1 = rol64(b1, 
8) ^ b6; b0 += b7; - b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0; + b7 = rol64(b7, 35) ^ b0; b2 += b5; - b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2; + b5 = rol64(b5, 56) ^ b2; b4 += b3; - b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4; + b3 = rol64(b3, 22) ^ b4; output[0] = b0 + k0; output[1] = b1 + k1; @@ -3383,2083 +3384,2083 @@ void threefish_encrypt_1024(struct threefish_key *key_ctx, u64 *input, b1 += k1; b0 += b1 + k0; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k3; b2 += b3 + k2; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k5; b4 += b5 + k4; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k7; b6 += b7 + k6; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k9; b8 += b9 + k8; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k11; b10 += b11 + k10; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k13 + t0; b12 += b13 + k12; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k15; b14 += b15 + k14 + t1; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k2; b0 += b1 + k1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k4; b2 += b3 + k3; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k6; b4 += b5 + k5; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; 
+ b5 = rol64(b5, 37) ^ b4; b7 += k8; b6 += b7 + k7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k10; b8 += b9 + k9; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k12; b10 += b11 + k11; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k14 + t1; b12 += b13 + k13; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k16 + 1; b14 += b15 + k15 + t2; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; b1 += k3; b0 += b1 + k2; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k5; b2 += b3 + k4; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k7; b4 += b5 + k6; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k9; b6 += b7 + k8; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k11; b8 += b9 + k10; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k13; b10 += b11 + k12; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k15 + t2; b12 += b13 + k14; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k0 + 2; b14 += b15 + k16 + t0; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += 
b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k4; b0 += b1 + k3; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k6; b2 += b3 + k5; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k8; b4 += b5 + k7; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k10; b6 += b7 + k9; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k12; b8 += b9 + k11; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k14; b10 += b11 + k13; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k16 + t0; b12 += b13 + k15; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k1 + 3; b14 += b15 + k0 + t1; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; 
+ b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; b1 += k5; b0 += b1 + k4; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k7; b2 += b3 + k6; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k9; b4 += b5 + k8; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k11; b6 += b7 + k10; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k13; b8 += b9 + k12; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k15; b10 += b11 + k14; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k0 + t1; b12 += b13 + k16; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k2 + 4; b14 += b15 + k1 + t2; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = 
((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k6; b0 += b1 + k5; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k8; b2 += b3 + k7; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k10; b4 += b5 + k9; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k12; b6 += b7 + k11; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k14; b8 += b9 + k13; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k16; b10 += b11 + k15; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k1 + t2; b12 += b13 + k0; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k3 + 5; b14 += b15 + k2 + t0; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = 
rol64(b7, 20) ^ b12; b1 += k7; b0 += b1 + k6; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k9; b2 += b3 + k8; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k11; b4 += b5 + k10; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k13; b6 += b7 + k12; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k15; b8 += b9 + k14; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k0; b10 += b11 + k16; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k2 + t0; b12 += b13 + k1; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k4 + 6; b14 += b15 + k3 + t1; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k8; b0 += b1 + k7; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k10; b2 += b3 + k9; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k12; b4 += b5 + k11; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k14; b6 += b7 + k13; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k16; b8 += b9 + k15; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k1; b10 += b11 + k0; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k3 + t1; b12 += b13 + k2; - 
b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k5 + 7; b14 += b15 + k4 + t2; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; b1 += k9; b0 += b1 + k8; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k11; b2 += b3 + k10; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k13; b4 += b5 + k12; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k15; b6 += b7 + k14; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k0; b8 += b9 + k16; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k2; b10 += b11 + k1; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k4 + t2; b12 += b13 + k3; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k6 + 8; b14 += b15 + k5 + t0; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 
18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k10; b0 += b1 + k9; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k12; b2 += b3 + k11; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k14; b4 += b5 + k13; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k16; b6 += b7 + k15; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k1; b8 += b9 + k0; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k3; b10 += b11 + k2; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k5 + t0; b12 += b13 + k4; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k7 + 9; b14 += b15 + k6 + t1; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 
+= b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; b1 += k11; b0 += b1 + k10; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k13; b2 += b3 + k12; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k15; b4 += b5 + k14; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k0; b6 += b7 + k16; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k2; b8 += b9 + k1; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k4; b10 += b11 + k3; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k6 + t1; b12 += b13 + k5; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k8 + 10; b14 += b15 + k7 + t2; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 
47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k12; b0 += b1 + k11; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k14; b2 += b3 + k13; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k16; b4 += b5 + k15; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k1; b6 += b7 + k0; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k3; b8 += b9 + k2; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k5; b10 += b11 + k4; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k7 + t2; b12 += b13 + k6; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k9 + 11; b14 += b15 + k8 + t0; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; b1 += k13; b0 += b1 + k12; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k15; b2 += b3 + k14; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k0; b4 += b5 + k16; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k2; b6 += b7 + k1; - b7 = ((b7 << 47) | (b7 >> (64 - 
47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k4; b8 += b9 + k3; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k6; b10 += b11 + k5; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k8 + t0; b12 += b13 + k7; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k10 + 12; b14 += b15 + k9 + t1; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k14; b0 += b1 + k13; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k16; b2 += b3 + k15; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k1; b4 += b5 + k0; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k3; b6 += b7 + k2; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k5; b8 += b9 + k4; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k7; b10 += b11 + k6; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k9 + t1; b12 += b13 + k8; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k11 + 13; b14 += b15 + k10 + t2; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += 
b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; b1 += k15; b0 += b1 + k14; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k0; b2 += b3 + k16; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k2; b4 += b5 + k1; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k4; b6 += b7 + k3; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k6; b8 += b9 + k5; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k8; b10 += b11 + k7; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k10 + t2; b12 += b13 + k9; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k12 + 14; b14 += b15 + k11 + t0; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = 
rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k16; b0 += b1 + k15; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k1; b2 += b3 + k0; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k3; b4 += b5 + k2; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k5; b6 += b7 + k4; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k7; b8 += b9 + k6; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k9; b10 += b11 + k8; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k11 + t0; b12 += b13 + k10; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k13 + 15; b14 += b15 + k12 + t1; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 
<< 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; b1 += k0; b0 += b1 + k16; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k2; b2 += b3 + k1; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k4; b4 += b5 + k3; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k6; b6 += b7 + k5; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k8; b8 += b9 + k7; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k10; b10 += b11 + k9; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k12 + t1; b12 += b13 + k11; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k14 + 16; b14 += b15 + k13 + t2; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k1; b0 += b1 + k0; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = 
rol64(b1, 41) ^ b0; b3 += k3; b2 += b3 + k2; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k5; b4 += b5 + k4; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k7; b6 += b7 + k6; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k9; b8 += b9 + k8; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k11; b10 += b11 + k10; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k13 + t2; b12 += b13 + k12; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k15 + 17; b14 += b15 + k14 + t0; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; b1 += k2; b0 += b1 + k1; - b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0; + b1 = rol64(b1, 24) ^ b0; b3 += k4; b2 += b3 + k3; - b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2; + b3 = rol64(b3, 13) ^ b2; b5 += k6; b4 += b5 + k5; - b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4; + b5 = rol64(b5, 8) ^ b4; b7 += k8; b6 += b7 + k7; - b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6; + b7 = rol64(b7, 47) ^ b6; b9 += k10; b8 += b9 + k9; - b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8; + b9 = rol64(b9, 8) ^ b8; b11 += k12; b10 += b11 + k11; - b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10; + b11 = rol64(b11, 17) ^ b10; b13 += k14 + t0; b12 += b13 + k13; - b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12; + b13 = rol64(b13, 22) ^ b12; b15 += k16 + 18; b14 += 
b15 + k15 + t1; - b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14; + b15 = rol64(b15, 37) ^ b14; b0 += b9; - b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0; + b9 = rol64(b9, 38) ^ b0; b2 += b13; - b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2; + b13 = rol64(b13, 19) ^ b2; b6 += b11; - b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6; + b11 = rol64(b11, 10) ^ b6; b4 += b15; - b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4; + b15 = rol64(b15, 55) ^ b4; b10 += b7; - b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10; + b7 = rol64(b7, 49) ^ b10; b12 += b3; - b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12; + b3 = rol64(b3, 18) ^ b12; b14 += b5; - b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14; + b5 = rol64(b5, 23) ^ b14; b8 += b1; - b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8; + b1 = rol64(b1, 52) ^ b8; b0 += b7; - b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0; + b7 = rol64(b7, 33) ^ b0; b2 += b5; - b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2; + b5 = rol64(b5, 4) ^ b2; b4 += b3; - b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4; + b3 = rol64(b3, 51) ^ b4; b6 += b1; - b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6; + b1 = rol64(b1, 13) ^ b6; b12 += b15; - b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12; + b15 = rol64(b15, 34) ^ b12; b14 += b13; - b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14; + b13 = rol64(b13, 41) ^ b14; b8 += b11; - b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8; + b11 = rol64(b11, 59) ^ b8; b10 += b9; - b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10; + b9 = rol64(b9, 17) ^ b10; b0 += b15; - b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0; + b15 = rol64(b15, 5) ^ b0; b2 += b11; - b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2; + b11 = rol64(b11, 20) ^ b2; b6 += b13; - b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6; + b13 = rol64(b13, 48) ^ b6; b4 += b9; - b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4; + b9 = rol64(b9, 41) ^ b4; b14 += b1; - b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14; + b1 = rol64(b1, 47) ^ b14; b8 += b5; - b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8; + b5 = rol64(b5, 28) ^ b8; b10 += b3; - b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10; + b3 = rol64(b3, 16) ^ b10; b12 += b7; - b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12; + b7 = rol64(b7, 25) ^ b12; b1 += k3; b0 += b1 + k2; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0; + b1 = rol64(b1, 41) ^ b0; b3 += k5; b2 += b3 + k4; - b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2; + b3 = rol64(b3, 9) ^ b2; b5 += k7; b4 += b5 + k6; - b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4; + b5 = rol64(b5, 37) ^ b4; b7 += k9; b6 += b7 + k8; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6; + b7 = rol64(b7, 31) ^ b6; b9 += k11; b8 += b9 + k10; - b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8; + b9 = rol64(b9, 12) ^ b8; b11 += k13; b10 += b11 + k12; - b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10; + b11 = rol64(b11, 47) ^ b10; b13 += k15 + t1; b12 += b13 + k14; - b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12; + b13 = rol64(b13, 44) ^ b12; b15 += k0 + 19; b14 += b15 + k16 + t2; - b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14; + b15 = rol64(b15, 30) ^ b14; b0 += b9; - b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0; + b9 = rol64(b9, 16) ^ b0; b2 += b13; - b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2; + b13 = rol64(b13, 34) ^ b2; b6 += b11; - b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6; + b11 = rol64(b11, 56) ^ b6; b4 += b15; - b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4; + b15 = rol64(b15, 51) ^ b4; b10 += b7; - b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10; + b7 = rol64(b7, 4) ^ b10; b12 += b3; - b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12; + b3 = rol64(b3, 53) ^ b12; b14 += b5; - b5 = ((b5 << 42) | (b5 >> 
(64 - 42))) ^ b14; + b5 = rol64(b5, 42) ^ b14; b8 += b1; - b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8; + b1 = rol64(b1, 41) ^ b8; b0 += b7; - b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0; + b7 = rol64(b7, 31) ^ b0; b2 += b5; - b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2; + b5 = rol64(b5, 44) ^ b2; b4 += b3; - b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4; + b3 = rol64(b3, 47) ^ b4; b6 += b1; - b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6; + b1 = rol64(b1, 46) ^ b6; b12 += b15; - b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12; + b15 = rol64(b15, 19) ^ b12; b14 += b13; - b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14; + b13 = rol64(b13, 42) ^ b14; b8 += b11; - b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8; + b11 = rol64(b11, 44) ^ b8; b10 += b9; - b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10; + b9 = rol64(b9, 25) ^ b10; b0 += b15; - b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0; + b15 = rol64(b15, 9) ^ b0; b2 += b11; - b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2; + b11 = rol64(b11, 48) ^ b2; b6 += b13; - b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6; + b13 = rol64(b13, 35) ^ b6; b4 += b9; - b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4; + b9 = rol64(b9, 52) ^ b4; b14 += b1; - b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14; + b1 = rol64(b1, 23) ^ b14; b8 += b5; - b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8; + b5 = rol64(b5, 31) ^ b8; b10 += b3; - b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10; + b3 = rol64(b3, 37) ^ b10; b12 += b7; - b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12; + b7 = rol64(b7, 20) ^ b12; output[0] = b0 + k3; output[1] = b1 + k4; diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h index c95b3abad646..cc0afeeb68c1 100644 --- a/drivers/staging/slicoss/slic.h +++ b/drivers/staging/slicoss/slic.h @@ -478,6 +478,8 @@ struct adapter { u32 max_isr_xmits; u32 rcv_interrupt_yields; u32 intagg_period; + u32 intagg_delay; + u32 dynamic_intagg; struct inicpm_state *inicpm_info; void *pinicpm_info; struct slic_ifevents if_events; diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c index b23a2d1f61a2..6d50fc4fd02e 100644 --- a/drivers/staging/slicoss/slicoss.c +++ b/drivers/staging/slicoss/slicoss.c @@ -58,9 +58,9 @@ #define DEBUG_MICROCODE 1 #define DBG 1 #define SLIC_INTERRUPT_PROCESS_LIMIT 1 -#define SLIC_OFFLOAD_IP_CHECKSUM 1 -#define STATS_TIMER_INTERVAL 2 -#define PING_TIMER_INTERVAL 1 +#define SLIC_OFFLOAD_IP_CHECKSUM 1 +#define STATS_TIMER_INTERVAL 2 +#define PING_TIMER_INTERVAL 1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> @@ -102,8 +102,7 @@ static char *slic_banner = "Alacritech SLIC Technology(tm) Server and Storage Ac static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00"; static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL }; -static int intagg_delay = 100; -static u32 dynamic_intagg; +#define DEFAULT_INTAGG_DELAY 100 static unsigned int rcv_count; #define DRV_NAME "slicoss" @@ -119,17 +118,14 @@ MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_LICENSE("Dual BSD/GPL"); -module_param(dynamic_intagg, int, 0); -MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting"); -module_param(intagg_delay, int, 0); -MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay"); - static const struct pci_device_id slic_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) }, { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) }, { 0 } }; +static struct ethtool_ops slic_ethtool_ops; + MODULE_DEVICE_TABLE(pci, slic_pci_tbl); static 
inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush) @@ -549,14 +545,6 @@ static int slic_card_download(struct adapter *adapter) instruction = *(u32 *)(fw->data + index); index += 4; - /* Check SRAM location zero. If it is non-zero. Abort.*/ - /* - * failure = readl((u32 __iomem *)&slic_regs->slic_reset); - * if (failure) { - * release_firmware(fw); - * return -EIO; - * } - */ } } release_firmware(fw); @@ -796,7 +784,6 @@ static bool slic_mac_filter(struct adapter *adapter, return true; } return false; - } static int slic_mac_set_address(struct net_device *dev, void *ptr) @@ -884,7 +871,7 @@ static int slic_upr_queue_request(struct adapter *adapter, struct slic_upr *upr; struct slic_upr *uprqueue; - upr = kmalloc(sizeof(struct slic_upr), GFP_ATOMIC); + upr = kmalloc(sizeof(*upr), GFP_ATOMIC); if (!upr) return -ENOMEM; @@ -911,11 +898,6 @@ static void slic_upr_start(struct adapter *adapter) { struct slic_upr *upr; __iomem struct slic_regs *slic_regs = adapter->slic_regs; -/* - * char * ptr1; - * char * ptr2; - * uint cmdoffset; - */ upr = adapter->upr_list; if (!upr) return; @@ -1773,7 +1755,6 @@ static void slic_init_cleanup(struct adapter *adapter) if (adapter->intrregistered) { adapter->intrregistered = 0; free_irq(adapter->netdev->irq, adapter->netdev); - } if (adapter->pshmem) { pci_free_consistent(adapter->pcidev, @@ -1810,8 +1791,8 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address) } /* Doesn't already exist. Allocate a structure to hold it */ - mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC); - if (mcaddr == NULL) + mcaddr = kmalloc(sizeof(*mcaddr), GFP_ATOMIC); + if (!mcaddr) return 1; ether_addr_copy(mcaddr->address, address); @@ -1892,7 +1873,7 @@ static void slic_xmit_fail(struct adapter *adapter, { if (adapter->xmitq_full) netif_stop_queue(adapter->netdev); - if ((cmd == NULL) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) { + if ((!cmd) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) { switch (status) { case XMIT_FAIL_LINK_STATE: dev_err(&adapter->netdev->dev, @@ -2860,7 +2841,7 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter) if (slic_global.dynamic_intagg) slic_intagg_set(adapter, 0); else - slic_intagg_set(adapter, intagg_delay); + slic_intagg_set(adapter, adapter->intagg_delay); /* * Initialize ping status to "ok" @@ -2881,6 +2862,26 @@ card_init_err: return status; } +static int slic_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + struct adapter *adapter = netdev_priv(dev); + + adapter->intagg_delay = coalesce->rx_coalesce_usecs; + adapter->dynamic_intagg = coalesce->use_adaptive_rx_coalesce; + return 0; +} + +static int slic_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + struct adapter *adapter = netdev_priv(dev); + + coalesce->rx_coalesce_usecs = adapter->intagg_delay; + coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg; + return 0; +} + static void slic_init_driver(void) { if (slic_first_init) { @@ -2907,9 +2908,8 @@ static void slic_init_adapter(struct net_device *netdev, adapter->functionnumber = (pcidev->devfn & 0x7); adapter->slic_regs = memaddr; adapter->irq = pcidev->irq; -/* adapter->netdev = netdev;*/ adapter->chipid = chip_idx; - adapter->port = 0; /*adapter->functionnumber;*/ + adapter->port = 0; adapter->cardindex = adapter->port; spin_lock_init(&adapter->upr_lock); spin_lock_init(&adapter->bit64reglock); @@ -2982,8 +2982,8 @@ static u32 slic_card_locate(struct adapter *adapter) /* Initialize a new card structure if need be 
*/ if (card_hostid == SLIC_HOSTID_DEFAULT) { - card = kzalloc(sizeof(struct sliccard), GFP_KERNEL); - if (card == NULL) + card = kzalloc(sizeof(*card), GFP_KERNEL); + if (!card) return -ENOMEM; card->next = slic_global.slic_card; @@ -3033,7 +3033,7 @@ static u32 slic_card_locate(struct adapter *adapter) } if (!physcard) { /* no structure allocated for this physical card yet */ - physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC); + physcard = kzalloc(sizeof(*physcard), GFP_ATOMIC); if (!physcard) { if (card_hostid == SLIC_HOSTID_DEFAULT) kfree(card); @@ -3069,8 +3069,6 @@ static int slic_entry_probe(struct pci_dev *pcidev, struct sliccard *card = NULL; int pci_using_dac = 0; - slic_global.dynamic_intagg = dynamic_intagg; - err = pci_enable_device(pcidev); if (err) @@ -3112,19 +3110,20 @@ static int slic_entry_probe(struct pci_dev *pcidev, goto err_out_exit_slic_probe; } + netdev->ethtool_ops = &slic_ethtool_ops; SET_NETDEV_DEV(netdev, &pcidev->dev); pci_set_drvdata(pcidev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pcidev = pcidev; + slic_global.dynamic_intagg = adapter->dynamic_intagg; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; mmio_start = pci_resource_start(pcidev, 0); mmio_len = pci_resource_len(pcidev, 0); -/* memmapped_ioaddr = (u32)ioremap_nocache(mmio_start, mmio_len);*/ memmapped_ioaddr = ioremap(mmio_start, mmio_len); if (!memmapped_ioaddr) { dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n", @@ -3204,5 +3203,10 @@ static void __exit slic_module_cleanup(void) pci_unregister_driver(&slic_driver); } +static struct ethtool_ops slic_ethtool_ops = { + .get_coalesce = slic_get_coalesce, + .set_coalesce = slic_set_coalesce +}; + module_init(slic_module_init); module_exit(slic_module_cleanup); diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c index 0331d34458ae..95f7cae3cc23 100644 --- a/drivers/staging/sm750fb/ddk750_chip.c +++ b/drivers/staging/sm750fb/ddk750_chip.c @@ -1,3 +1,4 @@ +#include <linux/kernel.h> #include <linux/sizes.h> #include "ddk750_help.h" @@ -5,6 +6,10 @@ #include "ddk750_chip.h" #include "ddk750_power.h" +/* n / d + 1 / 2 = (2n + d) / 2d */ +#define roundedDiv(num, denom) ((2 * (num) + (denom)) / (2 * (denom))) +#define MHz(x) ((x) * 1000000) + logical_chip_type_t getChipType(void) { unsigned short physicalID; @@ -36,10 +41,10 @@ static unsigned int get_mxclk_freq(void) return MHz(130); pll_reg = PEEK32(MXCLK_PLL_CTRL); - M = FIELD_GET(pll_reg, PANEL_PLL_CTRL, M); - N = FIELD_GET(pll_reg, PANEL_PLL_CTRL, N); - OD = FIELD_GET(pll_reg, PANEL_PLL_CTRL, OD); - POD = FIELD_GET(pll_reg, PANEL_PLL_CTRL, POD); + M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT; + N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_M_SHIFT; + OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT; + POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT; return DEFAULT_INPUT_CLOCK * M / N / (1 << OD) / (1 << POD); } @@ -79,7 +84,7 @@ static void setChipClock(unsigned int frequency) static void setMemoryClock(unsigned int frequency) { - unsigned int ulReg, divisor; + unsigned int reg, divisor; /* Cheok_0509: For SM750LE, the memory clock is fixed. Nothing to set. */ if (getChipType() == SM750LE) @@ -95,24 +100,24 @@ static void setMemoryClock(unsigned int frequency) divisor = roundedDiv(get_mxclk_freq(), frequency); /* Set the corresponding divisor in the register. 
*/ - ulReg = PEEK32(CURRENT_GATE); + reg = PEEK32(CURRENT_GATE) & ~CURRENT_GATE_M2XCLK_MASK; switch (divisor) { default: case 1: - ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_1); + reg |= CURRENT_GATE_M2XCLK_DIV_1; break; case 2: - ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_2); + reg |= CURRENT_GATE_M2XCLK_DIV_2; break; case 3: - ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_3); + reg |= CURRENT_GATE_M2XCLK_DIV_3; break; case 4: - ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_4); + reg |= CURRENT_GATE_M2XCLK_DIV_4; break; } - setCurrentGate(ulReg); + setCurrentGate(reg); } } @@ -126,7 +131,7 @@ static void setMemoryClock(unsigned int frequency) */ static void setMasterClock(unsigned int frequency) { - unsigned int ulReg, divisor; + unsigned int reg, divisor; /* Cheok_0509: For SM750LE, the memory clock is fixed. Nothing to set. */ if (getChipType() == SM750LE) @@ -142,24 +147,24 @@ static void setMasterClock(unsigned int frequency) divisor = roundedDiv(get_mxclk_freq(), frequency); /* Set the corresponding divisor in the register. */ - ulReg = PEEK32(CURRENT_GATE); + reg = PEEK32(CURRENT_GATE) & ~CURRENT_GATE_MCLK_MASK; switch (divisor) { default: case 3: - ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_3); + reg |= CURRENT_GATE_MCLK_DIV_3; break; case 4: - ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_4); + reg |= CURRENT_GATE_MCLK_DIV_4; break; case 6: - ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_6); + reg |= CURRENT_GATE_MCLK_DIV_6; break; case 8: - ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_8); + reg |= CURRENT_GATE_MCLK_DIV_8; break; } - setCurrentGate(ulReg); + setCurrentGate(reg); } } @@ -174,11 +179,11 @@ unsigned int ddk750_getVMSize(void) /* for 750,always use power mode0*/ reg = PEEK32(MODE0_GATE); - reg = FIELD_SET(reg, MODE0_GATE, GPIO, ON); + reg |= MODE0_GATE_GPIO; POKE32(MODE0_GATE, reg); /* get frame buffer size from GPIO */ - reg = FIELD_GET(PEEK32(MISC_CTRL), MISC_CTRL, LOCALMEM_SIZE); + reg = PEEK32(MISC_CTRL) & MISC_CTRL_LOCALMEM_SIZE_MASK; switch (reg) { case MISC_CTRL_LOCALMEM_SIZE_8M: data = SZ_8M; break; /* 8 Mega byte */ @@ -197,24 +202,22 @@ unsigned int ddk750_getVMSize(void) int ddk750_initHw(initchip_param_t *pInitParam) { - unsigned int ulReg; + unsigned int reg; if (pInitParam->powerMode != 0) pInitParam->powerMode = 0; setPowerMode(pInitParam->powerMode); /* Enable display power gate & LOCALMEM power gate*/ - ulReg = PEEK32(CURRENT_GATE); - ulReg = FIELD_SET(ulReg, CURRENT_GATE, DISPLAY, ON); - ulReg = FIELD_SET(ulReg, CURRENT_GATE, LOCALMEM, ON); - setCurrentGate(ulReg); + reg = PEEK32(CURRENT_GATE); + reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM); + setCurrentGate(reg); if (getChipType() != SM750LE) { /* set panel pll and graphic mode via mmio_88 */ - ulReg = PEEK32(VGA_CONFIGURATION); - ulReg = FIELD_SET(ulReg, VGA_CONFIGURATION, PLL, PANEL); - ulReg = FIELD_SET(ulReg, VGA_CONFIGURATION, MODE, GRAPHIC); - POKE32(VGA_CONFIGURATION, ulReg); + reg = PEEK32(VGA_CONFIGURATION); + reg |= (VGA_CONFIGURATION_PLL | VGA_CONFIGURATION_MODE); + POKE32(VGA_CONFIGURATION, reg); } else { #if defined(__i386__) || defined(__x86_64__) /* set graphic mode via IO method */ @@ -238,36 +241,36 @@ int ddk750_initHw(initchip_param_t *pInitParam) The memory should be resetted after changing the MXCLK. 
*/ if (pInitParam->resetMemory == 1) { - ulReg = PEEK32(MISC_CTRL); - ulReg = FIELD_SET(ulReg, MISC_CTRL, LOCALMEM_RESET, RESET); - POKE32(MISC_CTRL, ulReg); + reg = PEEK32(MISC_CTRL); + reg &= ~MISC_CTRL_LOCALMEM_RESET; + POKE32(MISC_CTRL, reg); - ulReg = FIELD_SET(ulReg, MISC_CTRL, LOCALMEM_RESET, NORMAL); - POKE32(MISC_CTRL, ulReg); + reg |= MISC_CTRL_LOCALMEM_RESET; + POKE32(MISC_CTRL, reg); } if (pInitParam->setAllEngOff == 1) { enable2DEngine(0); /* Disable Overlay, if a former application left it on */ - ulReg = PEEK32(VIDEO_DISPLAY_CTRL); - ulReg = FIELD_SET(ulReg, VIDEO_DISPLAY_CTRL, PLANE, DISABLE); - POKE32(VIDEO_DISPLAY_CTRL, ulReg); + reg = PEEK32(VIDEO_DISPLAY_CTRL); + reg &= ~DISPLAY_CTRL_PLANE; + POKE32(VIDEO_DISPLAY_CTRL, reg); /* Disable video alpha, if a former application left it on */ - ulReg = PEEK32(VIDEO_ALPHA_DISPLAY_CTRL); - ulReg = FIELD_SET(ulReg, VIDEO_ALPHA_DISPLAY_CTRL, PLANE, DISABLE); - POKE32(VIDEO_ALPHA_DISPLAY_CTRL, ulReg); + reg = PEEK32(VIDEO_ALPHA_DISPLAY_CTRL); + reg &= ~DISPLAY_CTRL_PLANE; + POKE32(VIDEO_ALPHA_DISPLAY_CTRL, reg); /* Disable alpha plane, if a former application left it on */ - ulReg = PEEK32(ALPHA_DISPLAY_CTRL); - ulReg = FIELD_SET(ulReg, ALPHA_DISPLAY_CTRL, PLANE, DISABLE); - POKE32(ALPHA_DISPLAY_CTRL, ulReg); + reg = PEEK32(ALPHA_DISPLAY_CTRL); + reg &= ~DISPLAY_CTRL_PLANE; + POKE32(ALPHA_DISPLAY_CTRL, reg); /* Disable DMA Channel, if a former application left it on */ - ulReg = PEEK32(DMA_ABORT_INTERRUPT); - ulReg = FIELD_SET(ulReg, DMA_ABORT_INTERRUPT, ABORT_1, ABORT); - POKE32(DMA_ABORT_INTERRUPT, ulReg); + reg = PEEK32(DMA_ABORT_INTERRUPT); + reg |= DMA_ABORT_INTERRUPT_ABORT_1; + POKE32(DMA_ABORT_INTERRUPT, reg); /* Disable DMA Power, if a former application left it on */ enableDMA(0); @@ -337,7 +340,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll) unsigned int diff; tmpClock = pll->inputFreq * M / N / X; - diff = absDiff(tmpClock, request_orig); + diff = abs(tmpClock - request_orig); if (diff < mini_diff) { pll->M = M; pll->N = N; @@ -356,24 +359,29 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll) unsigned int formatPllReg(pll_value_t *pPLL) { - unsigned int ulPllReg = 0; - - /* Note that all PLL's have the same format. Here, we just use Panel PLL parameter - to work out the bit fields in the register. - On returning a 32 bit number, the value can be applied to any PLL in the calling function. - */ - ulPllReg = - FIELD_SET(0, PANEL_PLL_CTRL, BYPASS, OFF) - | FIELD_SET(0, PANEL_PLL_CTRL, POWER, ON) - | FIELD_SET(0, PANEL_PLL_CTRL, INPUT, OSC) #ifndef VALIDATION_CHIP - | FIELD_VALUE(0, PANEL_PLL_CTRL, POD, pPLL->POD) + unsigned int POD = pPLL->POD; +#endif + unsigned int OD = pPLL->OD; + unsigned int M = pPLL->M; + unsigned int N = pPLL->N; + unsigned int reg = 0; + + /* + * Note that all PLL's have the same format. Here, we just use + * Panel PLL parameter to work out the bit fields in the + * register. On returning a 32 bit number, the value can be + * applied to any PLL in the calling function. 
+ */ + reg = PLL_CTRL_POWER | +#ifndef VALIDATION_CHIP + ((POD << PLL_CTRL_POD_SHIFT) & PLL_CTRL_POD_MASK) | #endif - | FIELD_VALUE(0, PANEL_PLL_CTRL, OD, pPLL->OD) - | FIELD_VALUE(0, PANEL_PLL_CTRL, N, pPLL->N) - | FIELD_VALUE(0, PANEL_PLL_CTRL, M, pPLL->M); + ((OD << PLL_CTRL_OD_SHIFT) & PLL_CTRL_OD_MASK) | + ((N << PLL_CTRL_N_SHIFT) & PLL_CTRL_N_MASK) | + ((M << PLL_CTRL_M_SHIFT) & PLL_CTRL_M_MASK); - return ulPllReg; + return reg; } diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c index 84f6e8b8c0e2..ca4973ee49e4 100644 --- a/drivers/staging/sm750fb/ddk750_display.c +++ b/drivers/staging/sm750fb/ddk750_display.c @@ -9,111 +9,55 @@ static void setDisplayControl(int ctrl, int disp_state) { /* state != 0 means turn on both timing & plane en_bit */ - unsigned long ulDisplayCtrlReg, ulReservedBits; - int cnt; + unsigned long reg, val, reserved; + int cnt = 0; - cnt = 0; - - /* Set the primary display control */ if (!ctrl) { - ulDisplayCtrlReg = PEEK32(PANEL_DISPLAY_CTRL); - /* Turn on/off the Panel display control */ - if (disp_state) { - /* Timing should be enabled first before enabling the plane - * because changing at the same time does not guarantee that - * the plane will also enabled or disabled. - */ - ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg, - PANEL_DISPLAY_CTRL, TIMING, ENABLE); - POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg); - - ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg, - PANEL_DISPLAY_CTRL, PLANE, ENABLE); - - /* Added some masks to mask out the reserved bits. - * Sometimes, the reserved bits are set/reset randomly when - * writing to the PRIMARY_DISPLAY_CTRL, therefore, the register - * reserved bits are needed to be masked out. - */ - ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) | - FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) | - FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE); - - /* Somehow the register value on the plane is not set - * until a few delay. Need to write - * and read it a couple times - */ - do { - cnt++; - POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg); - } while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) != - (ulDisplayCtrlReg & ~ulReservedBits)); - printk("Set Panel Plane enbit:after tried %d times\n", cnt); - } else { - /* When turning off, there is no rule on the programming - * sequence since whenever the clock is off, then it does not - * matter whether the plane is enabled or disabled. - * Note: Modifying the plane bit will take effect on the - * next vertical sync. Need to find out if it is necessary to - * wait for 1 vsync before modifying the timing enable bit. - * */ - ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg, - PANEL_DISPLAY_CTRL, PLANE, DISABLE); - POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg); - - ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg, - PANEL_DISPLAY_CTRL, TIMING, DISABLE); - POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg); - } - + reg = PANEL_DISPLAY_CTRL; + reserved = PANEL_DISPLAY_CTRL_RESERVED_MASK; } else { - /* Set the secondary display control */ - ulDisplayCtrlReg = PEEK32(CRT_DISPLAY_CTRL); - - if (disp_state) { - /* Timing should be enabled first before enabling the plane because changing at the - same time does not guarantee that the plane will also enabled or disabled. 
- */ - ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg, - CRT_DISPLAY_CTRL, TIMING, ENABLE); - POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg); - - ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg, - CRT_DISPLAY_CTRL, PLANE, ENABLE); - - /* Added some masks to mask out the reserved bits. - * Sometimes, the reserved bits are set/reset randomly when - * writing to the PRIMARY_DISPLAY_CTRL, therefore, the register - * reserved bits are needed to be masked out. - */ - - ulReservedBits = FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) | - FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) | - FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE) | - FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_4_MASK, ENABLE); + reg = CRT_DISPLAY_CTRL; + reserved = CRT_DISPLAY_CTRL_RESERVED_MASK; + } - do { - cnt++; - POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg); - } while ((PEEK32(CRT_DISPLAY_CTRL) & ~ulReservedBits) != - (ulDisplayCtrlReg & ~ulReservedBits)); - printk("Set Crt Plane enbit:after tried %d times\n", cnt); - } else { - /* When turning off, there is no rule on the programming - * sequence since whenever the clock is off, then it does not - * matter whether the plane is enabled or disabled. - * Note: Modifying the plane bit will take effect on the next - * vertical sync. Need to find out if it is necessary to - * wait for 1 vsync before modifying the timing enable bit. - */ - ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg, - CRT_DISPLAY_CTRL, PLANE, DISABLE); - POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg); - - ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg, - CRT_DISPLAY_CTRL, TIMING, DISABLE); - POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg); - } + val = PEEK32(reg); + if (disp_state) { + /* + * Timing should be enabled first before enabling the + * plane because changing at the same time does not + * guarantee that the plane will also enabled or + * disabled. + */ + val |= DISPLAY_CTRL_TIMING; + POKE32(reg, val); + + val |= DISPLAY_CTRL_PLANE; + + /* + * Somehow the register value on the plane is not set + * until a few delay. Need to write and read it a + * couple times + */ + do { + cnt++; + POKE32(reg, val); + } while ((PEEK32(reg) & ~reserved) != (val & ~reserved)); + pr_debug("Set Plane enbit:after tried %d times\n", cnt); + } else { + /* + * When turning off, there is no rule on the + * programming sequence since whenever the clock is + * off, then it does not matter whether the plane is + * enabled or disabled. Note: Modifying the plane bit + * will take effect on the next vertical sync. Need to + * find out if it is necessary to wait for 1 vsync + * before modifying the timing enable bit. + */ + val &= ~DISPLAY_CTRL_PLANE; + POKE32(reg, val); + + val &= ~DISPLAY_CTRL_TIMING; + POKE32(reg, val); } } @@ -126,54 +70,42 @@ static void waitNextVerticalSync(int ctrl, int delay) /* Do not wait when the Primary PLL is off or display control is already off. This will prevent the software to wait forever. */ - if ((FIELD_GET(PEEK32(PANEL_PLL_CTRL), PANEL_PLL_CTRL, POWER) == - PANEL_PLL_CTRL_POWER_OFF) || - (FIELD_GET(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, TIMING) == - PANEL_DISPLAY_CTRL_TIMING_DISABLE)) { + if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) || + !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) { return; } while (delay-- > 0) { /* Wait for end of vsync. 
*/ do { - status = FIELD_GET(PEEK32(SYSTEM_CTRL), - SYSTEM_CTRL, - PANEL_VSYNC); - } while (status == SYSTEM_CTRL_PANEL_VSYNC_ACTIVE); + status = PEEK32(SYSTEM_CTRL); + } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE); /* Wait for start of vsync. */ do { - status = FIELD_GET(PEEK32(SYSTEM_CTRL), - SYSTEM_CTRL, - PANEL_VSYNC); - } while (status == SYSTEM_CTRL_PANEL_VSYNC_INACTIVE); + status = PEEK32(SYSTEM_CTRL); + } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE)); } } else { /* Do not wait when the Primary PLL is off or display control is already off. This will prevent the software to wait forever. */ - if ((FIELD_GET(PEEK32(CRT_PLL_CTRL), CRT_PLL_CTRL, POWER) == - CRT_PLL_CTRL_POWER_OFF) || - (FIELD_GET(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, TIMING) == - CRT_DISPLAY_CTRL_TIMING_DISABLE)) { + if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) || + !(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) { return; } while (delay-- > 0) { /* Wait for end of vsync. */ do { - status = FIELD_GET(PEEK32(SYSTEM_CTRL), - SYSTEM_CTRL, - CRT_VSYNC); - } while (status == SYSTEM_CTRL_CRT_VSYNC_ACTIVE); + status = PEEK32(SYSTEM_CTRL); + } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE); /* Wait for start of vsync. */ do { - status = FIELD_GET(PEEK32(SYSTEM_CTRL), - SYSTEM_CTRL, - CRT_VSYNC); - } while (status == SYSTEM_CTRL_CRT_VSYNC_INACTIVE); + status = PEEK32(SYSTEM_CTRL); + } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE)); } } } @@ -184,22 +116,22 @@ static void swPanelPowerSequence(int disp, int delay) /* disp should be 1 to open sequence */ reg = PEEK32(PANEL_DISPLAY_CTRL); - reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, FPEN, disp); + reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0); POKE32(PANEL_DISPLAY_CTRL, reg); primaryWaitVerticalSync(delay); reg = PEEK32(PANEL_DISPLAY_CTRL); - reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, DATA, disp); + reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0); POKE32(PANEL_DISPLAY_CTRL, reg); primaryWaitVerticalSync(delay); reg = PEEK32(PANEL_DISPLAY_CTRL); - reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, VBIASEN, disp); + reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0); POKE32(PANEL_DISPLAY_CTRL, reg); primaryWaitVerticalSync(delay); reg = PEEK32(PANEL_DISPLAY_CTRL); - reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, FPEN, disp); + reg |= (disp ? 
PANEL_DISPLAY_CTRL_FPEN : 0); POKE32(PANEL_DISPLAY_CTRL, reg); primaryWaitVerticalSync(delay); @@ -212,16 +144,20 @@ void ddk750_setLogicalDispOut(disp_output_t output) if (output & PNL_2_USAGE) { /* set panel path controller select */ reg = PEEK32(PANEL_DISPLAY_CTRL); - reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, SELECT, (output & PNL_2_MASK)>>PNL_2_OFFSET); + reg &= ~PANEL_DISPLAY_CTRL_SELECT_MASK; + reg |= (((output & PNL_2_MASK) >> PNL_2_OFFSET) << + PANEL_DISPLAY_CTRL_SELECT_SHIFT); POKE32(PANEL_DISPLAY_CTRL, reg); } if (output & CRT_2_USAGE) { /* set crt path controller select */ reg = PEEK32(CRT_DISPLAY_CTRL); - reg = FIELD_VALUE(reg, CRT_DISPLAY_CTRL, SELECT, (output & CRT_2_MASK)>>CRT_2_OFFSET); + reg &= ~CRT_DISPLAY_CTRL_SELECT_MASK; + reg |= (((output & CRT_2_MASK) >> CRT_2_OFFSET) << + CRT_DISPLAY_CTRL_SELECT_SHIFT); /*se blank off */ - reg = FIELD_SET(reg, CRT_DISPLAY_CTRL, BLANK, OFF); + reg &= ~CRT_DISPLAY_CTRL_BLANK; POKE32(CRT_DISPLAY_CTRL, reg); } diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c index a7a23514ac39..a4a255007c8d 100644 --- a/drivers/staging/sm750fb/ddk750_dvi.c +++ b/drivers/staging/sm750fb/ddk750_dvi.c @@ -53,44 +53,6 @@ int dviInit( return -1; /* error */ } - -/* - * dviGetVendorID - * This function gets the vendor ID of the DVI controller chip. - * - * Output: - * Vendor ID - */ -unsigned short dviGetVendorID(void) -{ - dvi_ctrl_device_t *pCurrentDviCtrl; - - pCurrentDviCtrl = g_dcftSupportedDviController; - if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0) - return pCurrentDviCtrl->pfnGetVendorId(); - - return 0x0000; -} - - -/* - * dviGetDeviceID - * This function gets the device ID of the DVI controller chip. - * - * Output: - * Device ID - */ -unsigned short dviGetDeviceID(void) -{ - dvi_ctrl_device_t *pCurrentDviCtrl; - - pCurrentDviCtrl = g_dcftSupportedDviController; - if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0) - return pCurrentDviCtrl->pfnGetDeviceId(); - - return 0x0000; -} - #endif diff --git a/drivers/staging/sm750fb/ddk750_dvi.h b/drivers/staging/sm750fb/ddk750_dvi.h index e1d4c9a2d50a..677939cb5130 100644 --- a/drivers/staging/sm750fb/ddk750_dvi.h +++ b/drivers/staging/sm750fb/ddk750_dvi.h @@ -55,8 +55,5 @@ int dviInit( unsigned char pllFilterValue ); -unsigned short dviGetVendorID(void); -unsigned short dviGetDeviceID(void); - #endif diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h index 5be814eed735..009db9213a73 100644 --- a/drivers/staging/sm750fb/ddk750_help.h +++ b/drivers/staging/sm750fb/ddk750_help.h @@ -6,7 +6,6 @@ #include <linux/ioport.h> #include <linux/io.h> #include <linux/uaccess.h> -#include "sm750_help.h" /* software control endianness */ #define PEEK32(addr) readl(addr + mmio750) diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c index 7be2111284f4..39c3e1cdbc0c 100644 --- a/drivers/staging/sm750fb/ddk750_hwi2c.c +++ b/drivers/staging/sm750fb/ddk750_hwi2c.c @@ -17,8 +17,7 @@ unsigned char bus_speed_mode /* Enable GPIO 30 & 31 as IIC clock & data */ value = PEEK32(GPIO_MUX); - value = FIELD_SET(value, GPIO_MUX, 30, I2C) | - FIELD_SET(0, GPIO_MUX, 31, I2C); + value |= (GPIO_MUX_30 | GPIO_MUX_31); POKE32(GPIO_MUX, value); /* Enable Hardware I2C power. 
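
The hardware-I2C hunks that follow replace the FIELD_GET-based status check in hw_i2c_wait_tx_done() with a plain bounded poll of I2C_STATUS. The snippet below is only an illustrative, stand-alone sketch of that poll-with-timeout idiom: the bit value, the timeout budget and read_status() are stand-ins (the driver reads the real register with PEEK32 and bounds the loop with HWI2C_WAIT_TIMEOUT), chosen only so the sketch compiles and runs on its own.

#include <stdio.h>

#define STATUS_TX_DONE   0x08u       /* hypothetical "transfer complete" bit */
#define WAIT_TIMEOUT     0xF0000UL   /* loop budget, in the spirit of HWI2C_WAIT_TIMEOUT */

/* Stand-in for PEEK32(I2C_STATUS); a real driver would read MMIO here. */
static unsigned int read_status(void)
{
	return STATUS_TX_DONE;
}

/* Spin until the done bit is set or the budget runs out. */
static long wait_tx_done(void)
{
	unsigned long timeout = WAIT_TIMEOUT;

	while (!(read_status() & STATUS_TX_DONE) && timeout != 0)
		timeout--;

	return timeout ? 0 : -1;
}

int main(void)
{
	printf("wait_tx_done() -> %ld\n", wait_tx_done());
	return 0;
}
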
@@ -27,12 +26,10 @@ unsigned char bus_speed_mode enableI2C(1); /* Enable the I2C Controller and set the bus speed mode */ - value = PEEK32(I2C_CTRL); - if (bus_speed_mode == 0) - value = FIELD_SET(value, I2C_CTRL, MODE, STANDARD); - else - value = FIELD_SET(value, I2C_CTRL, MODE, FAST); - value = FIELD_SET(value, I2C_CTRL, EN, ENABLE); + value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN); + if (bus_speed_mode) + value |= I2C_CTRL_MODE; + value |= I2C_CTRL_EN; POKE32(I2C_CTRL, value); return 0; @@ -43,8 +40,7 @@ void sm750_hw_i2c_close(void) unsigned int value; /* Disable I2C controller */ - value = PEEK32(I2C_CTRL); - value = FIELD_SET(value, I2C_CTRL, EN, DISABLE); + value = PEEK32(I2C_CTRL) & ~I2C_CTRL_EN; POKE32(I2C_CTRL, value); /* Disable I2C Power */ @@ -52,8 +48,8 @@ void sm750_hw_i2c_close(void) /* Set GPIO 30 & 31 back as GPIO pins */ value = PEEK32(GPIO_MUX); - value = FIELD_SET(value, GPIO_MUX, 30, GPIO); - value = FIELD_SET(value, GPIO_MUX, 31, GPIO); + value &= ~GPIO_MUX_30; + value &= ~GPIO_MUX_31; POKE32(GPIO_MUX, value); } @@ -63,13 +59,11 @@ static long hw_i2c_wait_tx_done(void) /* Wait until the transfer is completed. */ timeout = HWI2C_WAIT_TIMEOUT; - while ((FIELD_GET(PEEK32(I2C_STATUS), - I2C_STATUS, TX) != I2C_STATUS_TX_COMPLETED) && - (timeout != 0)) + while (!(PEEK32(I2C_STATUS) & I2C_STATUS_TX) && (timeout != 0)) timeout--; if (timeout == 0) - return (-1); + return -1; return 0; } @@ -121,14 +115,13 @@ static unsigned int hw_i2c_write_data( POKE32(I2C_DATA0 + i, *buf++); /* Start the I2C */ - POKE32(I2C_CTRL, - FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START)); + POKE32(I2C_CTRL, PEEK32(I2C_CTRL) | I2C_CTRL_CTRL); /* Wait until the transfer is completed. */ if (hw_i2c_wait_tx_done() != 0) break; - /* Substract length */ + /* Subtract length */ length -= (count + 1); /* Total byte written */ @@ -184,8 +177,7 @@ static unsigned int hw_i2c_read_data( POKE32(I2C_BYTE_COUNT, count); /* Start the I2C */ - POKE32(I2C_CTRL, - FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START)); + POKE32(I2C_CTRL, PEEK32(I2C_CTRL) | I2C_CTRL_CTRL); /* Wait until transaction done. */ if (hw_i2c_wait_tx_done() != 0) @@ -195,7 +187,7 @@ static unsigned int hw_i2c_read_data( for (i = 0; i <= count; i++) *buf++ = PEEK32(I2C_DATA0 + i); - /* Substract length by 16 */ + /* Subtract length by 16 */ length -= (count + 1); /* Number of bytes read. */ @@ -256,7 +248,7 @@ int sm750_hw_i2c_write_reg( if (hw_i2c_write_data(addr, 2, value) == 2) return 0; - return (-1); + return -1; } #endif diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c index fa35926680ab..ccb4e067661a 100644 --- a/drivers/staging/sm750fb/ddk750_mode.c +++ b/drivers/staging/sm750fb/ddk750_mode.c @@ -25,13 +25,12 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam, Note that normal SM750/SM718 only use those two register for auto-centering mode. */ - POKE32(CRT_AUTO_CENTERING_TL, - FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, TOP, 0) - | FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, LEFT, 0)); + POKE32(CRT_AUTO_CENTERING_TL, 0); POKE32(CRT_AUTO_CENTERING_BR, - FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, BOTTOM, y-1) - | FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, RIGHT, x-1)); + (((y - 1) << CRT_AUTO_CENTERING_BR_BOTTOM_SHIFT) & + CRT_AUTO_CENTERING_BR_BOTTOM_MASK) | + ((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK)); /* Assume common fields in dispControl have been properly set before calling this function. 
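
The ddk750_mode.c hunks below, like the display, power and I2C ones above, drop the old FIELD_SET()/FIELD_VALUE() helpers in favour of open-coded shift-and-mask read-modify-write sequences. A minimal stand-alone sketch of that pattern follows; the register name, field layout and values are hypothetical and serve only to illustrate the idiom, they are not taken from the SM750 register map.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout, for illustration only. */
#define CTRL_CLK_SHIFT	27
#define CTRL_CLK_MASK	(0x7u << CTRL_CLK_SHIFT)	/* bits 29:27 */
#define CTRL_TIMING	(1u << 8)

/* Clear the field, then OR in the new value, constrained to the mask. */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;

	reg = set_field(reg, CTRL_CLK_MASK, CTRL_CLK_SHIFT, 5);	/* select clock 5 */
	reg |= CTRL_TIMING;					/* enable timing */
	printf("reg = 0x%08x\n", reg);
	return 0;
}
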
@@ -39,33 +38,32 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam, */ /* Clear bit 29:27 of display control register */ - dispControl &= FIELD_CLEAR(CRT_DISPLAY_CTRL, CLK); + dispControl &= ~CRT_DISPLAY_CTRL_CLK_MASK; /* Set bit 29:27 of display control register for the right clock */ - /* Note that SM750LE only need to supported 7 resoluitons. */ + /* Note that SM750LE only need to supported 7 resolutions. */ if (x == 800 && y == 600) - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL41); + dispControl |= CRT_DISPLAY_CTRL_CLK_PLL41; else if (x == 1024 && y == 768) - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL65); + dispControl |= CRT_DISPLAY_CTRL_CLK_PLL65; else if (x == 1152 && y == 864) - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80); + dispControl |= CRT_DISPLAY_CTRL_CLK_PLL80; else if (x == 1280 && y == 768) - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80); + dispControl |= CRT_DISPLAY_CTRL_CLK_PLL80; else if (x == 1280 && y == 720) - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL74); + dispControl |= CRT_DISPLAY_CTRL_CLK_PLL74; else if (x == 1280 && y == 960) - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108); + dispControl |= CRT_DISPLAY_CTRL_CLK_PLL108; else if (x == 1280 && y == 1024) - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108); + dispControl |= CRT_DISPLAY_CTRL_CLK_PLL108; else /* default to VGA clock */ - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL25); + dispControl |= CRT_DISPLAY_CTRL_CLK_PLL25; /* Set bit 25:24 of display controller */ - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CRTSELECT, CRT); - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, RGBBIT, 24BIT); + dispControl |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT); /* Set bit 14 of display controller */ - dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLOCK_PHASE, ACTIVE_LOW); + dispControl = DISPLAY_CTRL_CLOCK_PHASE; POKE32(CRT_DISPLAY_CTRL, dispControl); @@ -79,85 +77,105 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll) { int ret = 0; int cnt = 0; - unsigned int ulTmpValue, ulReg; + unsigned int tmp, reg; if (pll->clockType == SECONDARY_PLL) { /* programe secondary pixel clock */ POKE32(CRT_PLL_CTRL, formatPllReg(pll)); POKE32(CRT_HORIZONTAL_TOTAL, - FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1) - | FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1)); + (((pModeParam->horizontal_total - 1) << + CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) & + CRT_HORIZONTAL_TOTAL_TOTAL_MASK) | + ((pModeParam->horizontal_display_end - 1) & + CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK)); POKE32(CRT_HORIZONTAL_SYNC, - FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width) - | FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1)); + ((pModeParam->horizontal_sync_width << + CRT_HORIZONTAL_SYNC_WIDTH_SHIFT) & + CRT_HORIZONTAL_SYNC_WIDTH_MASK) | + ((pModeParam->horizontal_sync_start - 1) & + CRT_HORIZONTAL_SYNC_START_MASK)); POKE32(CRT_VERTICAL_TOTAL, - FIELD_VALUE(0, CRT_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1) - | FIELD_VALUE(0, CRT_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1)); + (((pModeParam->vertical_total - 1) << + CRT_VERTICAL_TOTAL_TOTAL_SHIFT) & + CRT_VERTICAL_TOTAL_TOTAL_MASK) | + ((pModeParam->vertical_display_end - 1) & + 
CRT_VERTICAL_TOTAL_DISPLAY_END_MASK)); POKE32(CRT_VERTICAL_SYNC, - FIELD_VALUE(0, CRT_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height) - | FIELD_VALUE(0, CRT_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1)); + ((pModeParam->vertical_sync_height << + CRT_VERTICAL_SYNC_HEIGHT_SHIFT) & + CRT_VERTICAL_SYNC_HEIGHT_MASK) | + ((pModeParam->vertical_sync_start - 1) & + CRT_VERTICAL_SYNC_START_MASK)); - ulTmpValue = FIELD_VALUE(0, CRT_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)| - FIELD_VALUE(0, CRT_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)| - FIELD_SET(0, CRT_DISPLAY_CTRL, TIMING, ENABLE)| - FIELD_SET(0, CRT_DISPLAY_CTRL, PLANE, ENABLE); - + tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE; + if (pModeParam->vertical_sync_polarity) + tmp |= DISPLAY_CTRL_VSYNC_PHASE; + if (pModeParam->horizontal_sync_polarity) + tmp |= DISPLAY_CTRL_HSYNC_PHASE; if (getChipType() == SM750LE) { - displayControlAdjust_SM750LE(pModeParam, ulTmpValue); + displayControlAdjust_SM750LE(pModeParam, tmp); } else { - ulReg = PEEK32(CRT_DISPLAY_CTRL) - & FIELD_CLEAR(CRT_DISPLAY_CTRL, VSYNC_PHASE) - & FIELD_CLEAR(CRT_DISPLAY_CTRL, HSYNC_PHASE) - & FIELD_CLEAR(CRT_DISPLAY_CTRL, TIMING) - & FIELD_CLEAR(CRT_DISPLAY_CTRL, PLANE); + reg = PEEK32(CRT_DISPLAY_CTRL) & + ~(DISPLAY_CTRL_VSYNC_PHASE | + DISPLAY_CTRL_HSYNC_PHASE | + DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE); - POKE32(CRT_DISPLAY_CTRL, ulTmpValue|ulReg); + POKE32(CRT_DISPLAY_CTRL, tmp | reg); } } else if (pll->clockType == PRIMARY_PLL) { - unsigned int ulReservedBits; + unsigned int reserved; POKE32(PANEL_PLL_CTRL, formatPllReg(pll)); - POKE32(PANEL_HORIZONTAL_TOTAL, - FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1) - | FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1)); + reg = ((pModeParam->horizontal_total - 1) << + PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) & + PANEL_HORIZONTAL_TOTAL_TOTAL_MASK; + reg |= ((pModeParam->horizontal_display_end - 1) & + PANEL_HORIZONTAL_TOTAL_DISPLAY_END_MASK); + POKE32(PANEL_HORIZONTAL_TOTAL, reg); POKE32(PANEL_HORIZONTAL_SYNC, - FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width) - | FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1)); + ((pModeParam->horizontal_sync_width << + PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT) & + PANEL_HORIZONTAL_SYNC_WIDTH_MASK) | + ((pModeParam->horizontal_sync_start - 1) & + PANEL_HORIZONTAL_SYNC_START_MASK)); POKE32(PANEL_VERTICAL_TOTAL, - FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1) - | FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1)); + (((pModeParam->vertical_total - 1) << + PANEL_VERTICAL_TOTAL_TOTAL_SHIFT) & + PANEL_VERTICAL_TOTAL_TOTAL_MASK) | + ((pModeParam->vertical_display_end - 1) & + PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK)); POKE32(PANEL_VERTICAL_SYNC, - FIELD_VALUE(0, PANEL_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height) - | FIELD_VALUE(0, PANEL_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1)); - - ulTmpValue = FIELD_VALUE(0, PANEL_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)| - FIELD_VALUE(0, PANEL_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)| - FIELD_VALUE(0, PANEL_DISPLAY_CTRL, CLOCK_PHASE, pModeParam->clock_phase_polarity)| - FIELD_SET(0, PANEL_DISPLAY_CTRL, TIMING, ENABLE)| - FIELD_SET(0, PANEL_DISPLAY_CTRL, PLANE, ENABLE); - - ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, 
RESERVED_1_MASK, ENABLE) | - FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) | - FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE)| - FIELD_SET(0, PANEL_DISPLAY_CTRL, VSYNC, ACTIVE_LOW); - - ulReg = (PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) - & FIELD_CLEAR(PANEL_DISPLAY_CTRL, CLOCK_PHASE) - & FIELD_CLEAR(PANEL_DISPLAY_CTRL, VSYNC_PHASE) - & FIELD_CLEAR(PANEL_DISPLAY_CTRL, HSYNC_PHASE) - & FIELD_CLEAR(PANEL_DISPLAY_CTRL, TIMING) - & FIELD_CLEAR(PANEL_DISPLAY_CTRL, PLANE); - + ((pModeParam->vertical_sync_height << + PANEL_VERTICAL_SYNC_HEIGHT_SHIFT) & + PANEL_VERTICAL_SYNC_HEIGHT_MASK) | + ((pModeParam->vertical_sync_start - 1) & + PANEL_VERTICAL_SYNC_START_MASK)); + + tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE; + if (pModeParam->vertical_sync_polarity) + tmp |= DISPLAY_CTRL_VSYNC_PHASE; + if (pModeParam->horizontal_sync_polarity) + tmp |= DISPLAY_CTRL_HSYNC_PHASE; + if (pModeParam->clock_phase_polarity) + tmp |= DISPLAY_CTRL_CLOCK_PHASE; + + reserved = PANEL_DISPLAY_CTRL_RESERVED_MASK | + PANEL_DISPLAY_CTRL_VSYNC; + + reg = (PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) & + ~(DISPLAY_CTRL_CLOCK_PHASE | DISPLAY_CTRL_VSYNC_PHASE | + DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING | + DISPLAY_CTRL_PLANE); /* May a hardware bug or just my test chip (not confirmed). * PANEL_DISPLAY_CTRL register seems requiring few writes @@ -167,13 +185,14 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll) * next vertical sync to turn on/off the plane. */ - POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg); + POKE32(PANEL_DISPLAY_CTRL, tmp | reg); - while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) != (ulTmpValue|ulReg)) { + while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) != + (tmp | reg)) { cnt++; if (cnt > 1000) break; - POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg); + POKE32(PANEL_DISPLAY_CTRL, tmp | reg); } } else { ret = -1; diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c index 667e4f822544..b3c3791b95bd 100644 --- a/drivers/staging/sm750fb/ddk750_power.c +++ b/drivers/staging/sm750fb/ddk750_power.c @@ -7,12 +7,12 @@ void ddk750_setDPMS(DPMS_t state) unsigned int value; if (getChipType() == SM750LE) { - value = PEEK32(CRT_DISPLAY_CTRL); - POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(value, CRT_DISPLAY_CTRL, - DPMS, state)); + value = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_DPMS_MASK; + value |= (state << CRT_DISPLAY_CTRL_DPMS_SHIFT); + POKE32(CRT_DISPLAY_CTRL, value); } else { value = PEEK32(SYSTEM_CTRL); - value = FIELD_VALUE(value, SYSTEM_CTRL, DPMS, state); + value = (value & ~SYSTEM_CTRL_DPMS_MASK) | state; POKE32(SYSTEM_CTRL, value); } } @@ -21,7 +21,7 @@ static unsigned int getPowerMode(void) { if (getChipType() == SM750LE) return 0; - return FIELD_GET(PEEK32(POWER_MODE_CTRL), POWER_MODE_CTRL, MODE); + return PEEK32(POWER_MODE_CTRL) & POWER_MODE_CTRL_MODE_MASK; } @@ -33,25 +33,22 @@ void setPowerMode(unsigned int powerMode) { unsigned int control_value = 0; - control_value = PEEK32(POWER_MODE_CTRL); + control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK; if (getChipType() == SM750LE) return; switch (powerMode) { case POWER_MODE_CTRL_MODE_MODE0: - control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE, - MODE0); + control_value |= POWER_MODE_CTRL_MODE_MODE0; break; case POWER_MODE_CTRL_MODE_MODE1: - control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE, - MODE1); + control_value |= POWER_MODE_CTRL_MODE_MODE1; break; case POWER_MODE_CTRL_MODE_SLEEP: - control_value = 
FIELD_SET(control_value, POWER_MODE_CTRL, MODE, - SLEEP); + control_value |= POWER_MODE_CTRL_MODE_SLEEP; break; default: @@ -60,17 +57,15 @@ void setPowerMode(unsigned int powerMode) /* Set up other fields in Power Control Register */ if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) { - control_value = + control_value &= ~POWER_MODE_CTRL_OSC_INPUT; #ifdef VALIDATION_CHIP - FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, OFF) | + control_value &= ~POWER_MODE_CTRL_336CLK; #endif - FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT, OFF); } else { - control_value = + control_value |= POWER_MODE_CTRL_OSC_INPUT; #ifdef VALIDATION_CHIP - FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, ON) | + control_value |= POWER_MODE_CTRL_336CLK; #endif - FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT, ON); } /* Program new power mode. */ @@ -111,13 +106,10 @@ void enable2DEngine(unsigned int enable) u32 gate; gate = PEEK32(CURRENT_GATE); - if (enable) { - gate = FIELD_SET(gate, CURRENT_GATE, DE, ON); - gate = FIELD_SET(gate, CURRENT_GATE, CSC, ON); - } else { - gate = FIELD_SET(gate, CURRENT_GATE, DE, OFF); - gate = FIELD_SET(gate, CURRENT_GATE, CSC, OFF); - } + if (enable) + gate |= (CURRENT_GATE_DE | CURRENT_GATE_CSC); + else + gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC); setCurrentGate(gate); } @@ -129,9 +121,9 @@ void enableDMA(unsigned int enable) /* Enable DMA Gate */ gate = PEEK32(CURRENT_GATE); if (enable) - gate = FIELD_SET(gate, CURRENT_GATE, DMA, ON); + gate |= CURRENT_GATE_DMA; else - gate = FIELD_SET(gate, CURRENT_GATE, DMA, OFF); + gate &= ~CURRENT_GATE_DMA; setCurrentGate(gate); } @@ -146,9 +138,9 @@ void enableGPIO(unsigned int enable) /* Enable GPIO Gate */ gate = PEEK32(CURRENT_GATE); if (enable) - gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON); + gate |= CURRENT_GATE_GPIO; else - gate = FIELD_SET(gate, CURRENT_GATE, GPIO, OFF); + gate &= ~CURRENT_GATE_GPIO; setCurrentGate(gate); } @@ -163,9 +155,9 @@ void enableI2C(unsigned int enable) /* Enable I2C Gate */ gate = PEEK32(CURRENT_GATE); if (enable) - gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON); + gate |= CURRENT_GATE_I2C; else - gate = FIELD_SET(gate, CURRENT_GATE, I2C, OFF); + gate &= ~CURRENT_GATE_I2C; setCurrentGate(gate); } diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h index 6e804d990cff..5963691f9a68 100644 --- a/drivers/staging/sm750fb/ddk750_power.h +++ b/drivers/staging/sm750fb/ddk750_power.h @@ -9,13 +9,10 @@ typedef enum _DPMS_t { } DPMS_t; -#define setDAC(off) \ - { \ - POKE32(MISC_CTRL, FIELD_VALUE(PEEK32(MISC_CTRL), \ - MISC_CTRL, \ - DAC_POWER, \ - off)); \ - } +#define setDAC(off) { \ + POKE32(MISC_CTRL, \ + (PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \ +} void ddk750_setDPMS(DPMS_t); diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h index 16a01c25442c..955247979aaa 100644 --- a/drivers/staging/sm750fb/ddk750_reg.h +++ b/drivers/staging/sm750fb/ddk750_reg.h @@ -3,1865 +3,1149 @@ /* New register for SM750LE */ #define DE_STATE1 0x100054 -#define DE_STATE1_DE_ABORT 0:0 -#define DE_STATE1_DE_ABORT_OFF 0 -#define DE_STATE1_DE_ABORT_ON 1 +#define DE_STATE1_DE_ABORT BIT(0) #define DE_STATE2 0x100058 -#define DE_STATE2_DE_FIFO 3:3 -#define DE_STATE2_DE_FIFO_NOTEMPTY 0 -#define DE_STATE2_DE_FIFO_EMPTY 1 -#define DE_STATE2_DE_STATUS 2:2 -#define DE_STATE2_DE_STATUS_IDLE 0 -#define DE_STATE2_DE_STATUS_BUSY 1 -#define DE_STATE2_DE_MEM_FIFO 1:1 -#define DE_STATE2_DE_MEM_FIFO_NOTEMPTY 0 -#define DE_STATE2_DE_MEM_FIFO_EMPTY 1 
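
The ddk750_reg.h rewrite around this point replaces the old "high:low" bit-range defines (consumed through FIELD_GET/FIELD_SET) with kernel-style BIT() flags and explicit _MASK plus shifted-value constants. Below is a small, stand-alone sketch of how such defines are typically consumed; BIT() is redefined locally only so the example builds outside the kernel, and the shortened names mirror, but are not, the real SYSTEM_CTRL_* defines.

#include <stdio.h>
#include <stdint.h>

/* BIT() comes from <linux/bits.h> in the kernel; redefined so this builds alone. */
#define BIT(n)			(1u << (n))

/* Shortened stand-ins that mirror the new-style SYSTEM_CTRL_* defines. */
#define DPMS_MASK		(0x3u << 30)
#define DPMS_VNHN		(0x3u << 30)
#define PANEL_VSYNC_ACTIVE	BIT(18)

int main(void)
{
	uint32_t system_ctrl = 0;

	/* Single-bit flags: test and set with plain AND/OR. */
	system_ctrl |= PANEL_VSYNC_ACTIVE;
	if (system_ctrl & PANEL_VSYNC_ACTIVE)
		printf("panel vsync active\n");

	/* Multi-bit fields: clear with the _MASK define, then OR in the value. */
	system_ctrl = (system_ctrl & ~DPMS_MASK) | DPMS_VNHN;
	printf("system_ctrl = 0x%08x\n", system_ctrl);
	return 0;
}
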
-#define DE_STATE2_DE_RESERVED 0:0 - - +#define DE_STATE2_DE_FIFO_EMPTY BIT(3) +#define DE_STATE2_DE_STATUS_BUSY BIT(2) +#define DE_STATE2_DE_MEM_FIFO_EMPTY BIT(1) #define SYSTEM_CTRL 0x000000 -#define SYSTEM_CTRL_DPMS 31:30 -#define SYSTEM_CTRL_DPMS_VPHP 0 -#define SYSTEM_CTRL_DPMS_VPHN 1 -#define SYSTEM_CTRL_DPMS_VNHP 2 -#define SYSTEM_CTRL_DPMS_VNHN 3 -#define SYSTEM_CTRL_PCI_BURST 29:29 -#define SYSTEM_CTRL_PCI_BURST_OFF 0 -#define SYSTEM_CTRL_PCI_BURST_ON 1 -#define SYSTEM_CTRL_PCI_MASTER 25:25 -#define SYSTEM_CTRL_PCI_MASTER_OFF 0 -#define SYSTEM_CTRL_PCI_MASTER_ON 1 -#define SYSTEM_CTRL_LATENCY_TIMER 24:24 -#define SYSTEM_CTRL_LATENCY_TIMER_ON 0 -#define SYSTEM_CTRL_LATENCY_TIMER_OFF 1 -#define SYSTEM_CTRL_DE_FIFO 23:23 -#define SYSTEM_CTRL_DE_FIFO_NOTEMPTY 0 -#define SYSTEM_CTRL_DE_FIFO_EMPTY 1 -#define SYSTEM_CTRL_DE_STATUS 22:22 -#define SYSTEM_CTRL_DE_STATUS_IDLE 0 -#define SYSTEM_CTRL_DE_STATUS_BUSY 1 -#define SYSTEM_CTRL_DE_MEM_FIFO 21:21 -#define SYSTEM_CTRL_DE_MEM_FIFO_NOTEMPTY 0 -#define SYSTEM_CTRL_DE_MEM_FIFO_EMPTY 1 -#define SYSTEM_CTRL_CSC_STATUS 20:20 -#define SYSTEM_CTRL_CSC_STATUS_IDLE 0 -#define SYSTEM_CTRL_CSC_STATUS_BUSY 1 -#define SYSTEM_CTRL_CRT_VSYNC 19:19 -#define SYSTEM_CTRL_CRT_VSYNC_INACTIVE 0 -#define SYSTEM_CTRL_CRT_VSYNC_ACTIVE 1 -#define SYSTEM_CTRL_PANEL_VSYNC 18:18 -#define SYSTEM_CTRL_PANEL_VSYNC_INACTIVE 0 -#define SYSTEM_CTRL_PANEL_VSYNC_ACTIVE 1 -#define SYSTEM_CTRL_CURRENT_BUFFER 17:17 -#define SYSTEM_CTRL_CURRENT_BUFFER_NORMAL 0 -#define SYSTEM_CTRL_CURRENT_BUFFER_FLIP_PENDING 1 -#define SYSTEM_CTRL_DMA_STATUS 16:16 -#define SYSTEM_CTRL_DMA_STATUS_IDLE 0 -#define SYSTEM_CTRL_DMA_STATUS_BUSY 1 -#define SYSTEM_CTRL_PCI_BURST_READ 15:15 -#define SYSTEM_CTRL_PCI_BURST_READ_OFF 0 -#define SYSTEM_CTRL_PCI_BURST_READ_ON 1 -#define SYSTEM_CTRL_DE_ABORT 13:13 -#define SYSTEM_CTRL_DE_ABORT_OFF 0 -#define SYSTEM_CTRL_DE_ABORT_ON 1 -#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK 11:11 -#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK_OFF 0 -#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK_ON 1 -#define SYSTEM_CTRL_PCI_RETRY 7:7 -#define SYSTEM_CTRL_PCI_RETRY_ON 0 -#define SYSTEM_CTRL_PCI_RETRY_OFF 1 -#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE 5:4 -#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_1 0 -#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_2 1 -#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_4 2 -#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_8 3 -#define SYSTEM_CTRL_CRT_TRISTATE 3:3 -#define SYSTEM_CTRL_CRT_TRISTATE_OFF 0 -#define SYSTEM_CTRL_CRT_TRISTATE_ON 1 -#define SYSTEM_CTRL_PCIMEM_TRISTATE 2:2 -#define SYSTEM_CTRL_PCIMEM_TRISTATE_OFF 0 -#define SYSTEM_CTRL_PCIMEM_TRISTATE_ON 1 -#define SYSTEM_CTRL_LOCALMEM_TRISTATE 1:1 -#define SYSTEM_CTRL_LOCALMEM_TRISTATE_OFF 0 -#define SYSTEM_CTRL_LOCALMEM_TRISTATE_ON 1 -#define SYSTEM_CTRL_PANEL_TRISTATE 0:0 -#define SYSTEM_CTRL_PANEL_TRISTATE_OFF 0 -#define SYSTEM_CTRL_PANEL_TRISTATE_ON 1 +#define SYSTEM_CTRL_DPMS_MASK (0x3 << 30) +#define SYSTEM_CTRL_DPMS_VPHP (0x0 << 30) +#define SYSTEM_CTRL_DPMS_VPHN (0x1 << 30) +#define SYSTEM_CTRL_DPMS_VNHP (0x2 << 30) +#define SYSTEM_CTRL_DPMS_VNHN (0x3 << 30) +#define SYSTEM_CTRL_PCI_BURST BIT(29) +#define SYSTEM_CTRL_PCI_MASTER BIT(25) +#define SYSTEM_CTRL_LATENCY_TIMER_OFF BIT(24) +#define SYSTEM_CTRL_DE_FIFO_EMPTY BIT(23) +#define SYSTEM_CTRL_DE_STATUS_BUSY BIT(22) +#define SYSTEM_CTRL_DE_MEM_FIFO_EMPTY BIT(21) +#define SYSTEM_CTRL_CSC_STATUS_BUSY BIT(20) +#define SYSTEM_CTRL_CRT_VSYNC_ACTIVE BIT(19) +#define SYSTEM_CTRL_PANEL_VSYNC_ACTIVE BIT(18) +#define 
SYSTEM_CTRL_CURRENT_BUFFER_FLIP_PENDING BIT(17) +#define SYSTEM_CTRL_DMA_STATUS_BUSY BIT(16) +#define SYSTEM_CTRL_PCI_BURST_READ BIT(15) +#define SYSTEM_CTRL_DE_ABORT BIT(13) +#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK BIT(11) +#define SYSTEM_CTRL_PCI_RETRY_OFF BIT(7) +#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_MASK (0x3 << 4) +#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_1 (0x0 << 4) +#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_2 (0x1 << 4) +#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_4 (0x2 << 4) +#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_8 (0x3 << 4) +#define SYSTEM_CTRL_CRT_TRISTATE BIT(3) +#define SYSTEM_CTRL_PCIMEM_TRISTATE BIT(2) +#define SYSTEM_CTRL_LOCALMEM_TRISTATE BIT(1) +#define SYSTEM_CTRL_PANEL_TRISTATE BIT(0) #define MISC_CTRL 0x000004 -#define MISC_CTRL_DRAM_RERESH_COUNT 27:27 -#define MISC_CTRL_DRAM_RERESH_COUNT_1ROW 0 -#define MISC_CTRL_DRAM_RERESH_COUNT_3ROW 1 -#define MISC_CTRL_DRAM_REFRESH_TIME 26:25 -#define MISC_CTRL_DRAM_REFRESH_TIME_8 0 -#define MISC_CTRL_DRAM_REFRESH_TIME_16 1 -#define MISC_CTRL_DRAM_REFRESH_TIME_32 2 -#define MISC_CTRL_DRAM_REFRESH_TIME_64 3 -#define MISC_CTRL_INT_OUTPUT 24:24 -#define MISC_CTRL_INT_OUTPUT_NORMAL 0 -#define MISC_CTRL_INT_OUTPUT_INVERT 1 -#define MISC_CTRL_PLL_CLK_COUNT 23:23 -#define MISC_CTRL_PLL_CLK_COUNT_OFF 0 -#define MISC_CTRL_PLL_CLK_COUNT_ON 1 -#define MISC_CTRL_DAC_POWER 20:20 -#define MISC_CTRL_DAC_POWER_ON 0 -#define MISC_CTRL_DAC_POWER_OFF 1 -#define MISC_CTRL_CLK_SELECT 16:16 -#define MISC_CTRL_CLK_SELECT_OSC 0 -#define MISC_CTRL_CLK_SELECT_TESTCLK 1 -#define MISC_CTRL_DRAM_COLUMN_SIZE 15:14 -#define MISC_CTRL_DRAM_COLUMN_SIZE_256 0 -#define MISC_CTRL_DRAM_COLUMN_SIZE_512 1 -#define MISC_CTRL_DRAM_COLUMN_SIZE_1024 2 -#define MISC_CTRL_LOCALMEM_SIZE 13:12 -#define MISC_CTRL_LOCALMEM_SIZE_8M 3 -#define MISC_CTRL_LOCALMEM_SIZE_16M 0 -#define MISC_CTRL_LOCALMEM_SIZE_32M 1 -#define MISC_CTRL_LOCALMEM_SIZE_64M 2 -#define MISC_CTRL_DRAM_TWTR 11:11 -#define MISC_CTRL_DRAM_TWTR_2CLK 0 -#define MISC_CTRL_DRAM_TWTR_1CLK 1 -#define MISC_CTRL_DRAM_TWR 10:10 -#define MISC_CTRL_DRAM_TWR_3CLK 0 -#define MISC_CTRL_DRAM_TWR_2CLK 1 -#define MISC_CTRL_DRAM_TRP 9:9 -#define MISC_CTRL_DRAM_TRP_3CLK 0 -#define MISC_CTRL_DRAM_TRP_4CLK 1 -#define MISC_CTRL_DRAM_TRFC 8:8 -#define MISC_CTRL_DRAM_TRFC_12CLK 0 -#define MISC_CTRL_DRAM_TRFC_14CLK 1 -#define MISC_CTRL_DRAM_TRAS 7:7 -#define MISC_CTRL_DRAM_TRAS_7CLK 0 -#define MISC_CTRL_DRAM_TRAS_8CLK 1 -#define MISC_CTRL_LOCALMEM_RESET 6:6 -#define MISC_CTRL_LOCALMEM_RESET_RESET 0 -#define MISC_CTRL_LOCALMEM_RESET_NORMAL 1 -#define MISC_CTRL_LOCALMEM_STATE 5:5 -#define MISC_CTRL_LOCALMEM_STATE_ACTIVE 0 -#define MISC_CTRL_LOCALMEM_STATE_INACTIVE 1 -#define MISC_CTRL_CPU_CAS_LATENCY 4:4 -#define MISC_CTRL_CPU_CAS_LATENCY_2CLK 0 -#define MISC_CTRL_CPU_CAS_LATENCY_3CLK 1 -#define MISC_CTRL_DLL 3:3 -#define MISC_CTRL_DLL_ON 0 -#define MISC_CTRL_DLL_OFF 1 -#define MISC_CTRL_DRAM_OUTPUT 2:2 -#define MISC_CTRL_DRAM_OUTPUT_LOW 0 -#define MISC_CTRL_DRAM_OUTPUT_HIGH 1 -#define MISC_CTRL_LOCALMEM_BUS_SIZE 1:1 -#define MISC_CTRL_LOCALMEM_BUS_SIZE_32 0 -#define MISC_CTRL_LOCALMEM_BUS_SIZE_64 1 -#define MISC_CTRL_EMBEDDED_LOCALMEM 0:0 -#define MISC_CTRL_EMBEDDED_LOCALMEM_ON 0 -#define MISC_CTRL_EMBEDDED_LOCALMEM_OFF 1 +#define MISC_CTRL_DRAM_RERESH_COUNT BIT(27) +#define MISC_CTRL_DRAM_REFRESH_TIME_MASK (0x3 << 25) +#define MISC_CTRL_DRAM_REFRESH_TIME_8 (0x0 << 25) +#define MISC_CTRL_DRAM_REFRESH_TIME_16 (0x1 << 25) +#define MISC_CTRL_DRAM_REFRESH_TIME_32 (0x2 << 25) +#define MISC_CTRL_DRAM_REFRESH_TIME_64 
(0x3 << 25) +#define MISC_CTRL_INT_OUTPUT_INVERT BIT(24) +#define MISC_CTRL_PLL_CLK_COUNT BIT(23) +#define MISC_CTRL_DAC_POWER_OFF BIT(20) +#define MISC_CTRL_CLK_SELECT_TESTCLK BIT(16) +#define MISC_CTRL_DRAM_COLUMN_SIZE_MASK (0x3 << 14) +#define MISC_CTRL_DRAM_COLUMN_SIZE_256 (0x0 << 14) +#define MISC_CTRL_DRAM_COLUMN_SIZE_512 (0x1 << 14) +#define MISC_CTRL_DRAM_COLUMN_SIZE_1024 (0x2 << 14) +#define MISC_CTRL_LOCALMEM_SIZE_MASK (0x3 << 12) +#define MISC_CTRL_LOCALMEM_SIZE_8M (0x3 << 12) +#define MISC_CTRL_LOCALMEM_SIZE_16M (0x0 << 12) +#define MISC_CTRL_LOCALMEM_SIZE_32M (0x1 << 12) +#define MISC_CTRL_LOCALMEM_SIZE_64M (0x2 << 12) +#define MISC_CTRL_DRAM_TWTR BIT(11) +#define MISC_CTRL_DRAM_TWR BIT(10) +#define MISC_CTRL_DRAM_TRP BIT(9) +#define MISC_CTRL_DRAM_TRFC BIT(8) +#define MISC_CTRL_DRAM_TRAS BIT(7) +#define MISC_CTRL_LOCALMEM_RESET BIT(6) +#define MISC_CTRL_LOCALMEM_STATE_INACTIVE BIT(5) +#define MISC_CTRL_CPU_CAS_LATENCY BIT(4) +#define MISC_CTRL_DLL_OFF BIT(3) +#define MISC_CTRL_DRAM_OUTPUT_HIGH BIT(2) +#define MISC_CTRL_LOCALMEM_BUS_SIZE BIT(1) +#define MISC_CTRL_EMBEDDED_LOCALMEM_OFF BIT(0) #define GPIO_MUX 0x000008 -#define GPIO_MUX_31 31:31 -#define GPIO_MUX_31_GPIO 0 -#define GPIO_MUX_31_I2C 1 -#define GPIO_MUX_30 30:30 -#define GPIO_MUX_30_GPIO 0 -#define GPIO_MUX_30_I2C 1 -#define GPIO_MUX_29 29:29 -#define GPIO_MUX_29_GPIO 0 -#define GPIO_MUX_29_SSP1 1 -#define GPIO_MUX_28 28:28 -#define GPIO_MUX_28_GPIO 0 -#define GPIO_MUX_28_SSP1 1 -#define GPIO_MUX_27 27:27 -#define GPIO_MUX_27_GPIO 0 -#define GPIO_MUX_27_SSP1 1 -#define GPIO_MUX_26 26:26 -#define GPIO_MUX_26_GPIO 0 -#define GPIO_MUX_26_SSP1 1 -#define GPIO_MUX_25 25:25 -#define GPIO_MUX_25_GPIO 0 -#define GPIO_MUX_25_SSP1 1 -#define GPIO_MUX_24 24:24 -#define GPIO_MUX_24_GPIO 0 -#define GPIO_MUX_24_SSP0 1 -#define GPIO_MUX_23 23:23 -#define GPIO_MUX_23_GPIO 0 -#define GPIO_MUX_23_SSP0 1 -#define GPIO_MUX_22 22:22 -#define GPIO_MUX_22_GPIO 0 -#define GPIO_MUX_22_SSP0 1 -#define GPIO_MUX_21 21:21 -#define GPIO_MUX_21_GPIO 0 -#define GPIO_MUX_21_SSP0 1 -#define GPIO_MUX_20 20:20 -#define GPIO_MUX_20_GPIO 0 -#define GPIO_MUX_20_SSP0 1 -#define GPIO_MUX_19 19:19 -#define GPIO_MUX_19_GPIO 0 -#define GPIO_MUX_19_PWM 1 -#define GPIO_MUX_18 18:18 -#define GPIO_MUX_18_GPIO 0 -#define GPIO_MUX_18_PWM 1 -#define GPIO_MUX_17 17:17 -#define GPIO_MUX_17_GPIO 0 -#define GPIO_MUX_17_PWM 1 -#define GPIO_MUX_16 16:16 -#define GPIO_MUX_16_GPIO_ZVPORT 0 -#define GPIO_MUX_16_TEST_DATA 1 -#define GPIO_MUX_15 15:15 -#define GPIO_MUX_15_GPIO_ZVPORT 0 -#define GPIO_MUX_15_TEST_DATA 1 -#define GPIO_MUX_14 14:14 -#define GPIO_MUX_14_GPIO_ZVPORT 0 -#define GPIO_MUX_14_TEST_DATA 1 -#define GPIO_MUX_13 13:13 -#define GPIO_MUX_13_GPIO_ZVPORT 0 -#define GPIO_MUX_13_TEST_DATA 1 -#define GPIO_MUX_12 12:12 -#define GPIO_MUX_12_GPIO_ZVPORT 0 -#define GPIO_MUX_12_TEST_DATA 1 -#define GPIO_MUX_11 11:11 -#define GPIO_MUX_11_GPIO_ZVPORT 0 -#define GPIO_MUX_11_TEST_DATA 1 -#define GPIO_MUX_10 10:10 -#define GPIO_MUX_10_GPIO_ZVPORT 0 -#define GPIO_MUX_10_TEST_DATA 1 -#define GPIO_MUX_9 9:9 -#define GPIO_MUX_9_GPIO_ZVPORT 0 -#define GPIO_MUX_9_TEST_DATA 1 -#define GPIO_MUX_8 8:8 -#define GPIO_MUX_8_GPIO_ZVPORT 0 -#define GPIO_MUX_8_TEST_DATA 1 -#define GPIO_MUX_7 7:7 -#define GPIO_MUX_7_GPIO_ZVPORT 0 -#define GPIO_MUX_7_TEST_DATA 1 -#define GPIO_MUX_6 6:6 -#define GPIO_MUX_6_GPIO_ZVPORT 0 -#define GPIO_MUX_6_TEST_DATA 1 -#define GPIO_MUX_5 5:5 -#define GPIO_MUX_5_GPIO_ZVPORT 0 -#define GPIO_MUX_5_TEST_DATA 1 -#define GPIO_MUX_4 4:4 -#define 
GPIO_MUX_4_GPIO_ZVPORT 0 -#define GPIO_MUX_4_TEST_DATA 1 -#define GPIO_MUX_3 3:3 -#define GPIO_MUX_3_GPIO_ZVPORT 0 -#define GPIO_MUX_3_TEST_DATA 1 -#define GPIO_MUX_2 2:2 -#define GPIO_MUX_2_GPIO_ZVPORT 0 -#define GPIO_MUX_2_TEST_DATA 1 -#define GPIO_MUX_1 1:1 -#define GPIO_MUX_1_GPIO_ZVPORT 0 -#define GPIO_MUX_1_TEST_DATA 1 -#define GPIO_MUX_0 0:0 -#define GPIO_MUX_0_GPIO_ZVPORT 0 -#define GPIO_MUX_0_TEST_DATA 1 +#define GPIO_MUX_31 BIT(31) +#define GPIO_MUX_30 BIT(30) +#define GPIO_MUX_29 BIT(29) +#define GPIO_MUX_28 BIT(28) +#define GPIO_MUX_27 BIT(27) +#define GPIO_MUX_26 BIT(26) +#define GPIO_MUX_25 BIT(25) +#define GPIO_MUX_24 BIT(24) +#define GPIO_MUX_23 BIT(23) +#define GPIO_MUX_22 BIT(22) +#define GPIO_MUX_21 BIT(21) +#define GPIO_MUX_20 BIT(20) +#define GPIO_MUX_19 BIT(19) +#define GPIO_MUX_18 BIT(18) +#define GPIO_MUX_17 BIT(17) +#define GPIO_MUX_16 BIT(16) +#define GPIO_MUX_15 BIT(15) +#define GPIO_MUX_14 BIT(14) +#define GPIO_MUX_13 BIT(13) +#define GPIO_MUX_12 BIT(12) +#define GPIO_MUX_11 BIT(11) +#define GPIO_MUX_10 BIT(10) +#define GPIO_MUX_9 BIT(9) +#define GPIO_MUX_8 BIT(8) +#define GPIO_MUX_7 BIT(7) +#define GPIO_MUX_6 BIT(6) +#define GPIO_MUX_5 BIT(5) +#define GPIO_MUX_4 BIT(4) +#define GPIO_MUX_3 BIT(3) +#define GPIO_MUX_2 BIT(2) +#define GPIO_MUX_1 BIT(1) +#define GPIO_MUX_0 BIT(0) #define LOCALMEM_ARBITRATION 0x00000C -#define LOCALMEM_ARBITRATION_ROTATE 28:28 -#define LOCALMEM_ARBITRATION_ROTATE_OFF 0 -#define LOCALMEM_ARBITRATION_ROTATE_ON 1 -#define LOCALMEM_ARBITRATION_VGA 26:24 -#define LOCALMEM_ARBITRATION_VGA_OFF 0 -#define LOCALMEM_ARBITRATION_VGA_PRIORITY_1 1 -#define LOCALMEM_ARBITRATION_VGA_PRIORITY_2 2 -#define LOCALMEM_ARBITRATION_VGA_PRIORITY_3 3 -#define LOCALMEM_ARBITRATION_VGA_PRIORITY_4 4 -#define LOCALMEM_ARBITRATION_VGA_PRIORITY_5 5 -#define LOCALMEM_ARBITRATION_VGA_PRIORITY_6 6 -#define LOCALMEM_ARBITRATION_VGA_PRIORITY_7 7 -#define LOCALMEM_ARBITRATION_DMA 22:20 -#define LOCALMEM_ARBITRATION_DMA_OFF 0 -#define LOCALMEM_ARBITRATION_DMA_PRIORITY_1 1 -#define LOCALMEM_ARBITRATION_DMA_PRIORITY_2 2 -#define LOCALMEM_ARBITRATION_DMA_PRIORITY_3 3 -#define LOCALMEM_ARBITRATION_DMA_PRIORITY_4 4 -#define LOCALMEM_ARBITRATION_DMA_PRIORITY_5 5 -#define LOCALMEM_ARBITRATION_DMA_PRIORITY_6 6 -#define LOCALMEM_ARBITRATION_DMA_PRIORITY_7 7 -#define LOCALMEM_ARBITRATION_ZVPORT1 18:16 -#define LOCALMEM_ARBITRATION_ZVPORT1_OFF 0 -#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_1 1 -#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_2 2 -#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_3 3 -#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_4 4 -#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_5 5 -#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_6 6 -#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_7 7 -#define LOCALMEM_ARBITRATION_ZVPORT0 14:12 -#define LOCALMEM_ARBITRATION_ZVPORT0_OFF 0 -#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_1 1 -#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_2 2 -#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_3 3 -#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_4 4 -#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_5 5 -#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_6 6 -#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_7 7 -#define LOCALMEM_ARBITRATION_VIDEO 10:8 -#define LOCALMEM_ARBITRATION_VIDEO_OFF 0 -#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_1 1 -#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_2 2 -#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_3 3 -#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_4 4 -#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_5 5 -#define 
LOCALMEM_ARBITRATION_VIDEO_PRIORITY_6 6 -#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_7 7 -#define LOCALMEM_ARBITRATION_PANEL 6:4 -#define LOCALMEM_ARBITRATION_PANEL_OFF 0 -#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_1 1 -#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_2 2 -#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_3 3 -#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_4 4 -#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_5 5 -#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_6 6 -#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_7 7 -#define LOCALMEM_ARBITRATION_CRT 2:0 -#define LOCALMEM_ARBITRATION_CRT_OFF 0 -#define LOCALMEM_ARBITRATION_CRT_PRIORITY_1 1 -#define LOCALMEM_ARBITRATION_CRT_PRIORITY_2 2 -#define LOCALMEM_ARBITRATION_CRT_PRIORITY_3 3 -#define LOCALMEM_ARBITRATION_CRT_PRIORITY_4 4 -#define LOCALMEM_ARBITRATION_CRT_PRIORITY_5 5 -#define LOCALMEM_ARBITRATION_CRT_PRIORITY_6 6 -#define LOCALMEM_ARBITRATION_CRT_PRIORITY_7 7 +#define LOCALMEM_ARBITRATION_ROTATE BIT(28) +#define LOCALMEM_ARBITRATION_VGA_MASK (0x7 << 24) +#define LOCALMEM_ARBITRATION_VGA_OFF (0x0 << 24) +#define LOCALMEM_ARBITRATION_VGA_PRIORITY_1 (0x1 << 24) +#define LOCALMEM_ARBITRATION_VGA_PRIORITY_2 (0x2 << 24) +#define LOCALMEM_ARBITRATION_VGA_PRIORITY_3 (0x3 << 24) +#define LOCALMEM_ARBITRATION_VGA_PRIORITY_4 (0x4 << 24) +#define LOCALMEM_ARBITRATION_VGA_PRIORITY_5 (0x5 << 24) +#define LOCALMEM_ARBITRATION_VGA_PRIORITY_6 (0x6 << 24) +#define LOCALMEM_ARBITRATION_VGA_PRIORITY_7 (0x7 << 24) +#define LOCALMEM_ARBITRATION_DMA_MASK (0x7 << 20) +#define LOCALMEM_ARBITRATION_DMA_OFF (0x0 << 20) +#define LOCALMEM_ARBITRATION_DMA_PRIORITY_1 (0x1 << 20) +#define LOCALMEM_ARBITRATION_DMA_PRIORITY_2 (0x2 << 20) +#define LOCALMEM_ARBITRATION_DMA_PRIORITY_3 (0x3 << 20) +#define LOCALMEM_ARBITRATION_DMA_PRIORITY_4 (0x4 << 20) +#define LOCALMEM_ARBITRATION_DMA_PRIORITY_5 (0x5 << 20) +#define LOCALMEM_ARBITRATION_DMA_PRIORITY_6 (0x6 << 20) +#define LOCALMEM_ARBITRATION_DMA_PRIORITY_7 (0x7 << 20) +#define LOCALMEM_ARBITRATION_ZVPORT1_MASK (0x7 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT1_OFF (0x0 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_1 (0x1 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_2 (0x2 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_3 (0x3 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_4 (0x4 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_5 (0x5 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_6 (0x6 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_7 (0x7 << 16) +#define LOCALMEM_ARBITRATION_ZVPORT0_MASK (0x7 << 12) +#define LOCALMEM_ARBITRATION_ZVPORT0_OFF (0x0 << 12) +#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_1 (0x1 << 12) +#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_2 (0x2 << 12) +#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_3 (0x3 << 12) +#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_4 (0x4 << 12) +#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_5 (0x5 << 12) +#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_6 (0x6 << 12) +#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_7 (0x7 << 12) +#define LOCALMEM_ARBITRATION_VIDEO_MASK (0x7 << 8) +#define LOCALMEM_ARBITRATION_VIDEO_OFF (0x0 << 8) +#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_1 (0x1 << 8) +#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_2 (0x2 << 8) +#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_3 (0x3 << 8) +#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_4 (0x4 << 8) +#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_5 (0x5 << 8) +#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_6 (0x6 << 8) +#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_7 (0x7 << 8) 
+#define LOCALMEM_ARBITRATION_PANEL_MASK (0x7 << 4) +#define LOCALMEM_ARBITRATION_PANEL_OFF (0x0 << 4) +#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_1 (0x1 << 4) +#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_2 (0x2 << 4) +#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_3 (0x3 << 4) +#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_4 (0x4 << 4) +#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_5 (0x5 << 4) +#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_6 (0x6 << 4) +#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_7 (0x7 << 4) +#define LOCALMEM_ARBITRATION_CRT_MASK 0x7 +#define LOCALMEM_ARBITRATION_CRT_OFF 0x0 +#define LOCALMEM_ARBITRATION_CRT_PRIORITY_1 0x1 +#define LOCALMEM_ARBITRATION_CRT_PRIORITY_2 0x2 +#define LOCALMEM_ARBITRATION_CRT_PRIORITY_3 0x3 +#define LOCALMEM_ARBITRATION_CRT_PRIORITY_4 0x4 +#define LOCALMEM_ARBITRATION_CRT_PRIORITY_5 0x5 +#define LOCALMEM_ARBITRATION_CRT_PRIORITY_6 0x6 +#define LOCALMEM_ARBITRATION_CRT_PRIORITY_7 0x7 #define PCIMEM_ARBITRATION 0x000010 -#define PCIMEM_ARBITRATION_ROTATE 28:28 -#define PCIMEM_ARBITRATION_ROTATE_OFF 0 -#define PCIMEM_ARBITRATION_ROTATE_ON 1 -#define PCIMEM_ARBITRATION_VGA 26:24 -#define PCIMEM_ARBITRATION_VGA_OFF 0 -#define PCIMEM_ARBITRATION_VGA_PRIORITY_1 1 -#define PCIMEM_ARBITRATION_VGA_PRIORITY_2 2 -#define PCIMEM_ARBITRATION_VGA_PRIORITY_3 3 -#define PCIMEM_ARBITRATION_VGA_PRIORITY_4 4 -#define PCIMEM_ARBITRATION_VGA_PRIORITY_5 5 -#define PCIMEM_ARBITRATION_VGA_PRIORITY_6 6 -#define PCIMEM_ARBITRATION_VGA_PRIORITY_7 7 -#define PCIMEM_ARBITRATION_DMA 22:20 -#define PCIMEM_ARBITRATION_DMA_OFF 0 -#define PCIMEM_ARBITRATION_DMA_PRIORITY_1 1 -#define PCIMEM_ARBITRATION_DMA_PRIORITY_2 2 -#define PCIMEM_ARBITRATION_DMA_PRIORITY_3 3 -#define PCIMEM_ARBITRATION_DMA_PRIORITY_4 4 -#define PCIMEM_ARBITRATION_DMA_PRIORITY_5 5 -#define PCIMEM_ARBITRATION_DMA_PRIORITY_6 6 -#define PCIMEM_ARBITRATION_DMA_PRIORITY_7 7 -#define PCIMEM_ARBITRATION_ZVPORT1 18:16 -#define PCIMEM_ARBITRATION_ZVPORT1_OFF 0 -#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_1 1 -#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_2 2 -#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_3 3 -#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_4 4 -#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_5 5 -#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_6 6 -#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_7 7 -#define PCIMEM_ARBITRATION_ZVPORT0 14:12 -#define PCIMEM_ARBITRATION_ZVPORT0_OFF 0 -#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_1 1 -#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_2 2 -#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_3 3 -#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_4 4 -#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_5 5 -#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_6 6 -#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_7 7 -#define PCIMEM_ARBITRATION_VIDEO 10:8 -#define PCIMEM_ARBITRATION_VIDEO_OFF 0 -#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_1 1 -#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_2 2 -#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_3 3 -#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_4 4 -#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_5 5 -#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_6 6 -#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_7 7 -#define PCIMEM_ARBITRATION_PANEL 6:4 -#define PCIMEM_ARBITRATION_PANEL_OFF 0 -#define PCIMEM_ARBITRATION_PANEL_PRIORITY_1 1 -#define PCIMEM_ARBITRATION_PANEL_PRIORITY_2 2 -#define PCIMEM_ARBITRATION_PANEL_PRIORITY_3 3 -#define PCIMEM_ARBITRATION_PANEL_PRIORITY_4 4 -#define PCIMEM_ARBITRATION_PANEL_PRIORITY_5 5 -#define PCIMEM_ARBITRATION_PANEL_PRIORITY_6 6 -#define PCIMEM_ARBITRATION_PANEL_PRIORITY_7 7 -#define 
PCIMEM_ARBITRATION_CRT 2:0 -#define PCIMEM_ARBITRATION_CRT_OFF 0 -#define PCIMEM_ARBITRATION_CRT_PRIORITY_1 1 -#define PCIMEM_ARBITRATION_CRT_PRIORITY_2 2 -#define PCIMEM_ARBITRATION_CRT_PRIORITY_3 3 -#define PCIMEM_ARBITRATION_CRT_PRIORITY_4 4 -#define PCIMEM_ARBITRATION_CRT_PRIORITY_5 5 -#define PCIMEM_ARBITRATION_CRT_PRIORITY_6 6 -#define PCIMEM_ARBITRATION_CRT_PRIORITY_7 7 +#define PCIMEM_ARBITRATION_ROTATE BIT(28) +#define PCIMEM_ARBITRATION_VGA_MASK (0x7 << 24) +#define PCIMEM_ARBITRATION_VGA_OFF (0x0 << 24) +#define PCIMEM_ARBITRATION_VGA_PRIORITY_1 (0x1 << 24) +#define PCIMEM_ARBITRATION_VGA_PRIORITY_2 (0x2 << 24) +#define PCIMEM_ARBITRATION_VGA_PRIORITY_3 (0x3 << 24) +#define PCIMEM_ARBITRATION_VGA_PRIORITY_4 (0x4 << 24) +#define PCIMEM_ARBITRATION_VGA_PRIORITY_5 (0x5 << 24) +#define PCIMEM_ARBITRATION_VGA_PRIORITY_6 (0x6 << 24) +#define PCIMEM_ARBITRATION_VGA_PRIORITY_7 (0x7 << 24) +#define PCIMEM_ARBITRATION_DMA_MASK (0x7 << 20) +#define PCIMEM_ARBITRATION_DMA_OFF (0x0 << 20) +#define PCIMEM_ARBITRATION_DMA_PRIORITY_1 (0x1 << 20) +#define PCIMEM_ARBITRATION_DMA_PRIORITY_2 (0x2 << 20) +#define PCIMEM_ARBITRATION_DMA_PRIORITY_3 (0x3 << 20) +#define PCIMEM_ARBITRATION_DMA_PRIORITY_4 (0x4 << 20) +#define PCIMEM_ARBITRATION_DMA_PRIORITY_5 (0x5 << 20) +#define PCIMEM_ARBITRATION_DMA_PRIORITY_6 (0x6 << 20) +#define PCIMEM_ARBITRATION_DMA_PRIORITY_7 (0x7 << 20) +#define PCIMEM_ARBITRATION_ZVPORT1_MASK (0x7 << 16) +#define PCIMEM_ARBITRATION_ZVPORT1_OFF (0x0 << 16) +#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_1 (0x1 << 16) +#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_2 (0x2 << 16) +#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_3 (0x3 << 16) +#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_4 (0x4 << 16) +#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_5 (0x5 << 16) +#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_6 (0x6 << 16) +#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_7 (0x7 << 16) +#define PCIMEM_ARBITRATION_ZVPORT0_MASK (0x7 << 12) +#define PCIMEM_ARBITRATION_ZVPORT0_OFF (0x0 << 12) +#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_1 (0x1 << 12) +#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_2 (0x2 << 12) +#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_3 (0x3 << 12) +#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_4 (0x4 << 12) +#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_5 (0x5 << 12) +#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_6 (0x6 << 12) +#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_7 (0x7 << 12) +#define PCIMEM_ARBITRATION_VIDEO_MASK (0x7 << 8) +#define PCIMEM_ARBITRATION_VIDEO_OFF (0x0 << 8) +#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_1 (0x1 << 8) +#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_2 (0x2 << 8) +#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_3 (0x3 << 8) +#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_4 (0x4 << 8) +#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_5 (0x5 << 8) +#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_6 (0x6 << 8) +#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_7 (0x7 << 8) +#define PCIMEM_ARBITRATION_PANEL_MASK (0x7 << 4) +#define PCIMEM_ARBITRATION_PANEL_OFF (0x0 << 4) +#define PCIMEM_ARBITRATION_PANEL_PRIORITY_1 (0x1 << 4) +#define PCIMEM_ARBITRATION_PANEL_PRIORITY_2 (0x2 << 4) +#define PCIMEM_ARBITRATION_PANEL_PRIORITY_3 (0x3 << 4) +#define PCIMEM_ARBITRATION_PANEL_PRIORITY_4 (0x4 << 4) +#define PCIMEM_ARBITRATION_PANEL_PRIORITY_5 (0x5 << 4) +#define PCIMEM_ARBITRATION_PANEL_PRIORITY_6 (0x6 << 4) +#define PCIMEM_ARBITRATION_PANEL_PRIORITY_7 (0x7 << 4) +#define PCIMEM_ARBITRATION_CRT_MASK 0x7 +#define PCIMEM_ARBITRATION_CRT_OFF 0x0 +#define PCIMEM_ARBITRATION_CRT_PRIORITY_1 0x1 +#define 
PCIMEM_ARBITRATION_CRT_PRIORITY_2 0x2 +#define PCIMEM_ARBITRATION_CRT_PRIORITY_3 0x3 +#define PCIMEM_ARBITRATION_CRT_PRIORITY_4 0x4 +#define PCIMEM_ARBITRATION_CRT_PRIORITY_5 0x5 +#define PCIMEM_ARBITRATION_CRT_PRIORITY_6 0x6 +#define PCIMEM_ARBITRATION_CRT_PRIORITY_7 0x7 #define RAW_INT 0x000020 -#define RAW_INT_ZVPORT1_VSYNC 4:4 -#define RAW_INT_ZVPORT1_VSYNC_INACTIVE 0 -#define RAW_INT_ZVPORT1_VSYNC_ACTIVE 1 -#define RAW_INT_ZVPORT1_VSYNC_CLEAR 1 -#define RAW_INT_ZVPORT0_VSYNC 3:3 -#define RAW_INT_ZVPORT0_VSYNC_INACTIVE 0 -#define RAW_INT_ZVPORT0_VSYNC_ACTIVE 1 -#define RAW_INT_ZVPORT0_VSYNC_CLEAR 1 -#define RAW_INT_CRT_VSYNC 2:2 -#define RAW_INT_CRT_VSYNC_INACTIVE 0 -#define RAW_INT_CRT_VSYNC_ACTIVE 1 -#define RAW_INT_CRT_VSYNC_CLEAR 1 -#define RAW_INT_PANEL_VSYNC 1:1 -#define RAW_INT_PANEL_VSYNC_INACTIVE 0 -#define RAW_INT_PANEL_VSYNC_ACTIVE 1 -#define RAW_INT_PANEL_VSYNC_CLEAR 1 -#define RAW_INT_VGA_VSYNC 0:0 -#define RAW_INT_VGA_VSYNC_INACTIVE 0 -#define RAW_INT_VGA_VSYNC_ACTIVE 1 -#define RAW_INT_VGA_VSYNC_CLEAR 1 +#define RAW_INT_ZVPORT1_VSYNC BIT(4) +#define RAW_INT_ZVPORT0_VSYNC BIT(3) +#define RAW_INT_CRT_VSYNC BIT(2) +#define RAW_INT_PANEL_VSYNC BIT(1) +#define RAW_INT_VGA_VSYNC BIT(0) #define INT_STATUS 0x000024 -#define INT_STATUS_GPIO31 31:31 -#define INT_STATUS_GPIO31_INACTIVE 0 -#define INT_STATUS_GPIO31_ACTIVE 1 -#define INT_STATUS_GPIO30 30:30 -#define INT_STATUS_GPIO30_INACTIVE 0 -#define INT_STATUS_GPIO30_ACTIVE 1 -#define INT_STATUS_GPIO29 29:29 -#define INT_STATUS_GPIO29_INACTIVE 0 -#define INT_STATUS_GPIO29_ACTIVE 1 -#define INT_STATUS_GPIO28 28:28 -#define INT_STATUS_GPIO28_INACTIVE 0 -#define INT_STATUS_GPIO28_ACTIVE 1 -#define INT_STATUS_GPIO27 27:27 -#define INT_STATUS_GPIO27_INACTIVE 0 -#define INT_STATUS_GPIO27_ACTIVE 1 -#define INT_STATUS_GPIO26 26:26 -#define INT_STATUS_GPIO26_INACTIVE 0 -#define INT_STATUS_GPIO26_ACTIVE 1 -#define INT_STATUS_GPIO25 25:25 -#define INT_STATUS_GPIO25_INACTIVE 0 -#define INT_STATUS_GPIO25_ACTIVE 1 -#define INT_STATUS_I2C 12:12 -#define INT_STATUS_I2C_INACTIVE 0 -#define INT_STATUS_I2C_ACTIVE 1 -#define INT_STATUS_PWM 11:11 -#define INT_STATUS_PWM_INACTIVE 0 -#define INT_STATUS_PWM_ACTIVE 1 -#define INT_STATUS_DMA1 10:10 -#define INT_STATUS_DMA1_INACTIVE 0 -#define INT_STATUS_DMA1_ACTIVE 1 -#define INT_STATUS_DMA0 9:9 -#define INT_STATUS_DMA0_INACTIVE 0 -#define INT_STATUS_DMA0_ACTIVE 1 -#define INT_STATUS_PCI 8:8 -#define INT_STATUS_PCI_INACTIVE 0 -#define INT_STATUS_PCI_ACTIVE 1 -#define INT_STATUS_SSP1 7:7 -#define INT_STATUS_SSP1_INACTIVE 0 -#define INT_STATUS_SSP1_ACTIVE 1 -#define INT_STATUS_SSP0 6:6 -#define INT_STATUS_SSP0_INACTIVE 0 -#define INT_STATUS_SSP0_ACTIVE 1 -#define INT_STATUS_DE 5:5 -#define INT_STATUS_DE_INACTIVE 0 -#define INT_STATUS_DE_ACTIVE 1 -#define INT_STATUS_ZVPORT1_VSYNC 4:4 -#define INT_STATUS_ZVPORT1_VSYNC_INACTIVE 0 -#define INT_STATUS_ZVPORT1_VSYNC_ACTIVE 1 -#define INT_STATUS_ZVPORT0_VSYNC 3:3 -#define INT_STATUS_ZVPORT0_VSYNC_INACTIVE 0 -#define INT_STATUS_ZVPORT0_VSYNC_ACTIVE 1 -#define INT_STATUS_CRT_VSYNC 2:2 -#define INT_STATUS_CRT_VSYNC_INACTIVE 0 -#define INT_STATUS_CRT_VSYNC_ACTIVE 1 -#define INT_STATUS_PANEL_VSYNC 1:1 -#define INT_STATUS_PANEL_VSYNC_INACTIVE 0 -#define INT_STATUS_PANEL_VSYNC_ACTIVE 1 -#define INT_STATUS_VGA_VSYNC 0:0 -#define INT_STATUS_VGA_VSYNC_INACTIVE 0 -#define INT_STATUS_VGA_VSYNC_ACTIVE 1 +#define INT_STATUS_GPIO31 BIT(31) +#define INT_STATUS_GPIO30 BIT(30) +#define INT_STATUS_GPIO29 BIT(29) +#define INT_STATUS_GPIO28 BIT(28) +#define INT_STATUS_GPIO27 BIT(27) 
+#define INT_STATUS_GPIO26 BIT(26) +#define INT_STATUS_GPIO25 BIT(25) +#define INT_STATUS_I2C BIT(12) +#define INT_STATUS_PWM BIT(11) +#define INT_STATUS_DMA1 BIT(10) +#define INT_STATUS_DMA0 BIT(9) +#define INT_STATUS_PCI BIT(8) +#define INT_STATUS_SSP1 BIT(7) +#define INT_STATUS_SSP0 BIT(6) +#define INT_STATUS_DE BIT(5) +#define INT_STATUS_ZVPORT1_VSYNC BIT(4) +#define INT_STATUS_ZVPORT0_VSYNC BIT(3) +#define INT_STATUS_CRT_VSYNC BIT(2) +#define INT_STATUS_PANEL_VSYNC BIT(1) +#define INT_STATUS_VGA_VSYNC BIT(0) #define INT_MASK 0x000028 -#define INT_MASK_GPIO31 31:31 -#define INT_MASK_GPIO31_DISABLE 0 -#define INT_MASK_GPIO31_ENABLE 1 -#define INT_MASK_GPIO30 30:30 -#define INT_MASK_GPIO30_DISABLE 0 -#define INT_MASK_GPIO30_ENABLE 1 -#define INT_MASK_GPIO29 29:29 -#define INT_MASK_GPIO29_DISABLE 0 -#define INT_MASK_GPIO29_ENABLE 1 -#define INT_MASK_GPIO28 28:28 -#define INT_MASK_GPIO28_DISABLE 0 -#define INT_MASK_GPIO28_ENABLE 1 -#define INT_MASK_GPIO27 27:27 -#define INT_MASK_GPIO27_DISABLE 0 -#define INT_MASK_GPIO27_ENABLE 1 -#define INT_MASK_GPIO26 26:26 -#define INT_MASK_GPIO26_DISABLE 0 -#define INT_MASK_GPIO26_ENABLE 1 -#define INT_MASK_GPIO25 25:25 -#define INT_MASK_GPIO25_DISABLE 0 -#define INT_MASK_GPIO25_ENABLE 1 -#define INT_MASK_I2C 12:12 -#define INT_MASK_I2C_DISABLE 0 -#define INT_MASK_I2C_ENABLE 1 -#define INT_MASK_PWM 11:11 -#define INT_MASK_PWM_DISABLE 0 -#define INT_MASK_PWM_ENABLE 1 -#define INT_MASK_DMA1 10:10 -#define INT_MASK_DMA1_DISABLE 0 -#define INT_MASK_DMA1_ENABLE 1 -#define INT_MASK_DMA 9:9 -#define INT_MASK_DMA_DISABLE 0 -#define INT_MASK_DMA_ENABLE 1 -#define INT_MASK_PCI 8:8 -#define INT_MASK_PCI_DISABLE 0 -#define INT_MASK_PCI_ENABLE 1 -#define INT_MASK_SSP1 7:7 -#define INT_MASK_SSP1_DISABLE 0 -#define INT_MASK_SSP1_ENABLE 1 -#define INT_MASK_SSP0 6:6 -#define INT_MASK_SSP0_DISABLE 0 -#define INT_MASK_SSP0_ENABLE 1 -#define INT_MASK_DE 5:5 -#define INT_MASK_DE_DISABLE 0 -#define INT_MASK_DE_ENABLE 1 -#define INT_MASK_ZVPORT1_VSYNC 4:4 -#define INT_MASK_ZVPORT1_VSYNC_DISABLE 0 -#define INT_MASK_ZVPORT1_VSYNC_ENABLE 1 -#define INT_MASK_ZVPORT0_VSYNC 3:3 -#define INT_MASK_ZVPORT0_VSYNC_DISABLE 0 -#define INT_MASK_ZVPORT0_VSYNC_ENABLE 1 -#define INT_MASK_CRT_VSYNC 2:2 -#define INT_MASK_CRT_VSYNC_DISABLE 0 -#define INT_MASK_CRT_VSYNC_ENABLE 1 -#define INT_MASK_PANEL_VSYNC 1:1 -#define INT_MASK_PANEL_VSYNC_DISABLE 0 -#define INT_MASK_PANEL_VSYNC_ENABLE 1 -#define INT_MASK_VGA_VSYNC 0:0 -#define INT_MASK_VGA_VSYNC_DISABLE 0 -#define INT_MASK_VGA_VSYNC_ENABLE 1 +#define INT_MASK_GPIO31 BIT(31) +#define INT_MASK_GPIO30 BIT(30) +#define INT_MASK_GPIO29 BIT(29) +#define INT_MASK_GPIO28 BIT(28) +#define INT_MASK_GPIO27 BIT(27) +#define INT_MASK_GPIO26 BIT(26) +#define INT_MASK_GPIO25 BIT(25) +#define INT_MASK_I2C BIT(12) +#define INT_MASK_PWM BIT(11) +#define INT_MASK_DMA1 BIT(10) +#define INT_MASK_DMA BIT(9) +#define INT_MASK_PCI BIT(8) +#define INT_MASK_SSP1 BIT(7) +#define INT_MASK_SSP0 BIT(6) +#define INT_MASK_DE BIT(5) +#define INT_MASK_ZVPORT1_VSYNC BIT(4) +#define INT_MASK_ZVPORT0_VSYNC BIT(3) +#define INT_MASK_CRT_VSYNC BIT(2) +#define INT_MASK_PANEL_VSYNC BIT(1) +#define INT_MASK_VGA_VSYNC BIT(0) #define CURRENT_GATE 0x000040 -#define CURRENT_GATE_MCLK 15:14 +#define CURRENT_GATE_MCLK_MASK (0x3 << 14) #ifdef VALIDATION_CHIP - #define CURRENT_GATE_MCLK_112MHZ 0 - #define CURRENT_GATE_MCLK_84MHZ 1 - #define CURRENT_GATE_MCLK_56MHZ 2 - #define CURRENT_GATE_MCLK_42MHZ 3 + #define CURRENT_GATE_MCLK_112MHZ (0x0 << 14) + #define CURRENT_GATE_MCLK_84MHZ (0x1 << 14) 
+ #define CURRENT_GATE_MCLK_56MHZ (0x2 << 14) + #define CURRENT_GATE_MCLK_42MHZ (0x3 << 14) #else - #define CURRENT_GATE_MCLK_DIV_3 0 - #define CURRENT_GATE_MCLK_DIV_4 1 - #define CURRENT_GATE_MCLK_DIV_6 2 - #define CURRENT_GATE_MCLK_DIV_8 3 + #define CURRENT_GATE_MCLK_DIV_3 (0x0 << 14) + #define CURRENT_GATE_MCLK_DIV_4 (0x1 << 14) + #define CURRENT_GATE_MCLK_DIV_6 (0x2 << 14) + #define CURRENT_GATE_MCLK_DIV_8 (0x3 << 14) #endif -#define CURRENT_GATE_M2XCLK 13:12 +#define CURRENT_GATE_M2XCLK_MASK (0x3 << 12) #ifdef VALIDATION_CHIP - #define CURRENT_GATE_M2XCLK_336MHZ 0 - #define CURRENT_GATE_M2XCLK_168MHZ 1 - #define CURRENT_GATE_M2XCLK_112MHZ 2 - #define CURRENT_GATE_M2XCLK_84MHZ 3 + #define CURRENT_GATE_M2XCLK_336MHZ (0x0 << 12) + #define CURRENT_GATE_M2XCLK_168MHZ (0x1 << 12) + #define CURRENT_GATE_M2XCLK_112MHZ (0x2 << 12) + #define CURRENT_GATE_M2XCLK_84MHZ (0x3 << 12) #else - #define CURRENT_GATE_M2XCLK_DIV_1 0 - #define CURRENT_GATE_M2XCLK_DIV_2 1 - #define CURRENT_GATE_M2XCLK_DIV_3 2 - #define CURRENT_GATE_M2XCLK_DIV_4 3 + #define CURRENT_GATE_M2XCLK_DIV_1 (0x0 << 12) + #define CURRENT_GATE_M2XCLK_DIV_2 (0x1 << 12) + #define CURRENT_GATE_M2XCLK_DIV_3 (0x2 << 12) + #define CURRENT_GATE_M2XCLK_DIV_4 (0x3 << 12) #endif -#define CURRENT_GATE_VGA 10:10 -#define CURRENT_GATE_VGA_OFF 0 -#define CURRENT_GATE_VGA_ON 1 -#define CURRENT_GATE_PWM 9:9 -#define CURRENT_GATE_PWM_OFF 0 -#define CURRENT_GATE_PWM_ON 1 -#define CURRENT_GATE_I2C 8:8 -#define CURRENT_GATE_I2C_OFF 0 -#define CURRENT_GATE_I2C_ON 1 -#define CURRENT_GATE_SSP 7:7 -#define CURRENT_GATE_SSP_OFF 0 -#define CURRENT_GATE_SSP_ON 1 -#define CURRENT_GATE_GPIO 6:6 -#define CURRENT_GATE_GPIO_OFF 0 -#define CURRENT_GATE_GPIO_ON 1 -#define CURRENT_GATE_ZVPORT 5:5 -#define CURRENT_GATE_ZVPORT_OFF 0 -#define CURRENT_GATE_ZVPORT_ON 1 -#define CURRENT_GATE_CSC 4:4 -#define CURRENT_GATE_CSC_OFF 0 -#define CURRENT_GATE_CSC_ON 1 -#define CURRENT_GATE_DE 3:3 -#define CURRENT_GATE_DE_OFF 0 -#define CURRENT_GATE_DE_ON 1 -#define CURRENT_GATE_DISPLAY 2:2 -#define CURRENT_GATE_DISPLAY_OFF 0 -#define CURRENT_GATE_DISPLAY_ON 1 -#define CURRENT_GATE_LOCALMEM 1:1 -#define CURRENT_GATE_LOCALMEM_OFF 0 -#define CURRENT_GATE_LOCALMEM_ON 1 -#define CURRENT_GATE_DMA 0:0 -#define CURRENT_GATE_DMA_OFF 0 -#define CURRENT_GATE_DMA_ON 1 +#define CURRENT_GATE_VGA BIT(10) +#define CURRENT_GATE_PWM BIT(9) +#define CURRENT_GATE_I2C BIT(8) +#define CURRENT_GATE_SSP BIT(7) +#define CURRENT_GATE_GPIO BIT(6) +#define CURRENT_GATE_ZVPORT BIT(5) +#define CURRENT_GATE_CSC BIT(4) +#define CURRENT_GATE_DE BIT(3) +#define CURRENT_GATE_DISPLAY BIT(2) +#define CURRENT_GATE_LOCALMEM BIT(1) +#define CURRENT_GATE_DMA BIT(0) #define MODE0_GATE 0x000044 -#define MODE0_GATE_MCLK 15:14 -#define MODE0_GATE_MCLK_112MHZ 0 -#define MODE0_GATE_MCLK_84MHZ 1 -#define MODE0_GATE_MCLK_56MHZ 2 -#define MODE0_GATE_MCLK_42MHZ 3 -#define MODE0_GATE_M2XCLK 13:12 -#define MODE0_GATE_M2XCLK_336MHZ 0 -#define MODE0_GATE_M2XCLK_168MHZ 1 -#define MODE0_GATE_M2XCLK_112MHZ 2 -#define MODE0_GATE_M2XCLK_84MHZ 3 -#define MODE0_GATE_VGA 10:10 -#define MODE0_GATE_VGA_OFF 0 -#define MODE0_GATE_VGA_ON 1 -#define MODE0_GATE_PWM 9:9 -#define MODE0_GATE_PWM_OFF 0 -#define MODE0_GATE_PWM_ON 1 -#define MODE0_GATE_I2C 8:8 -#define MODE0_GATE_I2C_OFF 0 -#define MODE0_GATE_I2C_ON 1 -#define MODE0_GATE_SSP 7:7 -#define MODE0_GATE_SSP_OFF 0 -#define MODE0_GATE_SSP_ON 1 -#define MODE0_GATE_GPIO 6:6 -#define MODE0_GATE_GPIO_OFF 0 -#define MODE0_GATE_GPIO_ON 1 -#define MODE0_GATE_ZVPORT 5:5 -#define MODE0_GATE_ZVPORT_OFF 0 
-#define MODE0_GATE_ZVPORT_ON 1 -#define MODE0_GATE_CSC 4:4 -#define MODE0_GATE_CSC_OFF 0 -#define MODE0_GATE_CSC_ON 1 -#define MODE0_GATE_DE 3:3 -#define MODE0_GATE_DE_OFF 0 -#define MODE0_GATE_DE_ON 1 -#define MODE0_GATE_DISPLAY 2:2 -#define MODE0_GATE_DISPLAY_OFF 0 -#define MODE0_GATE_DISPLAY_ON 1 -#define MODE0_GATE_LOCALMEM 1:1 -#define MODE0_GATE_LOCALMEM_OFF 0 -#define MODE0_GATE_LOCALMEM_ON 1 -#define MODE0_GATE_DMA 0:0 -#define MODE0_GATE_DMA_OFF 0 -#define MODE0_GATE_DMA_ON 1 +#define MODE0_GATE_MCLK_MASK (0x3 << 14) +#define MODE0_GATE_MCLK_112MHZ (0x0 << 14) +#define MODE0_GATE_MCLK_84MHZ (0x1 << 14) +#define MODE0_GATE_MCLK_56MHZ (0x2 << 14) +#define MODE0_GATE_MCLK_42MHZ (0x3 << 14) +#define MODE0_GATE_M2XCLK_MASK (0x3 << 12) +#define MODE0_GATE_M2XCLK_336MHZ (0x0 << 12) +#define MODE0_GATE_M2XCLK_168MHZ (0x1 << 12) +#define MODE0_GATE_M2XCLK_112MHZ (0x2 << 12) +#define MODE0_GATE_M2XCLK_84MHZ (0x3 << 12) +#define MODE0_GATE_VGA BIT(10) +#define MODE0_GATE_PWM BIT(9) +#define MODE0_GATE_I2C BIT(8) +#define MODE0_GATE_SSP BIT(7) +#define MODE0_GATE_GPIO BIT(6) +#define MODE0_GATE_ZVPORT BIT(5) +#define MODE0_GATE_CSC BIT(4) +#define MODE0_GATE_DE BIT(3) +#define MODE0_GATE_DISPLAY BIT(2) +#define MODE0_GATE_LOCALMEM BIT(1) +#define MODE0_GATE_DMA BIT(0) #define MODE1_GATE 0x000048 -#define MODE1_GATE_MCLK 15:14 -#define MODE1_GATE_MCLK_112MHZ 0 -#define MODE1_GATE_MCLK_84MHZ 1 -#define MODE1_GATE_MCLK_56MHZ 2 -#define MODE1_GATE_MCLK_42MHZ 3 -#define MODE1_GATE_M2XCLK 13:12 -#define MODE1_GATE_M2XCLK_336MHZ 0 -#define MODE1_GATE_M2XCLK_168MHZ 1 -#define MODE1_GATE_M2XCLK_112MHZ 2 -#define MODE1_GATE_M2XCLK_84MHZ 3 -#define MODE1_GATE_VGA 10:10 -#define MODE1_GATE_VGA_OFF 0 -#define MODE1_GATE_VGA_ON 1 -#define MODE1_GATE_PWM 9:9 -#define MODE1_GATE_PWM_OFF 0 -#define MODE1_GATE_PWM_ON 1 -#define MODE1_GATE_I2C 8:8 -#define MODE1_GATE_I2C_OFF 0 -#define MODE1_GATE_I2C_ON 1 -#define MODE1_GATE_SSP 7:7 -#define MODE1_GATE_SSP_OFF 0 -#define MODE1_GATE_SSP_ON 1 -#define MODE1_GATE_GPIO 6:6 -#define MODE1_GATE_GPIO_OFF 0 -#define MODE1_GATE_GPIO_ON 1 -#define MODE1_GATE_ZVPORT 5:5 -#define MODE1_GATE_ZVPORT_OFF 0 -#define MODE1_GATE_ZVPORT_ON 1 -#define MODE1_GATE_CSC 4:4 -#define MODE1_GATE_CSC_OFF 0 -#define MODE1_GATE_CSC_ON 1 -#define MODE1_GATE_DE 3:3 -#define MODE1_GATE_DE_OFF 0 -#define MODE1_GATE_DE_ON 1 -#define MODE1_GATE_DISPLAY 2:2 -#define MODE1_GATE_DISPLAY_OFF 0 -#define MODE1_GATE_DISPLAY_ON 1 -#define MODE1_GATE_LOCALMEM 1:1 -#define MODE1_GATE_LOCALMEM_OFF 0 -#define MODE1_GATE_LOCALMEM_ON 1 -#define MODE1_GATE_DMA 0:0 -#define MODE1_GATE_DMA_OFF 0 -#define MODE1_GATE_DMA_ON 1 +#define MODE1_GATE_MCLK_MASK (0x3 << 14) +#define MODE1_GATE_MCLK_112MHZ (0x0 << 14) +#define MODE1_GATE_MCLK_84MHZ (0x1 << 14) +#define MODE1_GATE_MCLK_56MHZ (0x2 << 14) +#define MODE1_GATE_MCLK_42MHZ (0x3 << 14) +#define MODE1_GATE_M2XCLK_MASK (0x3 << 12) +#define MODE1_GATE_M2XCLK_336MHZ (0x0 << 12) +#define MODE1_GATE_M2XCLK_168MHZ (0x1 << 12) +#define MODE1_GATE_M2XCLK_112MHZ (0x2 << 12) +#define MODE1_GATE_M2XCLK_84MHZ (0x3 << 12) +#define MODE1_GATE_VGA BIT(10) +#define MODE1_GATE_PWM BIT(9) +#define MODE1_GATE_I2C BIT(8) +#define MODE1_GATE_SSP BIT(7) +#define MODE1_GATE_GPIO BIT(6) +#define MODE1_GATE_ZVPORT BIT(5) +#define MODE1_GATE_CSC BIT(4) +#define MODE1_GATE_DE BIT(3) +#define MODE1_GATE_DISPLAY BIT(2) +#define MODE1_GATE_LOCALMEM BIT(1) +#define MODE1_GATE_DMA BIT(0) #define POWER_MODE_CTRL 0x00004C #ifdef VALIDATION_CHIP - #define POWER_MODE_CTRL_336CLK 4:4 - #define 
POWER_MODE_CTRL_336CLK_OFF 0 - #define POWER_MODE_CTRL_336CLK_ON 1 + #define POWER_MODE_CTRL_336CLK BIT(4) #endif -#define POWER_MODE_CTRL_OSC_INPUT 3:3 -#define POWER_MODE_CTRL_OSC_INPUT_OFF 0 -#define POWER_MODE_CTRL_OSC_INPUT_ON 1 -#define POWER_MODE_CTRL_ACPI 2:2 -#define POWER_MODE_CTRL_ACPI_OFF 0 -#define POWER_MODE_CTRL_ACPI_ON 1 -#define POWER_MODE_CTRL_MODE 1:0 -#define POWER_MODE_CTRL_MODE_MODE0 0 -#define POWER_MODE_CTRL_MODE_MODE1 1 -#define POWER_MODE_CTRL_MODE_SLEEP 2 +#define POWER_MODE_CTRL_OSC_INPUT BIT(3) +#define POWER_MODE_CTRL_ACPI BIT(2) +#define POWER_MODE_CTRL_MODE_MASK (0x3 << 0) +#define POWER_MODE_CTRL_MODE_MODE0 (0x0 << 0) +#define POWER_MODE_CTRL_MODE_MODE1 (0x1 << 0) +#define POWER_MODE_CTRL_MODE_SLEEP (0x2 << 0) #define PCI_MASTER_BASE 0x000050 -#define PCI_MASTER_BASE_ADDRESS 7:0 +#define PCI_MASTER_BASE_ADDRESS_MASK 0xff #define DEVICE_ID 0x000054 -#define DEVICE_ID_DEVICE_ID 31:16 -#define DEVICE_ID_REVISION_ID 7:0 +#define DEVICE_ID_DEVICE_ID_MASK (0xffff << 16) +#define DEVICE_ID_REVISION_ID_MASK 0xff #define PLL_CLK_COUNT 0x000058 -#define PLL_CLK_COUNT_COUNTER 15:0 +#define PLL_CLK_COUNT_COUNTER_MASK 0xffff #define PANEL_PLL_CTRL 0x00005C -#define PANEL_PLL_CTRL_BYPASS 18:18 -#define PANEL_PLL_CTRL_BYPASS_OFF 0 -#define PANEL_PLL_CTRL_BYPASS_ON 1 -#define PANEL_PLL_CTRL_POWER 17:17 -#define PANEL_PLL_CTRL_POWER_OFF 0 -#define PANEL_PLL_CTRL_POWER_ON 1 -#define PANEL_PLL_CTRL_INPUT 16:16 -#define PANEL_PLL_CTRL_INPUT_OSC 0 -#define PANEL_PLL_CTRL_INPUT_TESTCLK 1 +#define PLL_CTRL_BYPASS BIT(18) +#define PLL_CTRL_POWER BIT(17) +#define PLL_CTRL_INPUT BIT(16) #ifdef VALIDATION_CHIP - #define PANEL_PLL_CTRL_OD 15:14 + #define PLL_CTRL_OD_SHIFT 14 + #define PLL_CTRL_OD_MASK (0x3 << 14) #else - #define PANEL_PLL_CTRL_POD 15:14 - #define PANEL_PLL_CTRL_OD 13:12 + #define PLL_CTRL_POD_SHIFT 14 + #define PLL_CTRL_POD_MASK (0x3 << 14) + #define PLL_CTRL_OD_SHIFT 12 + #define PLL_CTRL_OD_MASK (0x3 << 12) #endif -#define PANEL_PLL_CTRL_N 11:8 -#define PANEL_PLL_CTRL_M 7:0 +#define PLL_CTRL_N_SHIFT 8 +#define PLL_CTRL_N_MASK (0xf << 8) +#define PLL_CTRL_M_SHIFT 0 +#define PLL_CTRL_M_MASK 0xff #define CRT_PLL_CTRL 0x000060 -#define CRT_PLL_CTRL_BYPASS 18:18 -#define CRT_PLL_CTRL_BYPASS_OFF 0 -#define CRT_PLL_CTRL_BYPASS_ON 1 -#define CRT_PLL_CTRL_POWER 17:17 -#define CRT_PLL_CTRL_POWER_OFF 0 -#define CRT_PLL_CTRL_POWER_ON 1 -#define CRT_PLL_CTRL_INPUT 16:16 -#define CRT_PLL_CTRL_INPUT_OSC 0 -#define CRT_PLL_CTRL_INPUT_TESTCLK 1 -#ifdef VALIDATION_CHIP - #define CRT_PLL_CTRL_OD 15:14 -#else - #define CRT_PLL_CTRL_POD 15:14 - #define CRT_PLL_CTRL_OD 13:12 -#endif -#define CRT_PLL_CTRL_N 11:8 -#define CRT_PLL_CTRL_M 7:0 #define VGA_PLL0_CTRL 0x000064 -#define VGA_PLL0_CTRL_BYPASS 18:18 -#define VGA_PLL0_CTRL_BYPASS_OFF 0 -#define VGA_PLL0_CTRL_BYPASS_ON 1 -#define VGA_PLL0_CTRL_POWER 17:17 -#define VGA_PLL0_CTRL_POWER_OFF 0 -#define VGA_PLL0_CTRL_POWER_ON 1 -#define VGA_PLL0_CTRL_INPUT 16:16 -#define VGA_PLL0_CTRL_INPUT_OSC 0 -#define VGA_PLL0_CTRL_INPUT_TESTCLK 1 -#ifdef VALIDATION_CHIP - #define VGA_PLL0_CTRL_OD 15:14 -#else - #define VGA_PLL0_CTRL_POD 15:14 - #define VGA_PLL0_CTRL_OD 13:12 -#endif -#define VGA_PLL0_CTRL_N 11:8 -#define VGA_PLL0_CTRL_M 7:0 #define VGA_PLL1_CTRL 0x000068 -#define VGA_PLL1_CTRL_BYPASS 18:18 -#define VGA_PLL1_CTRL_BYPASS_OFF 0 -#define VGA_PLL1_CTRL_BYPASS_ON 1 -#define VGA_PLL1_CTRL_POWER 17:17 -#define VGA_PLL1_CTRL_POWER_OFF 0 -#define VGA_PLL1_CTRL_POWER_ON 1 -#define VGA_PLL1_CTRL_INPUT 16:16 -#define VGA_PLL1_CTRL_INPUT_OSC 0 
-#define VGA_PLL1_CTRL_INPUT_TESTCLK 1 -#ifdef VALIDATION_CHIP - #define VGA_PLL1_CTRL_OD 15:14 -#else - #define VGA_PLL1_CTRL_POD 15:14 - #define VGA_PLL1_CTRL_OD 13:12 -#endif -#define VGA_PLL1_CTRL_N 11:8 -#define VGA_PLL1_CTRL_M 7:0 #define SCRATCH_DATA 0x00006c #ifndef VALIDATION_CHIP #define MXCLK_PLL_CTRL 0x000070 -#define MXCLK_PLL_CTRL_BYPASS 18:18 -#define MXCLK_PLL_CTRL_BYPASS_OFF 0 -#define MXCLK_PLL_CTRL_BYPASS_ON 1 -#define MXCLK_PLL_CTRL_POWER 17:17 -#define MXCLK_PLL_CTRL_POWER_OFF 0 -#define MXCLK_PLL_CTRL_POWER_ON 1 -#define MXCLK_PLL_CTRL_INPUT 16:16 -#define MXCLK_PLL_CTRL_INPUT_OSC 0 -#define MXCLK_PLL_CTRL_INPUT_TESTCLK 1 -#define MXCLK_PLL_CTRL_POD 15:14 -#define MXCLK_PLL_CTRL_OD 13:12 -#define MXCLK_PLL_CTRL_N 11:8 -#define MXCLK_PLL_CTRL_M 7:0 #define VGA_CONFIGURATION 0x000088 -#define VGA_CONFIGURATION_USER_DEFINE 5:4 -#define VGA_CONFIGURATION_PLL 2:2 -#define VGA_CONFIGURATION_PLL_VGA 0 -#define VGA_CONFIGURATION_PLL_PANEL 1 -#define VGA_CONFIGURATION_MODE 1:1 -#define VGA_CONFIGURATION_MODE_TEXT 0 -#define VGA_CONFIGURATION_MODE_GRAPHIC 1 +#define VGA_CONFIGURATION_USER_DEFINE_MASK (0x3 << 4) +#define VGA_CONFIGURATION_PLL BIT(2) +#define VGA_CONFIGURATION_MODE BIT(1) #endif #define GPIO_DATA 0x010000 -#define GPIO_DATA_31 31:31 -#define GPIO_DATA_30 30:30 -#define GPIO_DATA_29 29:29 -#define GPIO_DATA_28 28:28 -#define GPIO_DATA_27 27:27 -#define GPIO_DATA_26 26:26 -#define GPIO_DATA_25 25:25 -#define GPIO_DATA_24 24:24 -#define GPIO_DATA_23 23:23 -#define GPIO_DATA_22 22:22 -#define GPIO_DATA_21 21:21 -#define GPIO_DATA_20 20:20 -#define GPIO_DATA_19 19:19 -#define GPIO_DATA_18 18:18 -#define GPIO_DATA_17 17:17 -#define GPIO_DATA_16 16:16 -#define GPIO_DATA_15 15:15 -#define GPIO_DATA_14 14:14 -#define GPIO_DATA_13 13:13 -#define GPIO_DATA_12 12:12 -#define GPIO_DATA_11 11:11 -#define GPIO_DATA_10 10:10 -#define GPIO_DATA_9 9:9 -#define GPIO_DATA_8 8:8 -#define GPIO_DATA_7 7:7 -#define GPIO_DATA_6 6:6 -#define GPIO_DATA_5 5:5 -#define GPIO_DATA_4 4:4 -#define GPIO_DATA_3 3:3 -#define GPIO_DATA_2 2:2 -#define GPIO_DATA_1 1:1 -#define GPIO_DATA_0 0:0 +#define GPIO_DATA_31 BIT(31) +#define GPIO_DATA_30 BIT(30) +#define GPIO_DATA_29 BIT(29) +#define GPIO_DATA_28 BIT(28) +#define GPIO_DATA_27 BIT(27) +#define GPIO_DATA_26 BIT(26) +#define GPIO_DATA_25 BIT(25) +#define GPIO_DATA_24 BIT(24) +#define GPIO_DATA_23 BIT(23) +#define GPIO_DATA_22 BIT(22) +#define GPIO_DATA_21 BIT(21) +#define GPIO_DATA_20 BIT(20) +#define GPIO_DATA_19 BIT(19) +#define GPIO_DATA_18 BIT(18) +#define GPIO_DATA_17 BIT(17) +#define GPIO_DATA_16 BIT(16) +#define GPIO_DATA_15 BIT(15) +#define GPIO_DATA_14 BIT(14) +#define GPIO_DATA_13 BIT(13) +#define GPIO_DATA_12 BIT(12) +#define GPIO_DATA_11 BIT(11) +#define GPIO_DATA_10 BIT(10) +#define GPIO_DATA_9 BIT(9) +#define GPIO_DATA_8 BIT(8) +#define GPIO_DATA_7 BIT(7) +#define GPIO_DATA_6 BIT(6) +#define GPIO_DATA_5 BIT(5) +#define GPIO_DATA_4 BIT(4) +#define GPIO_DATA_3 BIT(3) +#define GPIO_DATA_2 BIT(2) +#define GPIO_DATA_1 BIT(1) +#define GPIO_DATA_0 BIT(0) #define GPIO_DATA_DIRECTION 0x010004 -#define GPIO_DATA_DIRECTION_31 31:31 -#define GPIO_DATA_DIRECTION_31_INPUT 0 -#define GPIO_DATA_DIRECTION_31_OUTPUT 1 -#define GPIO_DATA_DIRECTION_30 30:30 -#define GPIO_DATA_DIRECTION_30_INPUT 0 -#define GPIO_DATA_DIRECTION_30_OUTPUT 1 -#define GPIO_DATA_DIRECTION_29 29:29 -#define GPIO_DATA_DIRECTION_29_INPUT 0 -#define GPIO_DATA_DIRECTION_29_OUTPUT 1 -#define GPIO_DATA_DIRECTION_28 28:28 -#define GPIO_DATA_DIRECTION_28_INPUT 0 -#define 
GPIO_DATA_DIRECTION_28_OUTPUT 1 -#define GPIO_DATA_DIRECTION_27 27:27 -#define GPIO_DATA_DIRECTION_27_INPUT 0 -#define GPIO_DATA_DIRECTION_27_OUTPUT 1 -#define GPIO_DATA_DIRECTION_26 26:26 -#define GPIO_DATA_DIRECTION_26_INPUT 0 -#define GPIO_DATA_DIRECTION_26_OUTPUT 1 -#define GPIO_DATA_DIRECTION_25 25:25 -#define GPIO_DATA_DIRECTION_25_INPUT 0 -#define GPIO_DATA_DIRECTION_25_OUTPUT 1 -#define GPIO_DATA_DIRECTION_24 24:24 -#define GPIO_DATA_DIRECTION_24_INPUT 0 -#define GPIO_DATA_DIRECTION_24_OUTPUT 1 -#define GPIO_DATA_DIRECTION_23 23:23 -#define GPIO_DATA_DIRECTION_23_INPUT 0 -#define GPIO_DATA_DIRECTION_23_OUTPUT 1 -#define GPIO_DATA_DIRECTION_22 22:22 -#define GPIO_DATA_DIRECTION_22_INPUT 0 -#define GPIO_DATA_DIRECTION_22_OUTPUT 1 -#define GPIO_DATA_DIRECTION_21 21:21 -#define GPIO_DATA_DIRECTION_21_INPUT 0 -#define GPIO_DATA_DIRECTION_21_OUTPUT 1 -#define GPIO_DATA_DIRECTION_20 20:20 -#define GPIO_DATA_DIRECTION_20_INPUT 0 -#define GPIO_DATA_DIRECTION_20_OUTPUT 1 -#define GPIO_DATA_DIRECTION_19 19:19 -#define GPIO_DATA_DIRECTION_19_INPUT 0 -#define GPIO_DATA_DIRECTION_19_OUTPUT 1 -#define GPIO_DATA_DIRECTION_18 18:18 -#define GPIO_DATA_DIRECTION_18_INPUT 0 -#define GPIO_DATA_DIRECTION_18_OUTPUT 1 -#define GPIO_DATA_DIRECTION_17 17:17 -#define GPIO_DATA_DIRECTION_17_INPUT 0 -#define GPIO_DATA_DIRECTION_17_OUTPUT 1 -#define GPIO_DATA_DIRECTION_16 16:16 -#define GPIO_DATA_DIRECTION_16_INPUT 0 -#define GPIO_DATA_DIRECTION_16_OUTPUT 1 -#define GPIO_DATA_DIRECTION_15 15:15 -#define GPIO_DATA_DIRECTION_15_INPUT 0 -#define GPIO_DATA_DIRECTION_15_OUTPUT 1 -#define GPIO_DATA_DIRECTION_14 14:14 -#define GPIO_DATA_DIRECTION_14_INPUT 0 -#define GPIO_DATA_DIRECTION_14_OUTPUT 1 -#define GPIO_DATA_DIRECTION_13 13:13 -#define GPIO_DATA_DIRECTION_13_INPUT 0 -#define GPIO_DATA_DIRECTION_13_OUTPUT 1 -#define GPIO_DATA_DIRECTION_12 12:12 -#define GPIO_DATA_DIRECTION_12_INPUT 0 -#define GPIO_DATA_DIRECTION_12_OUTPUT 1 -#define GPIO_DATA_DIRECTION_11 11:11 -#define GPIO_DATA_DIRECTION_11_INPUT 0 -#define GPIO_DATA_DIRECTION_11_OUTPUT 1 -#define GPIO_DATA_DIRECTION_10 10:10 -#define GPIO_DATA_DIRECTION_10_INPUT 0 -#define GPIO_DATA_DIRECTION_10_OUTPUT 1 -#define GPIO_DATA_DIRECTION_9 9:9 -#define GPIO_DATA_DIRECTION_9_INPUT 0 -#define GPIO_DATA_DIRECTION_9_OUTPUT 1 -#define GPIO_DATA_DIRECTION_8 8:8 -#define GPIO_DATA_DIRECTION_8_INPUT 0 -#define GPIO_DATA_DIRECTION_8_OUTPUT 1 -#define GPIO_DATA_DIRECTION_7 7:7 -#define GPIO_DATA_DIRECTION_7_INPUT 0 -#define GPIO_DATA_DIRECTION_7_OUTPUT 1 -#define GPIO_DATA_DIRECTION_6 6:6 -#define GPIO_DATA_DIRECTION_6_INPUT 0 -#define GPIO_DATA_DIRECTION_6_OUTPUT 1 -#define GPIO_DATA_DIRECTION_5 5:5 -#define GPIO_DATA_DIRECTION_5_INPUT 0 -#define GPIO_DATA_DIRECTION_5_OUTPUT 1 -#define GPIO_DATA_DIRECTION_4 4:4 -#define GPIO_DATA_DIRECTION_4_INPUT 0 -#define GPIO_DATA_DIRECTION_4_OUTPUT 1 -#define GPIO_DATA_DIRECTION_3 3:3 -#define GPIO_DATA_DIRECTION_3_INPUT 0 -#define GPIO_DATA_DIRECTION_3_OUTPUT 1 -#define GPIO_DATA_DIRECTION_2 2:2 -#define GPIO_DATA_DIRECTION_2_INPUT 0 -#define GPIO_DATA_DIRECTION_2_OUTPUT 1 -#define GPIO_DATA_DIRECTION_1 1:1 -#define GPIO_DATA_DIRECTION_1_INPUT 0 -#define GPIO_DATA_DIRECTION_1_OUTPUT 1 -#define GPIO_DATA_DIRECTION_0 0:0 -#define GPIO_DATA_DIRECTION_0_INPUT 0 -#define GPIO_DATA_DIRECTION_0_OUTPUT 1 +#define GPIO_DATA_DIRECTION_31 BIT(31) +#define GPIO_DATA_DIRECTION_30 BIT(30) +#define GPIO_DATA_DIRECTION_29 BIT(29) +#define GPIO_DATA_DIRECTION_28 BIT(28) +#define GPIO_DATA_DIRECTION_27 BIT(27) +#define GPIO_DATA_DIRECTION_26 BIT(26)
+#define GPIO_DATA_DIRECTION_25 BIT(25) +#define GPIO_DATA_DIRECTION_24 BIT(24) +#define GPIO_DATA_DIRECTION_23 BIT(23) +#define GPIO_DATA_DIRECTION_22 BIT(22) +#define GPIO_DATA_DIRECTION_21 BIT(21) +#define GPIO_DATA_DIRECTION_20 BIT(20) +#define GPIO_DATA_DIRECTION_19 BIT(19) +#define GPIO_DATA_DIRECTION_18 BIT(18) +#define GPIO_DATA_DIRECTION_17 BIT(17) +#define GPIO_DATA_DIRECTION_16 BIT(16) +#define GPIO_DATA_DIRECTION_15 BIT(15) +#define GPIO_DATA_DIRECTION_14 BIT(14) +#define GPIO_DATA_DIRECTION_13 BIT(13) +#define GPIO_DATA_DIRECTION_12 BIT(12) +#define GPIO_DATA_DIRECTION_11 BIT(11) +#define GPIO_DATA_DIRECTION_10 BIT(10) +#define GPIO_DATA_DIRECTION_9 BIT(9) +#define GPIO_DATA_DIRECTION_8 BIT(8) +#define GPIO_DATA_DIRECTION_7 BIT(7) +#define GPIO_DATA_DIRECTION_6 BIT(6) +#define GPIO_DATA_DIRECTION_5 BIT(5) +#define GPIO_DATA_DIRECTION_4 BIT(4) +#define GPIO_DATA_DIRECTION_3 BIT(3) +#define GPIO_DATA_DIRECTION_2 BIT(2) +#define GPIO_DATA_DIRECTION_1 BIT(1) +#define GPIO_DATA_DIRECTION_0 BIT(0) #define GPIO_INTERRUPT_SETUP 0x010008 -#define GPIO_INTERRUPT_SETUP_TRIGGER_31 22:22 -#define GPIO_INTERRUPT_SETUP_TRIGGER_31_EDGE 0 -#define GPIO_INTERRUPT_SETUP_TRIGGER_31_LEVEL 1 -#define GPIO_INTERRUPT_SETUP_TRIGGER_30 21:21 -#define GPIO_INTERRUPT_SETUP_TRIGGER_30_EDGE 0 -#define GPIO_INTERRUPT_SETUP_TRIGGER_30_LEVEL 1 -#define GPIO_INTERRUPT_SETUP_TRIGGER_29 20:20 -#define GPIO_INTERRUPT_SETUP_TRIGGER_29_EDGE 0 -#define GPIO_INTERRUPT_SETUP_TRIGGER_29_LEVEL 1 -#define GPIO_INTERRUPT_SETUP_TRIGGER_28 19:19 -#define GPIO_INTERRUPT_SETUP_TRIGGER_28_EDGE 0 -#define GPIO_INTERRUPT_SETUP_TRIGGER_28_LEVEL 1 -#define GPIO_INTERRUPT_SETUP_TRIGGER_27 18:18 -#define GPIO_INTERRUPT_SETUP_TRIGGER_27_EDGE 0 -#define GPIO_INTERRUPT_SETUP_TRIGGER_27_LEVEL 1 -#define GPIO_INTERRUPT_SETUP_TRIGGER_26 17:17 -#define GPIO_INTERRUPT_SETUP_TRIGGER_26_EDGE 0 -#define GPIO_INTERRUPT_SETUP_TRIGGER_26_LEVEL 1 -#define GPIO_INTERRUPT_SETUP_TRIGGER_25 16:16 -#define GPIO_INTERRUPT_SETUP_TRIGGER_25_EDGE 0 -#define GPIO_INTERRUPT_SETUP_TRIGGER_25_LEVEL 1 -#define GPIO_INTERRUPT_SETUP_ACTIVE_31 14:14 -#define GPIO_INTERRUPT_SETUP_ACTIVE_31_LOW 0 -#define GPIO_INTERRUPT_SETUP_ACTIVE_31_HIGH 1 -#define GPIO_INTERRUPT_SETUP_ACTIVE_30 13:13 -#define GPIO_INTERRUPT_SETUP_ACTIVE_30_LOW 0 -#define GPIO_INTERRUPT_SETUP_ACTIVE_30_HIGH 1 -#define GPIO_INTERRUPT_SETUP_ACTIVE_29 12:12 -#define GPIO_INTERRUPT_SETUP_ACTIVE_29_LOW 0 -#define GPIO_INTERRUPT_SETUP_ACTIVE_29_HIGH 1 -#define GPIO_INTERRUPT_SETUP_ACTIVE_28 11:11 -#define GPIO_INTERRUPT_SETUP_ACTIVE_28_LOW 0 -#define GPIO_INTERRUPT_SETUP_ACTIVE_28_HIGH 1 -#define GPIO_INTERRUPT_SETUP_ACTIVE_27 10:10 -#define GPIO_INTERRUPT_SETUP_ACTIVE_27_LOW 0 -#define GPIO_INTERRUPT_SETUP_ACTIVE_27_HIGH 1 -#define GPIO_INTERRUPT_SETUP_ACTIVE_26 9:9 -#define GPIO_INTERRUPT_SETUP_ACTIVE_26_LOW 0 -#define GPIO_INTERRUPT_SETUP_ACTIVE_26_HIGH 1 -#define GPIO_INTERRUPT_SETUP_ACTIVE_25 8:8 -#define GPIO_INTERRUPT_SETUP_ACTIVE_25_LOW 0 -#define GPIO_INTERRUPT_SETUP_ACTIVE_25_HIGH 1 -#define GPIO_INTERRUPT_SETUP_ENABLE_31 6:6 -#define GPIO_INTERRUPT_SETUP_ENABLE_31_GPIO 0 -#define GPIO_INTERRUPT_SETUP_ENABLE_31_INTERRUPT 1 -#define GPIO_INTERRUPT_SETUP_ENABLE_30 5:5 -#define GPIO_INTERRUPT_SETUP_ENABLE_30_GPIO 0 -#define GPIO_INTERRUPT_SETUP_ENABLE_30_INTERRUPT 1 -#define GPIO_INTERRUPT_SETUP_ENABLE_29 4:4 -#define GPIO_INTERRUPT_SETUP_ENABLE_29_GPIO 0 -#define GPIO_INTERRUPT_SETUP_ENABLE_29_INTERRUPT 1 -#define GPIO_INTERRUPT_SETUP_ENABLE_28 3:3 -#define GPIO_INTERRUPT_SETUP_ENABLE_28_GPIO 0 
-#define GPIO_INTERRUPT_SETUP_ENABLE_28_INTERRUPT 1 -#define GPIO_INTERRUPT_SETUP_ENABLE_27 2:2 -#define GPIO_INTERRUPT_SETUP_ENABLE_27_GPIO 0 -#define GPIO_INTERRUPT_SETUP_ENABLE_27_INTERRUPT 1 -#define GPIO_INTERRUPT_SETUP_ENABLE_26 1:1 -#define GPIO_INTERRUPT_SETUP_ENABLE_26_GPIO 0 -#define GPIO_INTERRUPT_SETUP_ENABLE_26_INTERRUPT 1 -#define GPIO_INTERRUPT_SETUP_ENABLE_25 0:0 -#define GPIO_INTERRUPT_SETUP_ENABLE_25_GPIO 0 -#define GPIO_INTERRUPT_SETUP_ENABLE_25_INTERRUPT 1 +#define GPIO_INTERRUPT_SETUP_TRIGGER_31 BIT(22) +#define GPIO_INTERRUPT_SETUP_TRIGGER_30 BIT(21) +#define GPIO_INTERRUPT_SETUP_TRIGGER_29 BIT(20) +#define GPIO_INTERRUPT_SETUP_TRIGGER_28 BIT(19) +#define GPIO_INTERRUPT_SETUP_TRIGGER_27 BIT(18) +#define GPIO_INTERRUPT_SETUP_TRIGGER_26 BIT(17) +#define GPIO_INTERRUPT_SETUP_TRIGGER_25 BIT(16) +#define GPIO_INTERRUPT_SETUP_ACTIVE_31 BIT(14) +#define GPIO_INTERRUPT_SETUP_ACTIVE_30 BIT(13) +#define GPIO_INTERRUPT_SETUP_ACTIVE_29 BIT(12) +#define GPIO_INTERRUPT_SETUP_ACTIVE_28 BIT(11) +#define GPIO_INTERRUPT_SETUP_ACTIVE_27 BIT(10) +#define GPIO_INTERRUPT_SETUP_ACTIVE_26 BIT(9) +#define GPIO_INTERRUPT_SETUP_ACTIVE_25 BIT(8) +#define GPIO_INTERRUPT_SETUP_ENABLE_31 BIT(6) +#define GPIO_INTERRUPT_SETUP_ENABLE_30 BIT(5) +#define GPIO_INTERRUPT_SETUP_ENABLE_29 BIT(4) +#define GPIO_INTERRUPT_SETUP_ENABLE_28 BIT(3) +#define GPIO_INTERRUPT_SETUP_ENABLE_27 BIT(2) +#define GPIO_INTERRUPT_SETUP_ENABLE_26 BIT(1) +#define GPIO_INTERRUPT_SETUP_ENABLE_25 BIT(0) #define GPIO_INTERRUPT_STATUS 0x01000C -#define GPIO_INTERRUPT_STATUS_31 22:22 -#define GPIO_INTERRUPT_STATUS_31_INACTIVE 0 -#define GPIO_INTERRUPT_STATUS_31_ACTIVE 1 -#define GPIO_INTERRUPT_STATUS_31_RESET 1 -#define GPIO_INTERRUPT_STATUS_30 21:21 -#define GPIO_INTERRUPT_STATUS_30_INACTIVE 0 -#define GPIO_INTERRUPT_STATUS_30_ACTIVE 1 -#define GPIO_INTERRUPT_STATUS_30_RESET 1 -#define GPIO_INTERRUPT_STATUS_29 20:20 -#define GPIO_INTERRUPT_STATUS_29_INACTIVE 0 -#define GPIO_INTERRUPT_STATUS_29_ACTIVE 1 -#define GPIO_INTERRUPT_STATUS_29_RESET 1 -#define GPIO_INTERRUPT_STATUS_28 19:19 -#define GPIO_INTERRUPT_STATUS_28_INACTIVE 0 -#define GPIO_INTERRUPT_STATUS_28_ACTIVE 1 -#define GPIO_INTERRUPT_STATUS_28_RESET 1 -#define GPIO_INTERRUPT_STATUS_27 18:18 -#define GPIO_INTERRUPT_STATUS_27_INACTIVE 0 -#define GPIO_INTERRUPT_STATUS_27_ACTIVE 1 -#define GPIO_INTERRUPT_STATUS_27_RESET 1 -#define GPIO_INTERRUPT_STATUS_26 17:17 -#define GPIO_INTERRUPT_STATUS_26_INACTIVE 0 -#define GPIO_INTERRUPT_STATUS_26_ACTIVE 1 -#define GPIO_INTERRUPT_STATUS_26_RESET 1 -#define GPIO_INTERRUPT_STATUS_25 16:16 -#define GPIO_INTERRUPT_STATUS_25_INACTIVE 0 -#define GPIO_INTERRUPT_STATUS_25_ACTIVE 1 -#define GPIO_INTERRUPT_STATUS_25_RESET 1 +#define GPIO_INTERRUPT_STATUS_31 BIT(22) +#define GPIO_INTERRUPT_STATUS_30 BIT(21) +#define GPIO_INTERRUPT_STATUS_29 BIT(20) +#define GPIO_INTERRUPT_STATUS_28 BIT(19) +#define GPIO_INTERRUPT_STATUS_27 BIT(18) +#define GPIO_INTERRUPT_STATUS_26 BIT(17) +#define GPIO_INTERRUPT_STATUS_25 BIT(16) #define PANEL_DISPLAY_CTRL 0x080000 -#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK 31:30 -#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE 0 -#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE 3 -#define PANEL_DISPLAY_CTRL_SELECT 29:28 -#define PANEL_DISPLAY_CTRL_SELECT_PANEL 0 -#define PANEL_DISPLAY_CTRL_SELECT_VGA 1 -#define PANEL_DISPLAY_CTRL_SELECT_CRT 2 -#define PANEL_DISPLAY_CTRL_FPEN 27:27 -#define PANEL_DISPLAY_CTRL_FPEN_LOW 0 -#define PANEL_DISPLAY_CTRL_FPEN_HIGH 1 -#define PANEL_DISPLAY_CTRL_VBIASEN 26:26 -#define 
PANEL_DISPLAY_CTRL_VBIASEN_LOW 0 -#define PANEL_DISPLAY_CTRL_VBIASEN_HIGH 1 -#define PANEL_DISPLAY_CTRL_DATA 25:25 -#define PANEL_DISPLAY_CTRL_DATA_DISABLE 0 -#define PANEL_DISPLAY_CTRL_DATA_ENABLE 1 -#define PANEL_DISPLAY_CTRL_FPVDDEN 24:24 -#define PANEL_DISPLAY_CTRL_FPVDDEN_LOW 0 -#define PANEL_DISPLAY_CTRL_FPVDDEN_HIGH 1 -#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK 23:20 -#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE 0 -#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE 15 - -#define PANEL_DISPLAY_CTRL_TFT_DISP 19:18 -#define PANEL_DISPLAY_CTRL_TFT_DISP_24 0 -#define PANEL_DISPLAY_CTRL_TFT_DISP_36 1 -#define PANEL_DISPLAY_CTRL_TFT_DISP_18 2 - - -#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY 19:19 -#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY_DISABLE 0 -#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY_ENABLE 1 -#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL 18:18 -#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL_DISABLE 0 -#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL_ENABLE 1 -#define PANEL_DISPLAY_CTRL_FIFO 17:16 -#define PANEL_DISPLAY_CTRL_FIFO_1 0 -#define PANEL_DISPLAY_CTRL_FIFO_3 1 -#define PANEL_DISPLAY_CTRL_FIFO_7 2 -#define PANEL_DISPLAY_CTRL_FIFO_11 3 -#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK 15:15 -#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK_DISABLE 0 -#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK_ENABLE 1 -#define PANEL_DISPLAY_CTRL_CLOCK_PHASE 14:14 -#define PANEL_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_HIGH 0 -#define PANEL_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_LOW 1 -#define PANEL_DISPLAY_CTRL_VSYNC_PHASE 13:13 -#define PANEL_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_HIGH 0 -#define PANEL_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_LOW 1 -#define PANEL_DISPLAY_CTRL_HSYNC_PHASE 12:12 -#define PANEL_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_HIGH 0 -#define PANEL_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_LOW 1 -#define PANEL_DISPLAY_CTRL_VSYNC 11:11 -#define PANEL_DISPLAY_CTRL_VSYNC_ACTIVE_HIGH 0 -#define PANEL_DISPLAY_CTRL_VSYNC_ACTIVE_LOW 1 -#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING 10:10 -#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING_DISABLE 0 -#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING_ENABLE 1 -#define PANEL_DISPLAY_CTRL_COLOR_KEY 9:9 -#define PANEL_DISPLAY_CTRL_COLOR_KEY_DISABLE 0 -#define PANEL_DISPLAY_CTRL_COLOR_KEY_ENABLE 1 -#define PANEL_DISPLAY_CTRL_TIMING 8:8 -#define PANEL_DISPLAY_CTRL_TIMING_DISABLE 0 -#define PANEL_DISPLAY_CTRL_TIMING_ENABLE 1 -#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR 7:7 -#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR_DOWN 0 -#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR_UP 1 -#define PANEL_DISPLAY_CTRL_VERTICAL_PAN 6:6 -#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DISABLE 0 -#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_ENABLE 1 -#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR 5:5 -#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR_RIGHT 0 -#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR_LEFT 1 -#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN 4:4 -#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DISABLE 0 -#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_ENABLE 1 -#define PANEL_DISPLAY_CTRL_GAMMA 3:3 -#define PANEL_DISPLAY_CTRL_GAMMA_DISABLE 0 -#define PANEL_DISPLAY_CTRL_GAMMA_ENABLE 1 -#define PANEL_DISPLAY_CTRL_PLANE 2:2 -#define PANEL_DISPLAY_CTRL_PLANE_DISABLE 0 -#define PANEL_DISPLAY_CTRL_PLANE_ENABLE 1 -#define PANEL_DISPLAY_CTRL_FORMAT 1:0 -#define PANEL_DISPLAY_CTRL_FORMAT_8 0 -#define PANEL_DISPLAY_CTRL_FORMAT_16 1 -#define PANEL_DISPLAY_CTRL_FORMAT_32 2 +#define PANEL_DISPLAY_CTRL_RESERVED_MASK 0xc0f08000 +#define PANEL_DISPLAY_CTRL_SELECT_SHIFT 28 +#define PANEL_DISPLAY_CTRL_SELECT_MASK (0x3 << 28) +#define PANEL_DISPLAY_CTRL_SELECT_PANEL (0x0 << 28) +#define 
PANEL_DISPLAY_CTRL_SELECT_VGA (0x1 << 28) +#define PANEL_DISPLAY_CTRL_SELECT_CRT (0x2 << 28) +#define PANEL_DISPLAY_CTRL_FPEN BIT(27) +#define PANEL_DISPLAY_CTRL_VBIASEN BIT(26) +#define PANEL_DISPLAY_CTRL_DATA BIT(25) +#define PANEL_DISPLAY_CTRL_FPVDDEN BIT(24) +#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY BIT(19) +#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL BIT(18) +#define PANEL_DISPLAY_CTRL_FIFO (0x3 << 16) +#define PANEL_DISPLAY_CTRL_FIFO_1 (0x0 << 16) +#define PANEL_DISPLAY_CTRL_FIFO_3 (0x1 << 16) +#define PANEL_DISPLAY_CTRL_FIFO_7 (0x2 << 16) +#define PANEL_DISPLAY_CTRL_FIFO_11 (0x3 << 16) +#define DISPLAY_CTRL_CLOCK_PHASE BIT(14) +#define DISPLAY_CTRL_VSYNC_PHASE BIT(13) +#define DISPLAY_CTRL_HSYNC_PHASE BIT(12) +#define PANEL_DISPLAY_CTRL_VSYNC BIT(11) +#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING BIT(10) +#define PANEL_DISPLAY_CTRL_COLOR_KEY BIT(9) +#define DISPLAY_CTRL_TIMING BIT(8) +#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR BIT(7) +#define PANEL_DISPLAY_CTRL_VERTICAL_PAN BIT(6) +#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR BIT(5) +#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN BIT(4) +#define DISPLAY_CTRL_GAMMA BIT(3) +#define DISPLAY_CTRL_PLANE BIT(2) +#define PANEL_DISPLAY_CTRL_FORMAT (0x3 << 0) +#define PANEL_DISPLAY_CTRL_FORMAT_8 (0x0 << 0) +#define PANEL_DISPLAY_CTRL_FORMAT_16 (0x1 << 0) +#define PANEL_DISPLAY_CTRL_FORMAT_32 (0x2 << 0) #define PANEL_PAN_CTRL 0x080004 -#define PANEL_PAN_CTRL_VERTICAL_PAN 31:24 -#define PANEL_PAN_CTRL_VERTICAL_VSYNC 21:16 -#define PANEL_PAN_CTRL_HORIZONTAL_PAN 15:8 -#define PANEL_PAN_CTRL_HORIZONTAL_VSYNC 5:0 +#define PANEL_PAN_CTRL_VERTICAL_PAN_MASK (0xff << 24) +#define PANEL_PAN_CTRL_VERTICAL_VSYNC_MASK (0x3f << 16) +#define PANEL_PAN_CTRL_HORIZONTAL_PAN_MASK (0xff << 8) +#define PANEL_PAN_CTRL_HORIZONTAL_VSYNC_MASK 0x3f #define PANEL_COLOR_KEY 0x080008 -#define PANEL_COLOR_KEY_MASK 31:16 -#define PANEL_COLOR_KEY_VALUE 15:0 +#define PANEL_COLOR_KEY_MASK_MASK (0xffff << 16) +#define PANEL_COLOR_KEY_VALUE_MASK 0xffff #define PANEL_FB_ADDRESS 0x08000C -#define PANEL_FB_ADDRESS_STATUS 31:31 -#define PANEL_FB_ADDRESS_STATUS_CURRENT 0 -#define PANEL_FB_ADDRESS_STATUS_PENDING 1 -#define PANEL_FB_ADDRESS_EXT 27:27 -#define PANEL_FB_ADDRESS_EXT_LOCAL 0 -#define PANEL_FB_ADDRESS_EXT_EXTERNAL 1 -#define PANEL_FB_ADDRESS_ADDRESS 25:0 +#define PANEL_FB_ADDRESS_STATUS BIT(31) +#define PANEL_FB_ADDRESS_EXT BIT(27) +#define PANEL_FB_ADDRESS_ADDRESS_MASK 0x1ffffff #define PANEL_FB_WIDTH 0x080010 -#define PANEL_FB_WIDTH_WIDTH 29:16 -#define PANEL_FB_WIDTH_OFFSET 13:0 +#define PANEL_FB_WIDTH_WIDTH_SHIFT 16 +#define PANEL_FB_WIDTH_WIDTH_MASK (0x3fff << 16) +#define PANEL_FB_WIDTH_OFFSET_MASK 0x3fff #define PANEL_WINDOW_WIDTH 0x080014 -#define PANEL_WINDOW_WIDTH_WIDTH 27:16 -#define PANEL_WINDOW_WIDTH_X 11:0 +#define PANEL_WINDOW_WIDTH_WIDTH_SHIFT 16 +#define PANEL_WINDOW_WIDTH_WIDTH_MASK (0xfff << 16) +#define PANEL_WINDOW_WIDTH_X_MASK 0xfff #define PANEL_WINDOW_HEIGHT 0x080018 -#define PANEL_WINDOW_HEIGHT_HEIGHT 27:16 -#define PANEL_WINDOW_HEIGHT_Y 11:0 +#define PANEL_WINDOW_HEIGHT_HEIGHT_SHIFT 16 +#define PANEL_WINDOW_HEIGHT_HEIGHT_MASK (0xfff << 16) +#define PANEL_WINDOW_HEIGHT_Y_MASK 0xfff #define PANEL_PLANE_TL 0x08001C -#define PANEL_PLANE_TL_TOP 26:16 -#define PANEL_PLANE_TL_LEFT 10:0 +#define PANEL_PLANE_TL_TOP_SHIFT 16 +#define PANEL_PLANE_TL_TOP_MASK (0xeff << 16) +#define PANEL_PLANE_TL_LEFT_MASK 0xeff #define PANEL_PLANE_BR 0x080020 -#define PANEL_PLANE_BR_BOTTOM 26:16 -#define PANEL_PLANE_BR_RIGHT 10:0 +#define PANEL_PLANE_BR_BOTTOM_SHIFT 16 +#define 
PANEL_PLANE_BR_BOTTOM_MASK (0xeff << 16) +#define PANEL_PLANE_BR_RIGHT_MASK 0xeff #define PANEL_HORIZONTAL_TOTAL 0x080024 -#define PANEL_HORIZONTAL_TOTAL_TOTAL 27:16 -#define PANEL_HORIZONTAL_TOTAL_DISPLAY_END 11:0 +#define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16 +#define PANEL_HORIZONTAL_TOTAL_TOTAL_MASK (0xfff << 16) +#define PANEL_HORIZONTAL_TOTAL_DISPLAY_END_MASK 0xfff #define PANEL_HORIZONTAL_SYNC 0x080028 -#define PANEL_HORIZONTAL_SYNC_WIDTH 23:16 -#define PANEL_HORIZONTAL_SYNC_START 11:0 +#define PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT 16 +#define PANEL_HORIZONTAL_SYNC_WIDTH_MASK (0xff << 16) +#define PANEL_HORIZONTAL_SYNC_START_MASK 0xfff #define PANEL_VERTICAL_TOTAL 0x08002C -#define PANEL_VERTICAL_TOTAL_TOTAL 26:16 -#define PANEL_VERTICAL_TOTAL_DISPLAY_END 10:0 +#define PANEL_VERTICAL_TOTAL_TOTAL_SHIFT 16 +#define PANEL_VERTICAL_TOTAL_TOTAL_MASK (0x7ff << 16) +#define PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK 0x7ff #define PANEL_VERTICAL_SYNC 0x080030 -#define PANEL_VERTICAL_SYNC_HEIGHT 21:16 -#define PANEL_VERTICAL_SYNC_START 10:0 +#define PANEL_VERTICAL_SYNC_HEIGHT_SHIFT 16 +#define PANEL_VERTICAL_SYNC_HEIGHT_MASK (0x3f << 16) +#define PANEL_VERTICAL_SYNC_START_MASK 0x7ff #define PANEL_CURRENT_LINE 0x080034 -#define PANEL_CURRENT_LINE_LINE 10:0 +#define PANEL_CURRENT_LINE_LINE_MASK 0x7ff /* Video Control */ #define VIDEO_DISPLAY_CTRL 0x080040 -#define VIDEO_DISPLAY_CTRL_LINE_BUFFER 18:18 -#define VIDEO_DISPLAY_CTRL_LINE_BUFFER_DISABLE 0 -#define VIDEO_DISPLAY_CTRL_LINE_BUFFER_ENABLE 1 -#define VIDEO_DISPLAY_CTRL_FIFO 17:16 -#define VIDEO_DISPLAY_CTRL_FIFO_1 0 -#define VIDEO_DISPLAY_CTRL_FIFO_3 1 -#define VIDEO_DISPLAY_CTRL_FIFO_7 2 -#define VIDEO_DISPLAY_CTRL_FIFO_11 3 -#define VIDEO_DISPLAY_CTRL_BUFFER 15:15 -#define VIDEO_DISPLAY_CTRL_BUFFER_0 0 -#define VIDEO_DISPLAY_CTRL_BUFFER_1 1 -#define VIDEO_DISPLAY_CTRL_CAPTURE 14:14 -#define VIDEO_DISPLAY_CTRL_CAPTURE_DISABLE 0 -#define VIDEO_DISPLAY_CTRL_CAPTURE_ENABLE 1 -#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER 13:13 -#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER_DISABLE 0 -#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER_ENABLE 1 -#define VIDEO_DISPLAY_CTRL_BYTE_SWAP 12:12 -#define VIDEO_DISPLAY_CTRL_BYTE_SWAP_DISABLE 0 -#define VIDEO_DISPLAY_CTRL_BYTE_SWAP_ENABLE 1 -#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE 11:11 -#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE_NORMAL 0 -#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE_HALF 1 -#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE 10:10 -#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE_NORMAL 0 -#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE_HALF 1 -#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE 9:9 -#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE_REPLICATE 0 -#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE_INTERPOLATE 1 -#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE 8:8 -#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE_REPLICATE 0 -#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE_INTERPOLATE 1 -#define VIDEO_DISPLAY_CTRL_PIXEL 7:4 -#define VIDEO_DISPLAY_CTRL_GAMMA 3:3 -#define VIDEO_DISPLAY_CTRL_GAMMA_DISABLE 0 -#define VIDEO_DISPLAY_CTRL_GAMMA_ENABLE 1 -#define VIDEO_DISPLAY_CTRL_PLANE 2:2 -#define VIDEO_DISPLAY_CTRL_PLANE_DISABLE 0 -#define VIDEO_DISPLAY_CTRL_PLANE_ENABLE 1 -#define VIDEO_DISPLAY_CTRL_FORMAT 1:0 -#define VIDEO_DISPLAY_CTRL_FORMAT_8 0 -#define VIDEO_DISPLAY_CTRL_FORMAT_16 1 -#define VIDEO_DISPLAY_CTRL_FORMAT_32 2 -#define VIDEO_DISPLAY_CTRL_FORMAT_YUV 3 +#define VIDEO_DISPLAY_CTRL_LINE_BUFFER BIT(18) +#define VIDEO_DISPLAY_CTRL_FIFO_MASK (0x3 << 16) +#define VIDEO_DISPLAY_CTRL_FIFO_1 (0x0 << 16) +#define VIDEO_DISPLAY_CTRL_FIFO_3 (0x1 << 16) +#define 
VIDEO_DISPLAY_CTRL_FIFO_7 (0x2 << 16) +#define VIDEO_DISPLAY_CTRL_FIFO_11 (0x3 << 16) +#define VIDEO_DISPLAY_CTRL_BUFFER BIT(15) +#define VIDEO_DISPLAY_CTRL_CAPTURE BIT(14) +#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER BIT(13) +#define VIDEO_DISPLAY_CTRL_BYTE_SWAP BIT(12) +#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE BIT(11) +#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE BIT(10) +#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE BIT(9) +#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE BIT(8) +#define VIDEO_DISPLAY_CTRL_PIXEL_MASK (0xf << 4) +#define VIDEO_DISPLAY_CTRL_GAMMA BIT(3) +#define VIDEO_DISPLAY_CTRL_FORMAT_MASK 0x3 +#define VIDEO_DISPLAY_CTRL_FORMAT_8 0x0 +#define VIDEO_DISPLAY_CTRL_FORMAT_16 0x1 +#define VIDEO_DISPLAY_CTRL_FORMAT_32 0x2 +#define VIDEO_DISPLAY_CTRL_FORMAT_YUV 0x3 #define VIDEO_FB_0_ADDRESS 0x080044 -#define VIDEO_FB_0_ADDRESS_STATUS 31:31 -#define VIDEO_FB_0_ADDRESS_STATUS_CURRENT 0 -#define VIDEO_FB_0_ADDRESS_STATUS_PENDING 1 -#define VIDEO_FB_0_ADDRESS_EXT 27:27 -#define VIDEO_FB_0_ADDRESS_EXT_LOCAL 0 -#define VIDEO_FB_0_ADDRESS_EXT_EXTERNAL 1 -#define VIDEO_FB_0_ADDRESS_ADDRESS 25:0 +#define VIDEO_FB_0_ADDRESS_STATUS BIT(31) +#define VIDEO_FB_0_ADDRESS_EXT BIT(27) +#define VIDEO_FB_0_ADDRESS_ADDRESS_MASK 0x3ffffff #define VIDEO_FB_WIDTH 0x080048 -#define VIDEO_FB_WIDTH_WIDTH 29:16 -#define VIDEO_FB_WIDTH_OFFSET 13:0 +#define VIDEO_FB_WIDTH_WIDTH_MASK (0x3fff << 16) +#define VIDEO_FB_WIDTH_OFFSET_MASK 0x3fff #define VIDEO_FB_0_LAST_ADDRESS 0x08004C -#define VIDEO_FB_0_LAST_ADDRESS_EXT 27:27 -#define VIDEO_FB_0_LAST_ADDRESS_EXT_LOCAL 0 -#define VIDEO_FB_0_LAST_ADDRESS_EXT_EXTERNAL 1 -#define VIDEO_FB_0_LAST_ADDRESS_ADDRESS 25:0 +#define VIDEO_FB_0_LAST_ADDRESS_EXT BIT(27) +#define VIDEO_FB_0_LAST_ADDRESS_ADDRESS_MASK 0x3ffffff #define VIDEO_PLANE_TL 0x080050 -#define VIDEO_PLANE_TL_TOP 26:16 -#define VIDEO_PLANE_TL_LEFT 10:0 +#define VIDEO_PLANE_TL_TOP_MASK (0x7ff << 16) +#define VIDEO_PLANE_TL_LEFT_MASK 0x7ff #define VIDEO_PLANE_BR 0x080054 -#define VIDEO_PLANE_BR_BOTTOM 26:16 -#define VIDEO_PLANE_BR_RIGHT 10:0 +#define VIDEO_PLANE_BR_BOTTOM_MASK (0x7ff << 16) +#define VIDEO_PLANE_BR_RIGHT_MASK 0x7ff #define VIDEO_SCALE 0x080058 -#define VIDEO_SCALE_VERTICAL_MODE 31:31 -#define VIDEO_SCALE_VERTICAL_MODE_EXPAND 0 -#define VIDEO_SCALE_VERTICAL_MODE_SHRINK 1 -#define VIDEO_SCALE_VERTICAL_SCALE 27:16 -#define VIDEO_SCALE_HORIZONTAL_MODE 15:15 -#define VIDEO_SCALE_HORIZONTAL_MODE_EXPAND 0 -#define VIDEO_SCALE_HORIZONTAL_MODE_SHRINK 1 -#define VIDEO_SCALE_HORIZONTAL_SCALE 11:0 +#define VIDEO_SCALE_VERTICAL_MODE BIT(31) +#define VIDEO_SCALE_VERTICAL_SCALE_MASK (0xfff << 16) +#define VIDEO_SCALE_HORIZONTAL_MODE BIT(15) +#define VIDEO_SCALE_HORIZONTAL_SCALE_MASK 0xfff #define VIDEO_INITIAL_SCALE 0x08005C -#define VIDEO_INITIAL_SCALE_FB_1 27:16 -#define VIDEO_INITIAL_SCALE_FB_0 11:0 +#define VIDEO_INITIAL_SCALE_FB_1_MASK (0xfff << 16) +#define VIDEO_INITIAL_SCALE_FB_0_MASK 0xfff #define VIDEO_YUV_CONSTANTS 0x080060 -#define VIDEO_YUV_CONSTANTS_Y 31:24 -#define VIDEO_YUV_CONSTANTS_R 23:16 -#define VIDEO_YUV_CONSTANTS_G 15:8 -#define VIDEO_YUV_CONSTANTS_B 7:0 +#define VIDEO_YUV_CONSTANTS_Y_MASK (0xff << 24) +#define VIDEO_YUV_CONSTANTS_R_MASK (0xff << 16) +#define VIDEO_YUV_CONSTANTS_G_MASK (0xff << 8) +#define VIDEO_YUV_CONSTANTS_B_MASK 0xff #define VIDEO_FB_1_ADDRESS 0x080064 -#define VIDEO_FB_1_ADDRESS_STATUS 31:31 -#define VIDEO_FB_1_ADDRESS_STATUS_CURRENT 0 -#define VIDEO_FB_1_ADDRESS_STATUS_PENDING 1 -#define VIDEO_FB_1_ADDRESS_EXT 27:27 -#define VIDEO_FB_1_ADDRESS_EXT_LOCAL 0 -#define 
VIDEO_FB_1_ADDRESS_EXT_EXTERNAL 1 -#define VIDEO_FB_1_ADDRESS_ADDRESS 25:0 +#define VIDEO_FB_1_ADDRESS_STATUS BIT(31) +#define VIDEO_FB_1_ADDRESS_EXT BIT(27) +#define VIDEO_FB_1_ADDRESS_ADDRESS_MASK 0x3ffffff #define VIDEO_FB_1_LAST_ADDRESS 0x080068 -#define VIDEO_FB_1_LAST_ADDRESS_EXT 27:27 -#define VIDEO_FB_1_LAST_ADDRESS_EXT_LOCAL 0 -#define VIDEO_FB_1_LAST_ADDRESS_EXT_EXTERNAL 1 -#define VIDEO_FB_1_LAST_ADDRESS_ADDRESS 25:0 +#define VIDEO_FB_1_LAST_ADDRESS_EXT BIT(27) +#define VIDEO_FB_1_LAST_ADDRESS_ADDRESS_MASK 0x3ffffff /* Video Alpha Control */ #define VIDEO_ALPHA_DISPLAY_CTRL 0x080080 -#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT 28:28 -#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT_PER_PIXEL 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT_ALPHA 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_ALPHA 27:24 -#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO 17:16 -#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_1 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_3 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_7 2 -#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_11 3 -#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE 11:11 -#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE_NORMAL 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE_HALF 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE 10:10 -#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE_NORMAL 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE_HALF 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE 9:9 -#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE_REPLICATE 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE_INTERPOLATE 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE 8:8 -#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE_REPLICATE 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE_INTERPOLATE 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_PIXEL 7:4 -#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY 3:3 -#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY_DISABLE 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY_ENABLE 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE 2:2 -#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE_DISABLE 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE_ENABLE 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT 1:0 -#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_8 0 -#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_16 1 -#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4 2 -#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4 3 +#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT BIT(28) +#define VIDEO_ALPHA_DISPLAY_CTRL_ALPHA_MASK (0xf << 24) +#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_MASK (0x3 << 16) +#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_1 (0x0 << 16) +#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_3 (0x1 << 16) +#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_7 (0x2 << 16) +#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_11 (0x3 << 16) +#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE BIT(11) +#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE BIT(10) +#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE BIT(9) +#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE BIT(8) +#define VIDEO_ALPHA_DISPLAY_CTRL_PIXEL_MASK (0xf << 4) +#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY BIT(3) +#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_MASK 0x3 +#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_8 0x0 +#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_16 0x1 +#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4 0x2 +#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4 0x3 #define VIDEO_ALPHA_FB_ADDRESS 0x080084 -#define VIDEO_ALPHA_FB_ADDRESS_STATUS 31:31 -#define VIDEO_ALPHA_FB_ADDRESS_STATUS_CURRENT 0 -#define VIDEO_ALPHA_FB_ADDRESS_STATUS_PENDING 1 -#define VIDEO_ALPHA_FB_ADDRESS_EXT 27:27 -#define VIDEO_ALPHA_FB_ADDRESS_EXT_LOCAL 0 -#define VIDEO_ALPHA_FB_ADDRESS_EXT_EXTERNAL 1 -#define VIDEO_ALPHA_FB_ADDRESS_ADDRESS 25:0 
+#define VIDEO_ALPHA_FB_ADDRESS_STATUS BIT(31) +#define VIDEO_ALPHA_FB_ADDRESS_EXT BIT(27) +#define VIDEO_ALPHA_FB_ADDRESS_ADDRESS_MASK 0x3ffffff #define VIDEO_ALPHA_FB_WIDTH 0x080088 -#define VIDEO_ALPHA_FB_WIDTH_WIDTH 29:16 -#define VIDEO_ALPHA_FB_WIDTH_OFFSET 13:0 +#define VIDEO_ALPHA_FB_WIDTH_WIDTH_MASK (0x3fff << 16) +#define VIDEO_ALPHA_FB_WIDTH_OFFSET_MASK 0x3fff #define VIDEO_ALPHA_FB_LAST_ADDRESS 0x08008C -#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT 27:27 -#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT_LOCAL 0 -#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT_EXTERNAL 1 -#define VIDEO_ALPHA_FB_LAST_ADDRESS_ADDRESS 25:0 +#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT BIT(27) +#define VIDEO_ALPHA_FB_LAST_ADDRESS_ADDRESS_MASK 0x3ffffff #define VIDEO_ALPHA_PLANE_TL 0x080090 -#define VIDEO_ALPHA_PLANE_TL_TOP 26:16 -#define VIDEO_ALPHA_PLANE_TL_LEFT 10:0 +#define VIDEO_ALPHA_PLANE_TL_TOP_MASK (0x7ff << 16) +#define VIDEO_ALPHA_PLANE_TL_LEFT_MASK 0x7ff #define VIDEO_ALPHA_PLANE_BR 0x080094 -#define VIDEO_ALPHA_PLANE_BR_BOTTOM 26:16 -#define VIDEO_ALPHA_PLANE_BR_RIGHT 10:0 +#define VIDEO_ALPHA_PLANE_BR_BOTTOM_MASK (0x7ff << 16) +#define VIDEO_ALPHA_PLANE_BR_RIGHT_MASK 0x7ff #define VIDEO_ALPHA_SCALE 0x080098 -#define VIDEO_ALPHA_SCALE_VERTICAL_MODE 31:31 -#define VIDEO_ALPHA_SCALE_VERTICAL_MODE_EXPAND 0 -#define VIDEO_ALPHA_SCALE_VERTICAL_MODE_SHRINK 1 -#define VIDEO_ALPHA_SCALE_VERTICAL_SCALE 27:16 -#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE 15:15 -#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE_EXPAND 0 -#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE_SHRINK 1 -#define VIDEO_ALPHA_SCALE_HORIZONTAL_SCALE 11:0 +#define VIDEO_ALPHA_SCALE_VERTICAL_MODE BIT(31) +#define VIDEO_ALPHA_SCALE_VERTICAL_SCALE_MASK (0xfff << 16) +#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE BIT(15) +#define VIDEO_ALPHA_SCALE_HORIZONTAL_SCALE_MASK 0xfff #define VIDEO_ALPHA_INITIAL_SCALE 0x08009C -#define VIDEO_ALPHA_INITIAL_SCALE_VERTICAL 27:16 -#define VIDEO_ALPHA_INITIAL_SCALE_HORIZONTAL 11:0 +#define VIDEO_ALPHA_INITIAL_SCALE_VERTICAL_MASK (0xfff << 16) +#define VIDEO_ALPHA_INITIAL_SCALE_HORIZONTAL_MASK 0xfff #define VIDEO_ALPHA_CHROMA_KEY 0x0800A0 -#define VIDEO_ALPHA_CHROMA_KEY_MASK 31:16 -#define VIDEO_ALPHA_CHROMA_KEY_VALUE 15:0 +#define VIDEO_ALPHA_CHROMA_KEY_MASK_MASK (0xffff << 16) +#define VIDEO_ALPHA_CHROMA_KEY_VALUE_MASK 0xffff #define VIDEO_ALPHA_COLOR_LOOKUP_01 0x0800A4 -#define VIDEO_ALPHA_COLOR_LOOKUP_01_1 31:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_RED 31:27 -#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_GREEN 26:21 -#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_BLUE 20:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_01_0 15:0 -#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_RED 15:11 -#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_GREEN 10:5 -#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_BLUE 4:0 +#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_MASK (0xffff << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_RED_MASK (0x1f << 27) +#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_GREEN_MASK (0x3f << 21) +#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_BLUE_MASK (0x1f << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_MASK 0xffff +#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_RED_MASK (0x1f << 11) +#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_GREEN_MASK (0x3f << 5) +#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_BLUE_MASK 0x1f #define VIDEO_ALPHA_COLOR_LOOKUP_23 0x0800A8 -#define VIDEO_ALPHA_COLOR_LOOKUP_23_3 31:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_RED 31:27 -#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_GREEN 26:21 -#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_BLUE 20:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_23_2 15:0 -#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_RED 15:11 
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_GREEN 10:5 -#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_BLUE 4:0 +#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_MASK (0xffff << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_RED_MASK (0x1f << 27) +#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_GREEN_MASK (0x3f << 21) +#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_BLUE_MASK (0x1f << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_MASK 0xffff +#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_RED_MASK (0x1f << 11) +#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_GREEN_MASK (0x3f << 5) +#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_BLUE_MASK 0x1f #define VIDEO_ALPHA_COLOR_LOOKUP_45 0x0800AC -#define VIDEO_ALPHA_COLOR_LOOKUP_45_5 31:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_RED 31:27 -#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_GREEN 26:21 -#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_BLUE 20:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_45_4 15:0 -#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_RED 15:11 -#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_GREEN 10:5 -#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_BLUE 4:0 +#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_MASK (0xffff << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_RED_MASK (0x1f << 27) +#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_GREEN_MASK (0x3f << 21) +#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_BLUE_MASK (0x1f << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_MASK 0xffff +#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_RED_MASK (0x1f << 11) +#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_GREEN_MASK (0x3f << 5) +#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_BLUE_MASK 0x1f #define VIDEO_ALPHA_COLOR_LOOKUP_67 0x0800B0 -#define VIDEO_ALPHA_COLOR_LOOKUP_67_7 31:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_RED 31:27 -#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_GREEN 26:21 -#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_BLUE 20:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_67_6 15:0 -#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_RED 15:11 -#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_GREEN 10:5 -#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_BLUE 4:0 +#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_MASK (0xffff << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_RED_MASK (0x1f << 27) +#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_GREEN_MASK (0x3f << 21) +#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_BLUE_MASK (0x1f << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_MASK 0xffff +#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_RED_MASK (0x1f << 11) +#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_GREEN_MASK (0x3f << 5) +#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_BLUE_MASK 0x1f #define VIDEO_ALPHA_COLOR_LOOKUP_89 0x0800B4 -#define VIDEO_ALPHA_COLOR_LOOKUP_89_9 31:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_RED 31:27 -#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_GREEN 26:21 -#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_BLUE 20:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_89_8 15:0 -#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_RED 15:11 -#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_GREEN 10:5 -#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_BLUE 4:0 +#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_MASK (0xffff << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_RED_MASK (0x1f << 27) +#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_GREEN_MASK (0x3f << 21) +#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_BLUE_MASK (0x1f << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_MASK 0xffff +#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_RED_MASK (0x1f << 11) +#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_GREEN_MASK (0x3f << 5) +#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_BLUE_MASK 0x1f #define VIDEO_ALPHA_COLOR_LOOKUP_AB 0x0800B8 -#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B 31:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_RED 31:27 -#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_GREEN 26:21 -#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_BLUE 20:16 -#define 
VIDEO_ALPHA_COLOR_LOOKUP_AB_A 15:0 -#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_RED 15:11 -#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_GREEN 10:5 -#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_BLUE 4:0 +#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_MASK (0xffff << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_RED_MASK (0x1f << 27) +#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_GREEN_MASK (0x3f << 21) +#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_BLUE_MASK (0x1f << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_MASK 0xffff +#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_RED_MASK (0x1f << 11) +#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_GREEN_MASK (0x3f << 5) +#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_BLUE_MASK 0x1f #define VIDEO_ALPHA_COLOR_LOOKUP_CD 0x0800BC -#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D 31:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_RED 31:27 -#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_GREEN 26:21 -#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_BLUE 20:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C 15:0 -#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_RED 15:11 -#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_GREEN 10:5 -#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_BLUE 4:0 +#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_MASK (0xffff << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_RED_MASK (0x1f << 27) +#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_GREEN_MASK (0x3f << 21) +#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_BLUE_MASK (0x1f << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_MASK 0xffff +#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_RED_MASK (0x1f << 11) +#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_GREEN_MASK (0x3f << 5) +#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_BLUE_MASK 0x1f #define VIDEO_ALPHA_COLOR_LOOKUP_EF 0x0800C0 -#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F 31:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_RED 31:27 -#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_GREEN 26:21 -#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_BLUE 20:16 -#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E 15:0 -#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_RED 15:11 -#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_GREEN 10:5 -#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_BLUE 4:0 +#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_MASK (0xffff << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_RED_MASK (0x1f << 27) +#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_GREEN_MASK (0x3f << 21) +#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_BLUE_MASK (0x1f << 16) +#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_MASK 0xffff +#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_RED_MASK (0x1f << 11) +#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_GREEN_MASK (0x3f << 5) +#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_BLUE_MASK 0x1f /* Panel Cursor Control */ #define PANEL_HWC_ADDRESS 0x0800F0 -#define PANEL_HWC_ADDRESS_ENABLE 31:31 -#define PANEL_HWC_ADDRESS_ENABLE_DISABLE 0 -#define PANEL_HWC_ADDRESS_ENABLE_ENABLE 1 -#define PANEL_HWC_ADDRESS_EXT 27:27 -#define PANEL_HWC_ADDRESS_EXT_LOCAL 0 -#define PANEL_HWC_ADDRESS_EXT_EXTERNAL 1 -#define PANEL_HWC_ADDRESS_ADDRESS 25:0 +#define PANEL_HWC_ADDRESS_ENABLE BIT(31) +#define PANEL_HWC_ADDRESS_EXT BIT(27) +#define PANEL_HWC_ADDRESS_ADDRESS_MASK 0x3ffffff #define PANEL_HWC_LOCATION 0x0800F4 -#define PANEL_HWC_LOCATION_TOP 27:27 -#define PANEL_HWC_LOCATION_TOP_INSIDE 0 -#define PANEL_HWC_LOCATION_TOP_OUTSIDE 1 -#define PANEL_HWC_LOCATION_Y 26:16 -#define PANEL_HWC_LOCATION_LEFT 11:11 -#define PANEL_HWC_LOCATION_LEFT_INSIDE 0 -#define PANEL_HWC_LOCATION_LEFT_OUTSIDE 1 -#define PANEL_HWC_LOCATION_X 10:0 +#define PANEL_HWC_LOCATION_TOP BIT(27) +#define PANEL_HWC_LOCATION_Y_MASK (0x7ff << 16) +#define PANEL_HWC_LOCATION_LEFT BIT(11) +#define PANEL_HWC_LOCATION_X_MASK 0x7ff #define PANEL_HWC_COLOR_12 0x0800F8 -#define PANEL_HWC_COLOR_12_2_RGB565 31:16 -#define 
PANEL_HWC_COLOR_12_1_RGB565 15:0 +#define PANEL_HWC_COLOR_12_2_RGB565_MASK (0xffff << 16) +#define PANEL_HWC_COLOR_12_1_RGB565_MASK 0xffff #define PANEL_HWC_COLOR_3 0x0800FC -#define PANEL_HWC_COLOR_3_RGB565 15:0 +#define PANEL_HWC_COLOR_3_RGB565_MASK 0xffff /* Old Definitions +++ */ #define PANEL_HWC_COLOR_01 0x0800F8 -#define PANEL_HWC_COLOR_01_1_RED 31:27 -#define PANEL_HWC_COLOR_01_1_GREEN 26:21 -#define PANEL_HWC_COLOR_01_1_BLUE 20:16 -#define PANEL_HWC_COLOR_01_0_RED 15:11 -#define PANEL_HWC_COLOR_01_0_GREEN 10:5 -#define PANEL_HWC_COLOR_01_0_BLUE 4:0 +#define PANEL_HWC_COLOR_01_1_RED_MASK (0x1f << 27) +#define PANEL_HWC_COLOR_01_1_GREEN_MASK (0x3f << 21) +#define PANEL_HWC_COLOR_01_1_BLUE_MASK (0x1f << 16) +#define PANEL_HWC_COLOR_01_0_RED_MASK (0x1f << 11) +#define PANEL_HWC_COLOR_01_0_GREEN_MASK (0x3f << 5) +#define PANEL_HWC_COLOR_01_0_BLUE_MASK 0x1f #define PANEL_HWC_COLOR_2 0x0800FC -#define PANEL_HWC_COLOR_2_RED 15:11 -#define PANEL_HWC_COLOR_2_GREEN 10:5 -#define PANEL_HWC_COLOR_2_BLUE 4:0 +#define PANEL_HWC_COLOR_2_RED_MASK (0x1f << 11) +#define PANEL_HWC_COLOR_2_GREEN_MASK (0x3f << 5) +#define PANEL_HWC_COLOR_2_BLUE_MASK 0x1f /* Old Definitions --- */ /* Alpha Control */ #define ALPHA_DISPLAY_CTRL 0x080100 -#define ALPHA_DISPLAY_CTRL_SELECT 28:28 -#define ALPHA_DISPLAY_CTRL_SELECT_PER_PIXEL 0 -#define ALPHA_DISPLAY_CTRL_SELECT_ALPHA 1 -#define ALPHA_DISPLAY_CTRL_ALPHA 27:24 -#define ALPHA_DISPLAY_CTRL_FIFO 17:16 -#define ALPHA_DISPLAY_CTRL_FIFO_1 0 -#define ALPHA_DISPLAY_CTRL_FIFO_3 1 -#define ALPHA_DISPLAY_CTRL_FIFO_7 2 -#define ALPHA_DISPLAY_CTRL_FIFO_11 3 -#define ALPHA_DISPLAY_CTRL_PIXEL 7:4 -#define ALPHA_DISPLAY_CTRL_CHROMA_KEY 3:3 -#define ALPHA_DISPLAY_CTRL_CHROMA_KEY_DISABLE 0 -#define ALPHA_DISPLAY_CTRL_CHROMA_KEY_ENABLE 1 -#define ALPHA_DISPLAY_CTRL_PLANE 2:2 -#define ALPHA_DISPLAY_CTRL_PLANE_DISABLE 0 -#define ALPHA_DISPLAY_CTRL_PLANE_ENABLE 1 -#define ALPHA_DISPLAY_CTRL_FORMAT 1:0 -#define ALPHA_DISPLAY_CTRL_FORMAT_16 1 -#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4 2 -#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4 3 +#define ALPHA_DISPLAY_CTRL_SELECT BIT(28) +#define ALPHA_DISPLAY_CTRL_ALPHA_MASK (0xf << 24) +#define ALPHA_DISPLAY_CTRL_FIFO_MASK (0x3 << 16) +#define ALPHA_DISPLAY_CTRL_FIFO_1 (0x0 << 16) +#define ALPHA_DISPLAY_CTRL_FIFO_3 (0x1 << 16) +#define ALPHA_DISPLAY_CTRL_FIFO_7 (0x2 << 16) +#define ALPHA_DISPLAY_CTRL_FIFO_11 (0x3 << 16) +#define ALPHA_DISPLAY_CTRL_PIXEL_MASK (0xf << 4) +#define ALPHA_DISPLAY_CTRL_CHROMA_KEY BIT(3) +#define ALPHA_DISPLAY_CTRL_FORMAT_MASK 0x3 +#define ALPHA_DISPLAY_CTRL_FORMAT_16 0x1 +#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4 0x2 +#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4 0x3 #define ALPHA_FB_ADDRESS 0x080104 -#define ALPHA_FB_ADDRESS_STATUS 31:31 -#define ALPHA_FB_ADDRESS_STATUS_CURRENT 0 -#define ALPHA_FB_ADDRESS_STATUS_PENDING 1 -#define ALPHA_FB_ADDRESS_EXT 27:27 -#define ALPHA_FB_ADDRESS_EXT_LOCAL 0 -#define ALPHA_FB_ADDRESS_EXT_EXTERNAL 1 -#define ALPHA_FB_ADDRESS_ADDRESS 25:0 +#define ALPHA_FB_ADDRESS_STATUS BIT(31) +#define ALPHA_FB_ADDRESS_EXT BIT(27) +#define ALPHA_FB_ADDRESS_ADDRESS_MASK 0x3ffffff #define ALPHA_FB_WIDTH 0x080108 -#define ALPHA_FB_WIDTH_WIDTH 29:16 -#define ALPHA_FB_WIDTH_OFFSET 13:0 +#define ALPHA_FB_WIDTH_WIDTH_MASK (0x3fff << 16) +#define ALPHA_FB_WIDTH_OFFSET_MASK 0x3fff #define ALPHA_PLANE_TL 0x08010C -#define ALPHA_PLANE_TL_TOP 26:16 -#define ALPHA_PLANE_TL_LEFT 10:0 +#define ALPHA_PLANE_TL_TOP_MASK (0x7ff << 16) +#define ALPHA_PLANE_TL_LEFT_MASK 0x7ff #define ALPHA_PLANE_BR 0x080110 
-#define ALPHA_PLANE_BR_BOTTOM 26:16 -#define ALPHA_PLANE_BR_RIGHT 10:0 +#define ALPHA_PLANE_BR_BOTTOM_MASK (0x7ff << 16) +#define ALPHA_PLANE_BR_RIGHT_MASK 0x7ff #define ALPHA_CHROMA_KEY 0x080114 -#define ALPHA_CHROMA_KEY_MASK 31:16 -#define ALPHA_CHROMA_KEY_VALUE 15:0 +#define ALPHA_CHROMA_KEY_MASK_MASK (0xffff << 16) +#define ALPHA_CHROMA_KEY_VALUE_MASK 0xffff #define ALPHA_COLOR_LOOKUP_01 0x080118 -#define ALPHA_COLOR_LOOKUP_01_1 31:16 -#define ALPHA_COLOR_LOOKUP_01_1_RED 31:27 -#define ALPHA_COLOR_LOOKUP_01_1_GREEN 26:21 -#define ALPHA_COLOR_LOOKUP_01_1_BLUE 20:16 -#define ALPHA_COLOR_LOOKUP_01_0 15:0 -#define ALPHA_COLOR_LOOKUP_01_0_RED 15:11 -#define ALPHA_COLOR_LOOKUP_01_0_GREEN 10:5 -#define ALPHA_COLOR_LOOKUP_01_0_BLUE 4:0 +#define ALPHA_COLOR_LOOKUP_01_1_MASK (0xffff << 16) +#define ALPHA_COLOR_LOOKUP_01_1_RED_MASK (0x1f << 27) +#define ALPHA_COLOR_LOOKUP_01_1_GREEN_MASK (0x3f << 21) +#define ALPHA_COLOR_LOOKUP_01_1_BLUE_MASK (0x1f << 16) +#define ALPHA_COLOR_LOOKUP_01_0_MASK 0xffff +#define ALPHA_COLOR_LOOKUP_01_0_RED_MASK (0x1f << 11) +#define ALPHA_COLOR_LOOKUP_01_0_GREEN_MASK (0x3f << 5) +#define ALPHA_COLOR_LOOKUP_01_0_BLUE_MASK 0x1f #define ALPHA_COLOR_LOOKUP_23 0x08011C -#define ALPHA_COLOR_LOOKUP_23_3 31:16 -#define ALPHA_COLOR_LOOKUP_23_3_RED 31:27 -#define ALPHA_COLOR_LOOKUP_23_3_GREEN 26:21 -#define ALPHA_COLOR_LOOKUP_23_3_BLUE 20:16 -#define ALPHA_COLOR_LOOKUP_23_2 15:0 -#define ALPHA_COLOR_LOOKUP_23_2_RED 15:11 -#define ALPHA_COLOR_LOOKUP_23_2_GREEN 10:5 -#define ALPHA_COLOR_LOOKUP_23_2_BLUE 4:0 +#define ALPHA_COLOR_LOOKUP_23_3_MASK (0xffff << 16) +#define ALPHA_COLOR_LOOKUP_23_3_RED_MASK (0x1f << 27) +#define ALPHA_COLOR_LOOKUP_23_3_GREEN_MASK (0x3f << 21) +#define ALPHA_COLOR_LOOKUP_23_3_BLUE_MASK (0x1f << 16) +#define ALPHA_COLOR_LOOKUP_23_2_MASK 0xffff +#define ALPHA_COLOR_LOOKUP_23_2_RED_MASK (0x1f << 11) +#define ALPHA_COLOR_LOOKUP_23_2_GREEN_MASK (0x3f << 5) +#define ALPHA_COLOR_LOOKUP_23_2_BLUE_MASK 0x1f #define ALPHA_COLOR_LOOKUP_45 0x080120 -#define ALPHA_COLOR_LOOKUP_45_5 31:16 -#define ALPHA_COLOR_LOOKUP_45_5_RED 31:27 -#define ALPHA_COLOR_LOOKUP_45_5_GREEN 26:21 -#define ALPHA_COLOR_LOOKUP_45_5_BLUE 20:16 -#define ALPHA_COLOR_LOOKUP_45_4 15:0 -#define ALPHA_COLOR_LOOKUP_45_4_RED 15:11 -#define ALPHA_COLOR_LOOKUP_45_4_GREEN 10:5 -#define ALPHA_COLOR_LOOKUP_45_4_BLUE 4:0 +#define ALPHA_COLOR_LOOKUP_45_5_MASK (0xffff << 16) +#define ALPHA_COLOR_LOOKUP_45_5_RED_MASK (0x1f << 27) +#define ALPHA_COLOR_LOOKUP_45_5_GREEN_MASK (0x3f << 21) +#define ALPHA_COLOR_LOOKUP_45_5_BLUE_MASK (0x1f << 16) +#define ALPHA_COLOR_LOOKUP_45_4_MASK 0xffff +#define ALPHA_COLOR_LOOKUP_45_4_RED_MASK (0x1f << 11) +#define ALPHA_COLOR_LOOKUP_45_4_GREEN_MASK (0x3f << 5) +#define ALPHA_COLOR_LOOKUP_45_4_BLUE_MASK 0x1f #define ALPHA_COLOR_LOOKUP_67 0x080124 -#define ALPHA_COLOR_LOOKUP_67_7 31:16 -#define ALPHA_COLOR_LOOKUP_67_7_RED 31:27 -#define ALPHA_COLOR_LOOKUP_67_7_GREEN 26:21 -#define ALPHA_COLOR_LOOKUP_67_7_BLUE 20:16 -#define ALPHA_COLOR_LOOKUP_67_6 15:0 -#define ALPHA_COLOR_LOOKUP_67_6_RED 15:11 -#define ALPHA_COLOR_LOOKUP_67_6_GREEN 10:5 -#define ALPHA_COLOR_LOOKUP_67_6_BLUE 4:0 +#define ALPHA_COLOR_LOOKUP_67_7_MASK (0xffff << 16) +#define ALPHA_COLOR_LOOKUP_67_7_RED_MASK (0x1f << 27) +#define ALPHA_COLOR_LOOKUP_67_7_GREEN_MASK (0x3f << 21) +#define ALPHA_COLOR_LOOKUP_67_7_BLUE_MASK (0x1f << 16) +#define ALPHA_COLOR_LOOKUP_67_6_MASK 0xffff +#define ALPHA_COLOR_LOOKUP_67_6_RED_MASK (0x1f << 11) +#define ALPHA_COLOR_LOOKUP_67_6_GREEN_MASK (0x3f << 5) +#define 
ALPHA_COLOR_LOOKUP_67_6_BLUE_MASK 0x1f #define ALPHA_COLOR_LOOKUP_89 0x080128 -#define ALPHA_COLOR_LOOKUP_89_9 31:16 -#define ALPHA_COLOR_LOOKUP_89_9_RED 31:27 -#define ALPHA_COLOR_LOOKUP_89_9_GREEN 26:21 -#define ALPHA_COLOR_LOOKUP_89_9_BLUE 20:16 -#define ALPHA_COLOR_LOOKUP_89_8 15:0 -#define ALPHA_COLOR_LOOKUP_89_8_RED 15:11 -#define ALPHA_COLOR_LOOKUP_89_8_GREEN 10:5 -#define ALPHA_COLOR_LOOKUP_89_8_BLUE 4:0 +#define ALPHA_COLOR_LOOKUP_89_9_MASK (0xffff << 16) +#define ALPHA_COLOR_LOOKUP_89_9_RED_MASK (0x1f << 27) +#define ALPHA_COLOR_LOOKUP_89_9_GREEN_MASK (0x3f << 21) +#define ALPHA_COLOR_LOOKUP_89_9_BLUE_MASK (0x1f << 16) +#define ALPHA_COLOR_LOOKUP_89_8_MASK 0xffff +#define ALPHA_COLOR_LOOKUP_89_8_RED_MASK (0x1f << 11) +#define ALPHA_COLOR_LOOKUP_89_8_GREEN_MASK (0x3f << 5) +#define ALPHA_COLOR_LOOKUP_89_8_BLUE_MASK 0x1f #define ALPHA_COLOR_LOOKUP_AB 0x08012C -#define ALPHA_COLOR_LOOKUP_AB_B 31:16 -#define ALPHA_COLOR_LOOKUP_AB_B_RED 31:27 -#define ALPHA_COLOR_LOOKUP_AB_B_GREEN 26:21 -#define ALPHA_COLOR_LOOKUP_AB_B_BLUE 20:16 -#define ALPHA_COLOR_LOOKUP_AB_A 15:0 -#define ALPHA_COLOR_LOOKUP_AB_A_RED 15:11 -#define ALPHA_COLOR_LOOKUP_AB_A_GREEN 10:5 -#define ALPHA_COLOR_LOOKUP_AB_A_BLUE 4:0 +#define ALPHA_COLOR_LOOKUP_AB_B_MASK (0xffff << 16) +#define ALPHA_COLOR_LOOKUP_AB_B_RED_MASK (0x1f << 27) +#define ALPHA_COLOR_LOOKUP_AB_B_GREEN_MASK (0x3f << 21) +#define ALPHA_COLOR_LOOKUP_AB_B_BLUE_MASK (0x1f << 16) +#define ALPHA_COLOR_LOOKUP_AB_A_MASK 0xffff +#define ALPHA_COLOR_LOOKUP_AB_A_RED_MASK (0x1f << 11) +#define ALPHA_COLOR_LOOKUP_AB_A_GREEN_MASK (0x3f << 5) +#define ALPHA_COLOR_LOOKUP_AB_A_BLUE_MASK 0x1f #define ALPHA_COLOR_LOOKUP_CD 0x080130 -#define ALPHA_COLOR_LOOKUP_CD_D 31:16 -#define ALPHA_COLOR_LOOKUP_CD_D_RED 31:27 -#define ALPHA_COLOR_LOOKUP_CD_D_GREEN 26:21 -#define ALPHA_COLOR_LOOKUP_CD_D_BLUE 20:16 -#define ALPHA_COLOR_LOOKUP_CD_C 15:0 -#define ALPHA_COLOR_LOOKUP_CD_C_RED 15:11 -#define ALPHA_COLOR_LOOKUP_CD_C_GREEN 10:5 -#define ALPHA_COLOR_LOOKUP_CD_C_BLUE 4:0 +#define ALPHA_COLOR_LOOKUP_CD_D_MASK (0xffff << 16) +#define ALPHA_COLOR_LOOKUP_CD_D_RED_MASK (0x1f << 27) +#define ALPHA_COLOR_LOOKUP_CD_D_GREEN_MASK (0x3f << 21) +#define ALPHA_COLOR_LOOKUP_CD_D_BLUE_MASK (0x1f << 16) +#define ALPHA_COLOR_LOOKUP_CD_C_MASK 0xffff +#define ALPHA_COLOR_LOOKUP_CD_C_RED_MASK (0x1f << 11) +#define ALPHA_COLOR_LOOKUP_CD_C_GREEN_MASK (0x3f << 5) +#define ALPHA_COLOR_LOOKUP_CD_C_BLUE_MASK 0x1f #define ALPHA_COLOR_LOOKUP_EF 0x080134 -#define ALPHA_COLOR_LOOKUP_EF_F 31:16 -#define ALPHA_COLOR_LOOKUP_EF_F_RED 31:27 -#define ALPHA_COLOR_LOOKUP_EF_F_GREEN 26:21 -#define ALPHA_COLOR_LOOKUP_EF_F_BLUE 20:16 -#define ALPHA_COLOR_LOOKUP_EF_E 15:0 -#define ALPHA_COLOR_LOOKUP_EF_E_RED 15:11 -#define ALPHA_COLOR_LOOKUP_EF_E_GREEN 10:5 -#define ALPHA_COLOR_LOOKUP_EF_E_BLUE 4:0 +#define ALPHA_COLOR_LOOKUP_EF_F_MASK (0xffff << 16) +#define ALPHA_COLOR_LOOKUP_EF_F_RED_MASK (0x1f << 27) +#define ALPHA_COLOR_LOOKUP_EF_F_GREEN_MASK (0x3f << 21) +#define ALPHA_COLOR_LOOKUP_EF_F_BLUE_MASK (0x1f << 16) +#define ALPHA_COLOR_LOOKUP_EF_E_MASK 0xffff +#define ALPHA_COLOR_LOOKUP_EF_E_RED_MASK (0x1f << 11) +#define ALPHA_COLOR_LOOKUP_EF_E_GREEN_MASK (0x3f << 5) +#define ALPHA_COLOR_LOOKUP_EF_E_BLUE_MASK 0x1f /* CRT Graphics Control */ #define CRT_DISPLAY_CTRL 0x080200 -#define CRT_DISPLAY_CTRL_RESERVED_1_MASK 31:27 -#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE 0 -#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE 0x1F +#define CRT_DISPLAY_CTRL_RESERVED_MASK 0xfb008200 /* SM750LE definition */ -#define 
CRT_DISPLAY_CTRL_DPMS 31:30 -#define CRT_DISPLAY_CTRL_DPMS_0 0 -#define CRT_DISPLAY_CTRL_DPMS_1 1 -#define CRT_DISPLAY_CTRL_DPMS_2 2 -#define CRT_DISPLAY_CTRL_DPMS_3 3 -#define CRT_DISPLAY_CTRL_CLK 29:27 -#define CRT_DISPLAY_CTRL_CLK_PLL25 0 -#define CRT_DISPLAY_CTRL_CLK_PLL41 1 -#define CRT_DISPLAY_CTRL_CLK_PLL62 2 -#define CRT_DISPLAY_CTRL_CLK_PLL65 3 -#define CRT_DISPLAY_CTRL_CLK_PLL74 4 -#define CRT_DISPLAY_CTRL_CLK_PLL80 5 -#define CRT_DISPLAY_CTRL_CLK_PLL108 6 -#define CRT_DISPLAY_CTRL_CLK_RESERVED 7 -#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC 26:26 -#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_DISABLE 1 -#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_ENABLE 0 - - -#define CRT_DISPLAY_CTRL_RESERVED_2_MASK 25:24 -#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE 3 -#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE 0 +#define CRT_DISPLAY_CTRL_DPMS_SHIFT 30 +#define CRT_DISPLAY_CTRL_DPMS_MASK (0x3 << 30) +#define CRT_DISPLAY_CTRL_DPMS_0 (0x0 << 30) +#define CRT_DISPLAY_CTRL_DPMS_1 (0x1 << 30) +#define CRT_DISPLAY_CTRL_DPMS_2 (0x2 << 30) +#define CRT_DISPLAY_CTRL_DPMS_3 (0x3 << 30) +#define CRT_DISPLAY_CTRL_CLK_MASK (0x7 << 27) +#define CRT_DISPLAY_CTRL_CLK_PLL25 (0x0 << 27) +#define CRT_DISPLAY_CTRL_CLK_PLL41 (0x1 << 27) +#define CRT_DISPLAY_CTRL_CLK_PLL62 (0x2 << 27) +#define CRT_DISPLAY_CTRL_CLK_PLL65 (0x3 << 27) +#define CRT_DISPLAY_CTRL_CLK_PLL74 (0x4 << 27) +#define CRT_DISPLAY_CTRL_CLK_PLL80 (0x5 << 27) +#define CRT_DISPLAY_CTRL_CLK_PLL108 (0x6 << 27) +#define CRT_DISPLAY_CTRL_CLK_RESERVED (0x7 << 27) +#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC BIT(26) /* SM750LE definition */ -#define CRT_DISPLAY_CTRL_CRTSELECT 25:25 -#define CRT_DISPLAY_CTRL_CRTSELECT_VGA 0 -#define CRT_DISPLAY_CTRL_CRTSELECT_CRT 1 -#define CRT_DISPLAY_CTRL_RGBBIT 24:24 -#define CRT_DISPLAY_CTRL_RGBBIT_24BIT 0 -#define CRT_DISPLAY_CTRL_RGBBIT_12BIT 1 - - -#define CRT_DISPLAY_CTRL_RESERVED_3_MASK 15:15 -#define CRT_DISPLAY_CTRL_RESERVED_3_MASK_DISABLE 0 -#define CRT_DISPLAY_CTRL_RESERVED_3_MASK_ENABLE 1 - -#define CRT_DISPLAY_CTRL_RESERVED_4_MASK 9:9 -#define CRT_DISPLAY_CTRL_RESERVED_4_MASK_DISABLE 0 -#define CRT_DISPLAY_CTRL_RESERVED_4_MASK_ENABLE 1 +#define CRT_DISPLAY_CTRL_CRTSELECT BIT(25) +#define CRT_DISPLAY_CTRL_RGBBIT BIT(24) #ifndef VALIDATION_CHIP - #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC 26:26 - #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_DISABLE 1 - #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_ENABLE 0 - #define CRT_DISPLAY_CTRL_CENTERING 24:24 - #define CRT_DISPLAY_CTRL_CENTERING_DISABLE 0 - #define CRT_DISPLAY_CTRL_CENTERING_ENABLE 1 + #define CRT_DISPLAY_CTRL_CENTERING BIT(24) #endif -#define CRT_DISPLAY_CTRL_LOCK_TIMING 23:23 -#define CRT_DISPLAY_CTRL_LOCK_TIMING_DISABLE 0 -#define CRT_DISPLAY_CTRL_LOCK_TIMING_ENABLE 1 -#define CRT_DISPLAY_CTRL_EXPANSION 22:22 -#define CRT_DISPLAY_CTRL_EXPANSION_DISABLE 0 -#define CRT_DISPLAY_CTRL_EXPANSION_ENABLE 1 -#define CRT_DISPLAY_CTRL_VERTICAL_MODE 21:21 -#define CRT_DISPLAY_CTRL_VERTICAL_MODE_REPLICATE 0 -#define CRT_DISPLAY_CTRL_VERTICAL_MODE_INTERPOLATE 1 -#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE 20:20 -#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE_REPLICATE 0 -#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE_INTERPOLATE 1 -#define CRT_DISPLAY_CTRL_SELECT 19:18 -#define CRT_DISPLAY_CTRL_SELECT_PANEL 0 -#define CRT_DISPLAY_CTRL_SELECT_VGA 1 -#define CRT_DISPLAY_CTRL_SELECT_CRT 2 -#define CRT_DISPLAY_CTRL_FIFO 17:16 -#define CRT_DISPLAY_CTRL_FIFO_1 0 -#define CRT_DISPLAY_CTRL_FIFO_3 1 -#define CRT_DISPLAY_CTRL_FIFO_7 2 -#define CRT_DISPLAY_CTRL_FIFO_11 3 -#define CRT_DISPLAY_CTRL_CLOCK_PHASE 14:14 
-#define CRT_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_HIGH 0 -#define CRT_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_LOW 1 -#define CRT_DISPLAY_CTRL_VSYNC_PHASE 13:13 -#define CRT_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_HIGH 0 -#define CRT_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_LOW 1 -#define CRT_DISPLAY_CTRL_HSYNC_PHASE 12:12 -#define CRT_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_HIGH 0 -#define CRT_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_LOW 1 -#define CRT_DISPLAY_CTRL_BLANK 10:10 -#define CRT_DISPLAY_CTRL_BLANK_OFF 0 -#define CRT_DISPLAY_CTRL_BLANK_ON 1 -#define CRT_DISPLAY_CTRL_TIMING 8:8 -#define CRT_DISPLAY_CTRL_TIMING_DISABLE 0 -#define CRT_DISPLAY_CTRL_TIMING_ENABLE 1 -#define CRT_DISPLAY_CTRL_PIXEL 7:4 -#define CRT_DISPLAY_CTRL_GAMMA 3:3 -#define CRT_DISPLAY_CTRL_GAMMA_DISABLE 0 -#define CRT_DISPLAY_CTRL_GAMMA_ENABLE 1 -#define CRT_DISPLAY_CTRL_PLANE 2:2 -#define CRT_DISPLAY_CTRL_PLANE_DISABLE 0 -#define CRT_DISPLAY_CTRL_PLANE_ENABLE 1 -#define CRT_DISPLAY_CTRL_FORMAT 1:0 -#define CRT_DISPLAY_CTRL_FORMAT_8 0 -#define CRT_DISPLAY_CTRL_FORMAT_16 1 -#define CRT_DISPLAY_CTRL_FORMAT_32 2 -#define CRT_DISPLAY_CTRL_RESERVED_BITS_MASK 0xFF000200 +#define CRT_DISPLAY_CTRL_LOCK_TIMING BIT(23) +#define CRT_DISPLAY_CTRL_EXPANSION BIT(22) +#define CRT_DISPLAY_CTRL_VERTICAL_MODE BIT(21) +#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE BIT(20) +#define CRT_DISPLAY_CTRL_SELECT_SHIFT 18 +#define CRT_DISPLAY_CTRL_SELECT_MASK (0x3 << 18) +#define CRT_DISPLAY_CTRL_SELECT_PANEL (0x0 << 18) +#define CRT_DISPLAY_CTRL_SELECT_VGA (0x1 << 18) +#define CRT_DISPLAY_CTRL_SELECT_CRT (0x2 << 18) +#define CRT_DISPLAY_CTRL_FIFO_MASK (0x3 << 16) +#define CRT_DISPLAY_CTRL_FIFO_1 (0x0 << 16) +#define CRT_DISPLAY_CTRL_FIFO_3 (0x1 << 16) +#define CRT_DISPLAY_CTRL_FIFO_7 (0x2 << 16) +#define CRT_DISPLAY_CTRL_FIFO_11 (0x3 << 16) +#define CRT_DISPLAY_CTRL_BLANK BIT(10) +#define CRT_DISPLAY_CTRL_PIXEL_MASK (0xf << 4) +#define CRT_DISPLAY_CTRL_FORMAT_MASK (0x3 << 0) +#define CRT_DISPLAY_CTRL_FORMAT_8 (0x0 << 0) +#define CRT_DISPLAY_CTRL_FORMAT_16 (0x1 << 0) +#define CRT_DISPLAY_CTRL_FORMAT_32 (0x2 << 0) #define CRT_FB_ADDRESS 0x080204 -#define CRT_FB_ADDRESS_STATUS 31:31 -#define CRT_FB_ADDRESS_STATUS_CURRENT 0 -#define CRT_FB_ADDRESS_STATUS_PENDING 1 -#define CRT_FB_ADDRESS_EXT 27:27 -#define CRT_FB_ADDRESS_EXT_LOCAL 0 -#define CRT_FB_ADDRESS_EXT_EXTERNAL 1 -#define CRT_FB_ADDRESS_ADDRESS 25:0 +#define CRT_FB_ADDRESS_STATUS BIT(31) +#define CRT_FB_ADDRESS_EXT BIT(27) +#define CRT_FB_ADDRESS_ADDRESS_MASK 0x3ffffff #define CRT_FB_WIDTH 0x080208 -#define CRT_FB_WIDTH_WIDTH 29:16 -#define CRT_FB_WIDTH_OFFSET 13:0 +#define CRT_FB_WIDTH_WIDTH_SHIFT 16 +#define CRT_FB_WIDTH_WIDTH_MASK (0x3fff << 16) +#define CRT_FB_WIDTH_OFFSET_MASK 0x3fff #define CRT_HORIZONTAL_TOTAL 0x08020C -#define CRT_HORIZONTAL_TOTAL_TOTAL 27:16 -#define CRT_HORIZONTAL_TOTAL_DISPLAY_END 11:0 +#define CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT 16 +#define CRT_HORIZONTAL_TOTAL_TOTAL_MASK (0xfff << 16) +#define CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK 0xfff #define CRT_HORIZONTAL_SYNC 0x080210 -#define CRT_HORIZONTAL_SYNC_WIDTH 23:16 -#define CRT_HORIZONTAL_SYNC_START 11:0 +#define CRT_HORIZONTAL_SYNC_WIDTH_SHIFT 16 +#define CRT_HORIZONTAL_SYNC_WIDTH_MASK (0xff << 16) +#define CRT_HORIZONTAL_SYNC_START_MASK 0xfff #define CRT_VERTICAL_TOTAL 0x080214 -#define CRT_VERTICAL_TOTAL_TOTAL 26:16 -#define CRT_VERTICAL_TOTAL_DISPLAY_END 10:0 +#define CRT_VERTICAL_TOTAL_TOTAL_SHIFT 16 +#define CRT_VERTICAL_TOTAL_TOTAL_MASK (0x7ff << 16) +#define CRT_VERTICAL_TOTAL_DISPLAY_END_MASK (0x7ff) #define CRT_VERTICAL_SYNC 0x080218 -#define 
CRT_VERTICAL_SYNC_HEIGHT 21:16 -#define CRT_VERTICAL_SYNC_START 10:0 +#define CRT_VERTICAL_SYNC_HEIGHT_SHIFT 16 +#define CRT_VERTICAL_SYNC_HEIGHT_MASK (0x3f << 16) +#define CRT_VERTICAL_SYNC_START_MASK 0x7ff #define CRT_SIGNATURE_ANALYZER 0x08021C -#define CRT_SIGNATURE_ANALYZER_STATUS 31:16 -#define CRT_SIGNATURE_ANALYZER_ENABLE 3:3 -#define CRT_SIGNATURE_ANALYZER_ENABLE_DISABLE 0 -#define CRT_SIGNATURE_ANALYZER_ENABLE_ENABLE 1 -#define CRT_SIGNATURE_ANALYZER_RESET 2:2 -#define CRT_SIGNATURE_ANALYZER_RESET_NORMAL 0 -#define CRT_SIGNATURE_ANALYZER_RESET_RESET 1 -#define CRT_SIGNATURE_ANALYZER_SOURCE 1:0 +#define CRT_SIGNATURE_ANALYZER_STATUS_MASK (0xffff << 16) +#define CRT_SIGNATURE_ANALYZER_ENABLE BIT(3) +#define CRT_SIGNATURE_ANALYZER_RESET BIT(2) +#define CRT_SIGNATURE_ANALYZER_SOURCE_MASK 0x3 #define CRT_SIGNATURE_ANALYZER_SOURCE_RED 0 #define CRT_SIGNATURE_ANALYZER_SOURCE_GREEN 1 #define CRT_SIGNATURE_ANALYZER_SOURCE_BLUE 2 #define CRT_CURRENT_LINE 0x080220 -#define CRT_CURRENT_LINE_LINE 10:0 +#define CRT_CURRENT_LINE_LINE_MASK 0x7ff #define CRT_MONITOR_DETECT 0x080224 -#define CRT_MONITOR_DETECT_VALUE 25:25 -#define CRT_MONITOR_DETECT_VALUE_DISABLE 0 -#define CRT_MONITOR_DETECT_VALUE_ENABLE 1 -#define CRT_MONITOR_DETECT_ENABLE 24:24 -#define CRT_MONITOR_DETECT_ENABLE_DISABLE 0 -#define CRT_MONITOR_DETECT_ENABLE_ENABLE 1 -#define CRT_MONITOR_DETECT_RED 23:16 -#define CRT_MONITOR_DETECT_GREEN 15:8 -#define CRT_MONITOR_DETECT_BLUE 7:0 +#define CRT_MONITOR_DETECT_VALUE BIT(25) +#define CRT_MONITOR_DETECT_ENABLE BIT(24) +#define CRT_MONITOR_DETECT_RED_MASK (0xff << 16) +#define CRT_MONITOR_DETECT_GREEN_MASK (0xff << 8) +#define CRT_MONITOR_DETECT_BLUE_MASK 0xff #define CRT_SCALE 0x080228 -#define CRT_SCALE_VERTICAL_MODE 31:31 -#define CRT_SCALE_VERTICAL_MODE_EXPAND 0 -#define CRT_SCALE_VERTICAL_MODE_SHRINK 1 -#define CRT_SCALE_VERTICAL_SCALE 27:16 -#define CRT_SCALE_HORIZONTAL_MODE 15:15 -#define CRT_SCALE_HORIZONTAL_MODE_EXPAND 0 -#define CRT_SCALE_HORIZONTAL_MODE_SHRINK 1 -#define CRT_SCALE_HORIZONTAL_SCALE 11:0 +#define CRT_SCALE_VERTICAL_MODE BIT(31) +#define CRT_SCALE_VERTICAL_SCALE_MASK (0xfff << 16) +#define CRT_SCALE_HORIZONTAL_MODE BIT(15) +#define CRT_SCALE_HORIZONTAL_SCALE_MASK 0xfff /* CRT Cursor Control */ #define CRT_HWC_ADDRESS 0x080230 -#define CRT_HWC_ADDRESS_ENABLE 31:31 -#define CRT_HWC_ADDRESS_ENABLE_DISABLE 0 -#define CRT_HWC_ADDRESS_ENABLE_ENABLE 1 -#define CRT_HWC_ADDRESS_EXT 27:27 -#define CRT_HWC_ADDRESS_EXT_LOCAL 0 -#define CRT_HWC_ADDRESS_EXT_EXTERNAL 1 -#define CRT_HWC_ADDRESS_ADDRESS 25:0 +#define CRT_HWC_ADDRESS_ENABLE BIT(31) +#define CRT_HWC_ADDRESS_EXT BIT(27) +#define CRT_HWC_ADDRESS_ADDRESS_MASK 0x3ffffff #define CRT_HWC_LOCATION 0x080234 -#define CRT_HWC_LOCATION_TOP 27:27 -#define CRT_HWC_LOCATION_TOP_INSIDE 0 -#define CRT_HWC_LOCATION_TOP_OUTSIDE 1 -#define CRT_HWC_LOCATION_Y 26:16 -#define CRT_HWC_LOCATION_LEFT 11:11 -#define CRT_HWC_LOCATION_LEFT_INSIDE 0 -#define CRT_HWC_LOCATION_LEFT_OUTSIDE 1 -#define CRT_HWC_LOCATION_X 10:0 +#define CRT_HWC_LOCATION_TOP BIT(27) +#define CRT_HWC_LOCATION_Y_MASK (0x7ff << 16) +#define CRT_HWC_LOCATION_LEFT BIT(11) +#define CRT_HWC_LOCATION_X_MASK 0x7ff #define CRT_HWC_COLOR_12 0x080238 -#define CRT_HWC_COLOR_12_2_RGB565 31:16 -#define CRT_HWC_COLOR_12_1_RGB565 15:0 +#define CRT_HWC_COLOR_12_2_RGB565_MASK (0xffff << 16) +#define CRT_HWC_COLOR_12_1_RGB565_MASK 0xffff #define CRT_HWC_COLOR_3 0x08023C -#define CRT_HWC_COLOR_3_RGB565 15:0 +#define CRT_HWC_COLOR_3_RGB565_MASK 0xffff /* This vertical expansion below 
start at 0x080240 ~ 0x080264 */ #define CRT_VERTICAL_EXPANSION 0x080240 #ifndef VALIDATION_CHIP - #define CRT_VERTICAL_CENTERING_VALUE 31:24 + #define CRT_VERTICAL_CENTERING_VALUE_MASK (0xff << 24) #endif -#define CRT_VERTICAL_EXPANSION_COMPARE_VALUE 23:16 -#define CRT_VERTICAL_EXPANSION_LINE_BUFFER 15:12 -#define CRT_VERTICAL_EXPANSION_SCALE_FACTOR 11:0 +#define CRT_VERTICAL_EXPANSION_COMPARE_VALUE_MASK (0xff << 16) +#define CRT_VERTICAL_EXPANSION_LINE_BUFFER_MASK (0xf << 12) +#define CRT_VERTICAL_EXPANSION_SCALE_FACTOR_MASK 0xfff /* This horizontal expansion below start at 0x080268 ~ 0x08027C */ #define CRT_HORIZONTAL_EXPANSION 0x080268 #ifndef VALIDATION_CHIP - #define CRT_HORIZONTAL_CENTERING_VALUE 31:24 + #define CRT_HORIZONTAL_CENTERING_VALUE_MASK (0xff << 24) #endif -#define CRT_HORIZONTAL_EXPANSION_COMPARE_VALUE 23:16 -#define CRT_HORIZONTAL_EXPANSION_SCALE_FACTOR 11:0 +#define CRT_HORIZONTAL_EXPANSION_COMPARE_VALUE_MASK (0xff << 16) +#define CRT_HORIZONTAL_EXPANSION_SCALE_FACTOR_MASK 0xfff #ifndef VALIDATION_CHIP /* Auto Centering */ #define CRT_AUTO_CENTERING_TL 0x080280 - #define CRT_AUTO_CENTERING_TL_TOP 26:16 - #define CRT_AUTO_CENTERING_TL_LEFT 10:0 + #define CRT_AUTO_CENTERING_TL_TOP_MASK (0x7ff << 16) + #define CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7ff #define CRT_AUTO_CENTERING_BR 0x080284 - #define CRT_AUTO_CENTERING_BR_BOTTOM 26:16 - #define CRT_AUTO_CENTERING_BR_RIGHT 10:0 + #define CRT_AUTO_CENTERING_BR_BOTTOM_MASK (0x7ff << 16) + #define CRT_AUTO_CENTERING_BR_BOTTOM_SHIFT 16 + #define CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7ff #endif /* sm750le new register to control panel output */ @@ -1877,155 +1161,106 @@ /* Color Space Conversion registers. */ #define CSC_Y_SOURCE_BASE 0x1000C8 -#define CSC_Y_SOURCE_BASE_EXT 27:27 -#define CSC_Y_SOURCE_BASE_EXT_LOCAL 0 -#define CSC_Y_SOURCE_BASE_EXT_EXTERNAL 1 -#define CSC_Y_SOURCE_BASE_CS 26:26 -#define CSC_Y_SOURCE_BASE_CS_0 0 -#define CSC_Y_SOURCE_BASE_CS_1 1 -#define CSC_Y_SOURCE_BASE_ADDRESS 25:0 +#define CSC_Y_SOURCE_BASE_EXT BIT(27) +#define CSC_Y_SOURCE_BASE_CS BIT(26) +#define CSC_Y_SOURCE_BASE_ADDRESS_MASK 0x3ffffff #define CSC_CONSTANTS 0x1000CC -#define CSC_CONSTANTS_Y 31:24 -#define CSC_CONSTANTS_R 23:16 -#define CSC_CONSTANTS_G 15:8 -#define CSC_CONSTANTS_B 7:0 +#define CSC_CONSTANTS_Y_MASK (0xff << 24) +#define CSC_CONSTANTS_R_MASK (0xff << 16) +#define CSC_CONSTANTS_G_MASK (0xff << 8) +#define CSC_CONSTANTS_B_MASK 0xff #define CSC_Y_SOURCE_X 0x1000D0 -#define CSC_Y_SOURCE_X_INTEGER 26:16 -#define CSC_Y_SOURCE_X_FRACTION 15:3 +#define CSC_Y_SOURCE_X_INTEGER_MASK (0x7ff << 16) +#define CSC_Y_SOURCE_X_FRACTION_MASK (0x1fff << 3) #define CSC_Y_SOURCE_Y 0x1000D4 -#define CSC_Y_SOURCE_Y_INTEGER 27:16 -#define CSC_Y_SOURCE_Y_FRACTION 15:3 +#define CSC_Y_SOURCE_Y_INTEGER_MASK (0xfff << 16) +#define CSC_Y_SOURCE_Y_FRACTION_MASK (0x1fff << 3) #define CSC_U_SOURCE_BASE 0x1000D8 -#define CSC_U_SOURCE_BASE_EXT 27:27 -#define CSC_U_SOURCE_BASE_EXT_LOCAL 0 -#define CSC_U_SOURCE_BASE_EXT_EXTERNAL 1 -#define CSC_U_SOURCE_BASE_CS 26:26 -#define CSC_U_SOURCE_BASE_CS_0 0 -#define CSC_U_SOURCE_BASE_CS_1 1 -#define CSC_U_SOURCE_BASE_ADDRESS 25:0 +#define CSC_U_SOURCE_BASE_EXT BIT(27) +#define CSC_U_SOURCE_BASE_CS BIT(26) +#define CSC_U_SOURCE_BASE_ADDRESS_MASK 0x3ffffff #define CSC_V_SOURCE_BASE 0x1000DC -#define CSC_V_SOURCE_BASE_EXT 27:27 -#define CSC_V_SOURCE_BASE_EXT_LOCAL 0 -#define CSC_V_SOURCE_BASE_EXT_EXTERNAL 1 -#define CSC_V_SOURCE_BASE_CS 26:26 -#define CSC_V_SOURCE_BASE_CS_0 0 -#define CSC_V_SOURCE_BASE_CS_1 1 -#define 
CSC_V_SOURCE_BASE_ADDRESS 25:0 +#define CSC_V_SOURCE_BASE_EXT BIT(27) +#define CSC_V_SOURCE_BASE_CS BIT(26) +#define CSC_V_SOURCE_BASE_ADDRESS_MASK 0x3ffffff #define CSC_SOURCE_DIMENSION 0x1000E0 -#define CSC_SOURCE_DIMENSION_X 31:16 -#define CSC_SOURCE_DIMENSION_Y 15:0 +#define CSC_SOURCE_DIMENSION_X_MASK (0xffff << 16) +#define CSC_SOURCE_DIMENSION_Y_MASK 0xffff #define CSC_SOURCE_PITCH 0x1000E4 -#define CSC_SOURCE_PITCH_Y 31:16 -#define CSC_SOURCE_PITCH_UV 15:0 +#define CSC_SOURCE_PITCH_Y_MASK (0xffff << 16) +#define CSC_SOURCE_PITCH_UV_MASK 0xffff #define CSC_DESTINATION 0x1000E8 -#define CSC_DESTINATION_WRAP 31:31 -#define CSC_DESTINATION_WRAP_DISABLE 0 -#define CSC_DESTINATION_WRAP_ENABLE 1 -#define CSC_DESTINATION_X 27:16 -#define CSC_DESTINATION_Y 11:0 +#define CSC_DESTINATION_WRAP BIT(31) +#define CSC_DESTINATION_X_MASK (0xfff << 16) +#define CSC_DESTINATION_Y_MASK 0xfff #define CSC_DESTINATION_DIMENSION 0x1000EC -#define CSC_DESTINATION_DIMENSION_X 31:16 -#define CSC_DESTINATION_DIMENSION_Y 15:0 +#define CSC_DESTINATION_DIMENSION_X_MASK (0xffff << 16) +#define CSC_DESTINATION_DIMENSION_Y_MASK 0xffff #define CSC_DESTINATION_PITCH 0x1000F0 -#define CSC_DESTINATION_PITCH_X 31:16 -#define CSC_DESTINATION_PITCH_Y 15:0 +#define CSC_DESTINATION_PITCH_X_MASK (0xffff << 16) +#define CSC_DESTINATION_PITCH_Y_MASK 0xffff #define CSC_SCALE_FACTOR 0x1000F4 -#define CSC_SCALE_FACTOR_HORIZONTAL 31:16 -#define CSC_SCALE_FACTOR_VERTICAL 15:0 +#define CSC_SCALE_FACTOR_HORIZONTAL_MASK (0xffff << 16) +#define CSC_SCALE_FACTOR_VERTICAL_MASK 0xffff #define CSC_DESTINATION_BASE 0x1000F8 -#define CSC_DESTINATION_BASE_EXT 27:27 -#define CSC_DESTINATION_BASE_EXT_LOCAL 0 -#define CSC_DESTINATION_BASE_EXT_EXTERNAL 1 -#define CSC_DESTINATION_BASE_CS 26:26 -#define CSC_DESTINATION_BASE_CS_0 0 -#define CSC_DESTINATION_BASE_CS_1 1 -#define CSC_DESTINATION_BASE_ADDRESS 25:0 +#define CSC_DESTINATION_BASE_EXT BIT(27) +#define CSC_DESTINATION_BASE_CS BIT(26) +#define CSC_DESTINATION_BASE_ADDRESS_MASK 0x3ffffff #define CSC_CONTROL 0x1000FC -#define CSC_CONTROL_STATUS 31:31 -#define CSC_CONTROL_STATUS_STOP 0 -#define CSC_CONTROL_STATUS_START 1 -#define CSC_CONTROL_SOURCE_FORMAT 30:28 -#define CSC_CONTROL_SOURCE_FORMAT_YUV422 0 -#define CSC_CONTROL_SOURCE_FORMAT_YUV420I 1 -#define CSC_CONTROL_SOURCE_FORMAT_YUV420 2 -#define CSC_CONTROL_SOURCE_FORMAT_YVU9 3 -#define CSC_CONTROL_SOURCE_FORMAT_IYU1 4 -#define CSC_CONTROL_SOURCE_FORMAT_IYU2 5 -#define CSC_CONTROL_SOURCE_FORMAT_RGB565 6 -#define CSC_CONTROL_SOURCE_FORMAT_RGB8888 7 -#define CSC_CONTROL_DESTINATION_FORMAT 27:26 -#define CSC_CONTROL_DESTINATION_FORMAT_RGB565 0 -#define CSC_CONTROL_DESTINATION_FORMAT_RGB8888 1 -#define CSC_CONTROL_HORIZONTAL_FILTER 25:25 -#define CSC_CONTROL_HORIZONTAL_FILTER_DISABLE 0 -#define CSC_CONTROL_HORIZONTAL_FILTER_ENABLE 1 -#define CSC_CONTROL_VERTICAL_FILTER 24:24 -#define CSC_CONTROL_VERTICAL_FILTER_DISABLE 0 -#define CSC_CONTROL_VERTICAL_FILTER_ENABLE 1 -#define CSC_CONTROL_BYTE_ORDER 23:23 -#define CSC_CONTROL_BYTE_ORDER_YUYV 0 -#define CSC_CONTROL_BYTE_ORDER_UYVY 1 +#define CSC_CONTROL_STATUS BIT(31) +#define CSC_CONTROL_SOURCE_FORMAT_MASK (0x7 << 28) +#define CSC_CONTROL_SOURCE_FORMAT_YUV422 (0x0 << 28) +#define CSC_CONTROL_SOURCE_FORMAT_YUV420I (0x1 << 28) +#define CSC_CONTROL_SOURCE_FORMAT_YUV420 (0x2 << 28) +#define CSC_CONTROL_SOURCE_FORMAT_YVU9 (0x3 << 28) +#define CSC_CONTROL_SOURCE_FORMAT_IYU1 (0x4 << 28) +#define CSC_CONTROL_SOURCE_FORMAT_IYU2 (0x5 << 28) +#define CSC_CONTROL_SOURCE_FORMAT_RGB565 (0x6 << 28) +#define 
CSC_CONTROL_SOURCE_FORMAT_RGB8888 (0x7 << 28) +#define CSC_CONTROL_DESTINATION_FORMAT_MASK (0x3 << 26) +#define CSC_CONTROL_DESTINATION_FORMAT_RGB565 (0x0 << 26) +#define CSC_CONTROL_DESTINATION_FORMAT_RGB8888 (0x1 << 26) +#define CSC_CONTROL_HORIZONTAL_FILTER BIT(25) +#define CSC_CONTROL_VERTICAL_FILTER BIT(24) +#define CSC_CONTROL_BYTE_ORDER BIT(23) #define DE_DATA_PORT 0x110000 #define I2C_BYTE_COUNT 0x010040 -#define I2C_BYTE_COUNT_COUNT 3:0 +#define I2C_BYTE_COUNT_COUNT_MASK 0xf #define I2C_CTRL 0x010041 -#define I2C_CTRL_INT 4:4 -#define I2C_CTRL_INT_DISABLE 0 -#define I2C_CTRL_INT_ENABLE 1 -#define I2C_CTRL_DIR 3:3 -#define I2C_CTRL_DIR_WR 0 -#define I2C_CTRL_DIR_RD 1 -#define I2C_CTRL_CTRL 2:2 -#define I2C_CTRL_CTRL_STOP 0 -#define I2C_CTRL_CTRL_START 1 -#define I2C_CTRL_MODE 1:1 -#define I2C_CTRL_MODE_STANDARD 0 -#define I2C_CTRL_MODE_FAST 1 -#define I2C_CTRL_EN 0:0 -#define I2C_CTRL_EN_DISABLE 0 -#define I2C_CTRL_EN_ENABLE 1 +#define I2C_CTRL_INT BIT(4) +#define I2C_CTRL_DIR BIT(3) +#define I2C_CTRL_CTRL BIT(2) +#define I2C_CTRL_MODE BIT(1) +#define I2C_CTRL_EN BIT(0) #define I2C_STATUS 0x010042 -#define I2C_STATUS_TX 3:3 -#define I2C_STATUS_TX_PROGRESS 0 -#define I2C_STATUS_TX_COMPLETED 1 -#define I2C_TX_DONE 0x08 -#define I2C_STATUS_ERR 2:2 -#define I2C_STATUS_ERR_NORMAL 0 -#define I2C_STATUS_ERR_ERROR 1 -#define I2C_STATUS_ERR_CLEAR 0 -#define I2C_STATUS_ACK 1:1 -#define I2C_STATUS_ACK_RECEIVED 0 -#define I2C_STATUS_ACK_NOT 1 -#define I2C_STATUS_BSY 0:0 -#define I2C_STATUS_BSY_IDLE 0 -#define I2C_STATUS_BSY_BUSY 1 +#define I2C_STATUS_TX BIT(3) +#define I2C_STATUS_ERR BIT(2) +#define I2C_STATUS_ACK BIT(1) +#define I2C_STATUS_BSY BIT(0) #define I2C_RESET 0x010042 -#define I2C_RESET_BUS_ERROR 2:2 -#define I2C_RESET_BUS_ERROR_CLEAR 0 +#define I2C_RESET_BUS_ERROR BIT(2) #define I2C_SLAVE_ADDRESS 0x010043 -#define I2C_SLAVE_ADDRESS_ADDRESS 7:1 -#define I2C_SLAVE_ADDRESS_RW 0:0 -#define I2C_SLAVE_ADDRESS_RW_W 0 -#define I2C_SLAVE_ADDRESS_RW_R 1 +#define I2C_SLAVE_ADDRESS_ADDRESS_MASK (0x7f << 1) +#define I2C_SLAVE_ADDRESS_RW BIT(0) #define I2C_DATA0 0x010044 #define I2C_DATA1 0x010045 @@ -2046,120 +1281,59 @@ #define ZV0_CAPTURE_CTRL 0x090000 -#define ZV0_CAPTURE_CTRL_FIELD_INPUT 27:27 -#define ZV0_CAPTURE_CTRL_FIELD_INPUT_EVEN_FIELD 0 -#define ZV0_CAPTURE_CTRL_FIELD_INPUT_ODD_FIELD 1 -#define ZV0_CAPTURE_CTRL_SCAN 26:26 -#define ZV0_CAPTURE_CTRL_SCAN_PROGRESSIVE 0 -#define ZV0_CAPTURE_CTRL_SCAN_INTERLACE 1 -#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER 25:25 -#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER_0 0 -#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER_1 1 -#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC 24:24 -#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC_INACTIVE 0 -#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC_ACTIVE 1 -#define ZV0_CAPTURE_CTRL_ADJ 19:19 -#define ZV0_CAPTURE_CTRL_ADJ_NORMAL 0 -#define ZV0_CAPTURE_CTRL_ADJ_DELAY 1 -#define ZV0_CAPTURE_CTRL_HA 18:18 -#define ZV0_CAPTURE_CTRL_HA_DISABLE 0 -#define ZV0_CAPTURE_CTRL_HA_ENABLE 1 -#define ZV0_CAPTURE_CTRL_VSK 17:17 -#define ZV0_CAPTURE_CTRL_VSK_DISABLE 0 -#define ZV0_CAPTURE_CTRL_VSK_ENABLE 1 -#define ZV0_CAPTURE_CTRL_HSK 16:16 -#define ZV0_CAPTURE_CTRL_HSK_DISABLE 0 -#define ZV0_CAPTURE_CTRL_HSK_ENABLE 1 -#define ZV0_CAPTURE_CTRL_FD 15:15 -#define ZV0_CAPTURE_CTRL_FD_RISING 0 -#define ZV0_CAPTURE_CTRL_FD_FALLING 1 -#define ZV0_CAPTURE_CTRL_VP 14:14 -#define ZV0_CAPTURE_CTRL_VP_HIGH 0 -#define ZV0_CAPTURE_CTRL_VP_LOW 1 -#define ZV0_CAPTURE_CTRL_HP 13:13 -#define ZV0_CAPTURE_CTRL_HP_HIGH 0 -#define ZV0_CAPTURE_CTRL_HP_LOW 1 -#define ZV0_CAPTURE_CTRL_CP 12:12 
-#define ZV0_CAPTURE_CTRL_CP_HIGH 0 -#define ZV0_CAPTURE_CTRL_CP_LOW 1 -#define ZV0_CAPTURE_CTRL_UVS 11:11 -#define ZV0_CAPTURE_CTRL_UVS_DISABLE 0 -#define ZV0_CAPTURE_CTRL_UVS_ENABLE 1 -#define ZV0_CAPTURE_CTRL_BS 10:10 -#define ZV0_CAPTURE_CTRL_BS_DISABLE 0 -#define ZV0_CAPTURE_CTRL_BS_ENABLE 1 -#define ZV0_CAPTURE_CTRL_CS 9:9 -#define ZV0_CAPTURE_CTRL_CS_16 0 -#define ZV0_CAPTURE_CTRL_CS_8 1 -#define ZV0_CAPTURE_CTRL_CF 8:8 -#define ZV0_CAPTURE_CTRL_CF_YUV 0 -#define ZV0_CAPTURE_CTRL_CF_RGB 1 -#define ZV0_CAPTURE_CTRL_FS 7:7 -#define ZV0_CAPTURE_CTRL_FS_DISABLE 0 -#define ZV0_CAPTURE_CTRL_FS_ENABLE 1 -#define ZV0_CAPTURE_CTRL_WEAVE 6:6 -#define ZV0_CAPTURE_CTRL_WEAVE_DISABLE 0 -#define ZV0_CAPTURE_CTRL_WEAVE_ENABLE 1 -#define ZV0_CAPTURE_CTRL_BOB 5:5 -#define ZV0_CAPTURE_CTRL_BOB_DISABLE 0 -#define ZV0_CAPTURE_CTRL_BOB_ENABLE 1 -#define ZV0_CAPTURE_CTRL_DB 4:4 -#define ZV0_CAPTURE_CTRL_DB_DISABLE 0 -#define ZV0_CAPTURE_CTRL_DB_ENABLE 1 -#define ZV0_CAPTURE_CTRL_CC 3:3 -#define ZV0_CAPTURE_CTRL_CC_CONTINUE 0 -#define ZV0_CAPTURE_CTRL_CC_CONDITION 1 -#define ZV0_CAPTURE_CTRL_RGB 2:2 -#define ZV0_CAPTURE_CTRL_RGB_DISABLE 0 -#define ZV0_CAPTURE_CTRL_RGB_ENABLE 1 -#define ZV0_CAPTURE_CTRL_656 1:1 -#define ZV0_CAPTURE_CTRL_656_DISABLE 0 -#define ZV0_CAPTURE_CTRL_656_ENABLE 1 -#define ZV0_CAPTURE_CTRL_CAP 0:0 -#define ZV0_CAPTURE_CTRL_CAP_DISABLE 0 -#define ZV0_CAPTURE_CTRL_CAP_ENABLE 1 +#define ZV0_CAPTURE_CTRL_FIELD_INPUT BIT(27) +#define ZV0_CAPTURE_CTRL_SCAN BIT(26) +#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER BIT(25) +#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC BIT(24) +#define ZV0_CAPTURE_CTRL_ADJ BIT(19) +#define ZV0_CAPTURE_CTRL_HA BIT(18) +#define ZV0_CAPTURE_CTRL_VSK BIT(17) +#define ZV0_CAPTURE_CTRL_HSK BIT(16) +#define ZV0_CAPTURE_CTRL_FD BIT(15) +#define ZV0_CAPTURE_CTRL_VP BIT(14) +#define ZV0_CAPTURE_CTRL_HP BIT(13) +#define ZV0_CAPTURE_CTRL_CP BIT(12) +#define ZV0_CAPTURE_CTRL_UVS BIT(11) +#define ZV0_CAPTURE_CTRL_BS BIT(10) +#define ZV0_CAPTURE_CTRL_CS BIT(9) +#define ZV0_CAPTURE_CTRL_CF BIT(8) +#define ZV0_CAPTURE_CTRL_FS BIT(7) +#define ZV0_CAPTURE_CTRL_WEAVE BIT(6) +#define ZV0_CAPTURE_CTRL_BOB BIT(5) +#define ZV0_CAPTURE_CTRL_DB BIT(4) +#define ZV0_CAPTURE_CTRL_CC BIT(3) +#define ZV0_CAPTURE_CTRL_RGB BIT(2) +#define ZV0_CAPTURE_CTRL_656 BIT(1) +#define ZV0_CAPTURE_CTRL_CAP BIT(0) #define ZV0_CAPTURE_CLIP 0x090004 -#define ZV0_CAPTURE_CLIP_YCLIP_EVEN_FIELD 25:16 -#define ZV0_CAPTURE_CLIP_YCLIP 25:16 -#define ZV0_CAPTURE_CLIP_XCLIP 9:0 +#define ZV0_CAPTURE_CLIP_EYCLIP_MASK (0x3ff << 16) +#define ZV0_CAPTURE_CLIP_XCLIP_MASK 0x3ff #define ZV0_CAPTURE_SIZE 0x090008 -#define ZV0_CAPTURE_SIZE_HEIGHT 26:16 -#define ZV0_CAPTURE_SIZE_WIDTH 10:0 +#define ZV0_CAPTURE_SIZE_HEIGHT_MASK (0x7ff << 16) +#define ZV0_CAPTURE_SIZE_WIDTH_MASK 0x7ff #define ZV0_CAPTURE_BUF0_ADDRESS 0x09000C -#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS 31:31 -#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS_CURRENT 0 -#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS_PENDING 1 -#define ZV0_CAPTURE_BUF0_ADDRESS_EXT 27:27 -#define ZV0_CAPTURE_BUF0_ADDRESS_EXT_LOCAL 0 -#define ZV0_CAPTURE_BUF0_ADDRESS_EXT_EXTERNAL 1 -#define ZV0_CAPTURE_BUF0_ADDRESS_CS 26:26 -#define ZV0_CAPTURE_BUF0_ADDRESS_CS_0 0 -#define ZV0_CAPTURE_BUF0_ADDRESS_CS_1 1 -#define ZV0_CAPTURE_BUF0_ADDRESS_ADDRESS 25:0 +#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS BIT(31) +#define ZV0_CAPTURE_BUF0_ADDRESS_EXT BIT(27) +#define ZV0_CAPTURE_BUF0_ADDRESS_CS BIT(26) +#define ZV0_CAPTURE_BUF0_ADDRESS_ADDRESS_MASK 0x3ffffff #define ZV0_CAPTURE_BUF1_ADDRESS 0x090010 -#define 
ZV0_CAPTURE_BUF1_ADDRESS_STATUS 31:31 -#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS_CURRENT 0 -#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS_PENDING 1 -#define ZV0_CAPTURE_BUF1_ADDRESS_EXT 27:27 -#define ZV0_CAPTURE_BUF1_ADDRESS_EXT_LOCAL 0 -#define ZV0_CAPTURE_BUF1_ADDRESS_EXT_EXTERNAL 1 -#define ZV0_CAPTURE_BUF1_ADDRESS_CS 26:26 -#define ZV0_CAPTURE_BUF1_ADDRESS_CS_0 0 -#define ZV0_CAPTURE_BUF1_ADDRESS_CS_1 1 -#define ZV0_CAPTURE_BUF1_ADDRESS_ADDRESS 25:0 +#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS BIT(31) +#define ZV0_CAPTURE_BUF1_ADDRESS_EXT BIT(27) +#define ZV0_CAPTURE_BUF1_ADDRESS_CS BIT(26) +#define ZV0_CAPTURE_BUF1_ADDRESS_ADDRESS_MASK 0x3ffffff #define ZV0_CAPTURE_BUF_OFFSET 0x090014 #ifndef VALIDATION_CHIP - #define ZV0_CAPTURE_BUF_OFFSET_YCLIP_ODD_FIELD 25:16 + #define ZV0_CAPTURE_BUF_OFFSET_YCLIP_ODD_FIELD (0x3ff << 16) #endif -#define ZV0_CAPTURE_BUF_OFFSET_OFFSET 15:0 +#define ZV0_CAPTURE_BUF_OFFSET_OFFSET_MASK 0xffff #define ZV0_CAPTURE_FIFO_CTRL 0x090018 -#define ZV0_CAPTURE_FIFO_CTRL_FIFO 2:0 +#define ZV0_CAPTURE_FIFO_CTRL_FIFO_MASK 0x7 #define ZV0_CAPTURE_FIFO_CTRL_FIFO_0 0 #define ZV0_CAPTURE_FIFO_CTRL_FIFO_1 1 #define ZV0_CAPTURE_FIFO_CTRL_FIFO_2 2 @@ -2170,130 +1344,68 @@ #define ZV0_CAPTURE_FIFO_CTRL_FIFO_7 7 #define ZV0_CAPTURE_YRGB_CONST 0x09001C -#define ZV0_CAPTURE_YRGB_CONST_Y 31:24 -#define ZV0_CAPTURE_YRGB_CONST_R 23:16 -#define ZV0_CAPTURE_YRGB_CONST_G 15:8 -#define ZV0_CAPTURE_YRGB_CONST_B 7:0 +#define ZV0_CAPTURE_YRGB_CONST_Y_MASK (0xff << 24) +#define ZV0_CAPTURE_YRGB_CONST_R_MASK (0xff << 16) +#define ZV0_CAPTURE_YRGB_CONST_G_MASK (0xff << 8) +#define ZV0_CAPTURE_YRGB_CONST_B_MASK 0xff #define ZV0_CAPTURE_LINE_COMP 0x090020 -#define ZV0_CAPTURE_LINE_COMP_LC 10:0 +#define ZV0_CAPTURE_LINE_COMP_LC_MASK 0x7ff /* ZV1 */ #define ZV1_CAPTURE_CTRL 0x098000 -#define ZV1_CAPTURE_CTRL_FIELD_INPUT 27:27 -#define ZV1_CAPTURE_CTRL_FIELD_INPUT_EVEN_FIELD 0 -#define ZV1_CAPTURE_CTRL_FIELD_INPUT_ODD_FIELD 0 -#define ZV1_CAPTURE_CTRL_SCAN 26:26 -#define ZV1_CAPTURE_CTRL_SCAN_PROGRESSIVE 0 -#define ZV1_CAPTURE_CTRL_SCAN_INTERLACE 1 -#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER 25:25 -#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER_0 0 -#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER_1 1 -#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC 24:24 -#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC_INACTIVE 0 -#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC_ACTIVE 1 -#define ZV1_CAPTURE_CTRL_PANEL 20:20 -#define ZV1_CAPTURE_CTRL_PANEL_DISABLE 0 -#define ZV1_CAPTURE_CTRL_PANEL_ENABLE 1 -#define ZV1_CAPTURE_CTRL_ADJ 19:19 -#define ZV1_CAPTURE_CTRL_ADJ_NORMAL 0 -#define ZV1_CAPTURE_CTRL_ADJ_DELAY 1 -#define ZV1_CAPTURE_CTRL_HA 18:18 -#define ZV1_CAPTURE_CTRL_HA_DISABLE 0 -#define ZV1_CAPTURE_CTRL_HA_ENABLE 1 -#define ZV1_CAPTURE_CTRL_VSK 17:17 -#define ZV1_CAPTURE_CTRL_VSK_DISABLE 0 -#define ZV1_CAPTURE_CTRL_VSK_ENABLE 1 -#define ZV1_CAPTURE_CTRL_HSK 16:16 -#define ZV1_CAPTURE_CTRL_HSK_DISABLE 0 -#define ZV1_CAPTURE_CTRL_HSK_ENABLE 1 -#define ZV1_CAPTURE_CTRL_FD 15:15 -#define ZV1_CAPTURE_CTRL_FD_RISING 0 -#define ZV1_CAPTURE_CTRL_FD_FALLING 1 -#define ZV1_CAPTURE_CTRL_VP 14:14 -#define ZV1_CAPTURE_CTRL_VP_HIGH 0 -#define ZV1_CAPTURE_CTRL_VP_LOW 1 -#define ZV1_CAPTURE_CTRL_HP 13:13 -#define ZV1_CAPTURE_CTRL_HP_HIGH 0 -#define ZV1_CAPTURE_CTRL_HP_LOW 1 -#define ZV1_CAPTURE_CTRL_CP 12:12 -#define ZV1_CAPTURE_CTRL_CP_HIGH 0 -#define ZV1_CAPTURE_CTRL_CP_LOW 1 -#define ZV1_CAPTURE_CTRL_UVS 11:11 -#define ZV1_CAPTURE_CTRL_UVS_DISABLE 0 -#define ZV1_CAPTURE_CTRL_UVS_ENABLE 1 -#define ZV1_CAPTURE_CTRL_BS 10:10 -#define ZV1_CAPTURE_CTRL_BS_DISABLE 0 
-#define ZV1_CAPTURE_CTRL_BS_ENABLE 1 -#define ZV1_CAPTURE_CTRL_CS 9:9 -#define ZV1_CAPTURE_CTRL_CS_16 0 -#define ZV1_CAPTURE_CTRL_CS_8 1 -#define ZV1_CAPTURE_CTRL_CF 8:8 -#define ZV1_CAPTURE_CTRL_CF_YUV 0 -#define ZV1_CAPTURE_CTRL_CF_RGB 1 -#define ZV1_CAPTURE_CTRL_FS 7:7 -#define ZV1_CAPTURE_CTRL_FS_DISABLE 0 -#define ZV1_CAPTURE_CTRL_FS_ENABLE 1 -#define ZV1_CAPTURE_CTRL_WEAVE 6:6 -#define ZV1_CAPTURE_CTRL_WEAVE_DISABLE 0 -#define ZV1_CAPTURE_CTRL_WEAVE_ENABLE 1 -#define ZV1_CAPTURE_CTRL_BOB 5:5 -#define ZV1_CAPTURE_CTRL_BOB_DISABLE 0 -#define ZV1_CAPTURE_CTRL_BOB_ENABLE 1 -#define ZV1_CAPTURE_CTRL_DB 4:4 -#define ZV1_CAPTURE_CTRL_DB_DISABLE 0 -#define ZV1_CAPTURE_CTRL_DB_ENABLE 1 -#define ZV1_CAPTURE_CTRL_CC 3:3 -#define ZV1_CAPTURE_CTRL_CC_CONTINUE 0 -#define ZV1_CAPTURE_CTRL_CC_CONDITION 1 -#define ZV1_CAPTURE_CTRL_RGB 2:2 -#define ZV1_CAPTURE_CTRL_RGB_DISABLE 0 -#define ZV1_CAPTURE_CTRL_RGB_ENABLE 1 -#define ZV1_CAPTURE_CTRL_656 1:1 -#define ZV1_CAPTURE_CTRL_656_DISABLE 0 -#define ZV1_CAPTURE_CTRL_656_ENABLE 1 -#define ZV1_CAPTURE_CTRL_CAP 0:0 -#define ZV1_CAPTURE_CTRL_CAP_DISABLE 0 -#define ZV1_CAPTURE_CTRL_CAP_ENABLE 1 +#define ZV1_CAPTURE_CTRL_FIELD_INPUT BIT(27) +#define ZV1_CAPTURE_CTRL_SCAN BIT(26) +#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER BIT(25) +#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC BIT(24) +#define ZV1_CAPTURE_CTRL_PANEL BIT(20) +#define ZV1_CAPTURE_CTRL_ADJ BIT(19) +#define ZV1_CAPTURE_CTRL_HA BIT(18) +#define ZV1_CAPTURE_CTRL_VSK BIT(17) +#define ZV1_CAPTURE_CTRL_HSK BIT(16) +#define ZV1_CAPTURE_CTRL_FD BIT(15) +#define ZV1_CAPTURE_CTRL_VP BIT(14) +#define ZV1_CAPTURE_CTRL_HP BIT(13) +#define ZV1_CAPTURE_CTRL_CP BIT(12) +#define ZV1_CAPTURE_CTRL_UVS BIT(11) +#define ZV1_CAPTURE_CTRL_BS BIT(10) +#define ZV1_CAPTURE_CTRL_CS BIT(9) +#define ZV1_CAPTURE_CTRL_CF BIT(8) +#define ZV1_CAPTURE_CTRL_FS BIT(7) +#define ZV1_CAPTURE_CTRL_WEAVE BIT(6) +#define ZV1_CAPTURE_CTRL_BOB BIT(5) +#define ZV1_CAPTURE_CTRL_DB BIT(4) +#define ZV1_CAPTURE_CTRL_CC BIT(3) +#define ZV1_CAPTURE_CTRL_RGB BIT(2) +#define ZV1_CAPTURE_CTRL_656 BIT(1) +#define ZV1_CAPTURE_CTRL_CAP BIT(0) #define ZV1_CAPTURE_CLIP 0x098004 -#define ZV1_CAPTURE_CLIP_YCLIP 25:16 -#define ZV1_CAPTURE_CLIP_XCLIP 9:0 +#define ZV1_CAPTURE_CLIP_YCLIP_MASK (0x3ff << 16) +#define ZV1_CAPTURE_CLIP_XCLIP_MASK 0x3ff #define ZV1_CAPTURE_SIZE 0x098008 -#define ZV1_CAPTURE_SIZE_HEIGHT 26:16 -#define ZV1_CAPTURE_SIZE_WIDTH 10:0 +#define ZV1_CAPTURE_SIZE_HEIGHT_MASK (0x7ff << 16) +#define ZV1_CAPTURE_SIZE_WIDTH_MASK 0x7ff #define ZV1_CAPTURE_BUF0_ADDRESS 0x09800C -#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS 31:31 -#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS_CURRENT 0 -#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS_PENDING 1 -#define ZV1_CAPTURE_BUF0_ADDRESS_EXT 27:27 -#define ZV1_CAPTURE_BUF0_ADDRESS_EXT_LOCAL 0 -#define ZV1_CAPTURE_BUF0_ADDRESS_EXT_EXTERNAL 1 -#define ZV1_CAPTURE_BUF0_ADDRESS_CS 26:26 -#define ZV1_CAPTURE_BUF0_ADDRESS_CS_0 0 -#define ZV1_CAPTURE_BUF0_ADDRESS_CS_1 1 -#define ZV1_CAPTURE_BUF0_ADDRESS_ADDRESS 25:0 +#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS BIT(31) +#define ZV1_CAPTURE_BUF0_ADDRESS_EXT BIT(27) +#define ZV1_CAPTURE_BUF0_ADDRESS_CS BIT(26) +#define ZV1_CAPTURE_BUF0_ADDRESS_ADDRESS_MASK 0x3ffffff #define ZV1_CAPTURE_BUF1_ADDRESS 0x098010 -#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS 31:31 -#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS_CURRENT 0 -#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS_PENDING 1 -#define ZV1_CAPTURE_BUF1_ADDRESS_EXT 27:27 -#define ZV1_CAPTURE_BUF1_ADDRESS_EXT_LOCAL 0 -#define ZV1_CAPTURE_BUF1_ADDRESS_EXT_EXTERNAL 1 -#define 
ZV1_CAPTURE_BUF1_ADDRESS_CS 26:26 -#define ZV1_CAPTURE_BUF1_ADDRESS_CS_0 0 -#define ZV1_CAPTURE_BUF1_ADDRESS_CS_1 1 -#define ZV1_CAPTURE_BUF1_ADDRESS_ADDRESS 25:0 +#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS BIT(31) +#define ZV1_CAPTURE_BUF1_ADDRESS_EXT BIT(27) +#define ZV1_CAPTURE_BUF1_ADDRESS_CS BIT(26) +#define ZV1_CAPTURE_BUF1_ADDRESS_ADDRESS_MASK 0x3ffffff #define ZV1_CAPTURE_BUF_OFFSET 0x098014 -#define ZV1_CAPTURE_BUF_OFFSET_OFFSET 15:0 +#define ZV1_CAPTURE_BUF_OFFSET_OFFSET_MASK 0xffff #define ZV1_CAPTURE_FIFO_CTRL 0x098018 -#define ZV1_CAPTURE_FIFO_CTRL_FIFO 2:0 +#define ZV1_CAPTURE_FIFO_CTRL_FIFO_MASK 0x7 #define ZV1_CAPTURE_FIFO_CTRL_FIFO_0 0 #define ZV1_CAPTURE_FIFO_CTRL_FIFO_1 1 #define ZV1_CAPTURE_FIFO_CTRL_FIFO_2 2 @@ -2304,52 +1416,30 @@ #define ZV1_CAPTURE_FIFO_CTRL_FIFO_7 7 #define ZV1_CAPTURE_YRGB_CONST 0x09801C -#define ZV1_CAPTURE_YRGB_CONST_Y 31:24 -#define ZV1_CAPTURE_YRGB_CONST_R 23:16 -#define ZV1_CAPTURE_YRGB_CONST_G 15:8 -#define ZV1_CAPTURE_YRGB_CONST_B 7:0 +#define ZV1_CAPTURE_YRGB_CONST_Y_MASK (0xff << 24) +#define ZV1_CAPTURE_YRGB_CONST_R_MASK (0xff << 16) +#define ZV1_CAPTURE_YRGB_CONST_G_MASK (0xff << 8) +#define ZV1_CAPTURE_YRGB_CONST_B_MASK 0xff #define DMA_1_SOURCE 0x0D0010 -#define DMA_1_SOURCE_ADDRESS_EXT 27:27 -#define DMA_1_SOURCE_ADDRESS_EXT_LOCAL 0 -#define DMA_1_SOURCE_ADDRESS_EXT_EXTERNAL 1 -#define DMA_1_SOURCE_ADDRESS_CS 26:26 -#define DMA_1_SOURCE_ADDRESS_CS_0 0 -#define DMA_1_SOURCE_ADDRESS_CS_1 1 -#define DMA_1_SOURCE_ADDRESS 25:0 +#define DMA_1_SOURCE_ADDRESS_EXT BIT(27) +#define DMA_1_SOURCE_ADDRESS_CS BIT(26) +#define DMA_1_SOURCE_ADDRESS_MASK 0x3ffffff #define DMA_1_DESTINATION 0x0D0014 -#define DMA_1_DESTINATION_ADDRESS_EXT 27:27 -#define DMA_1_DESTINATION_ADDRESS_EXT_LOCAL 0 -#define DMA_1_DESTINATION_ADDRESS_EXT_EXTERNAL 1 -#define DMA_1_DESTINATION_ADDRESS_CS 26:26 -#define DMA_1_DESTINATION_ADDRESS_CS_0 0 -#define DMA_1_DESTINATION_ADDRESS_CS_1 1 -#define DMA_1_DESTINATION_ADDRESS 25:0 +#define DMA_1_DESTINATION_ADDRESS_EXT BIT(27) +#define DMA_1_DESTINATION_ADDRESS_CS BIT(26) +#define DMA_1_DESTINATION_ADDRESS_MASK 0x3ffffff #define DMA_1_SIZE_CONTROL 0x0D0018 -#define DMA_1_SIZE_CONTROL_STATUS 31:31 -#define DMA_1_SIZE_CONTROL_STATUS_IDLE 0 -#define DMA_1_SIZE_CONTROL_STATUS_ACTIVE 1 -#define DMA_1_SIZE_CONTROL_SIZE 23:0 +#define DMA_1_SIZE_CONTROL_STATUS BIT(31) +#define DMA_1_SIZE_CONTROL_SIZE_MASK 0xffffff #define DMA_ABORT_INTERRUPT 0x0D0020 -#define DMA_ABORT_INTERRUPT_ABORT_1 5:5 -#define DMA_ABORT_INTERRUPT_ABORT_1_ENABLE 0 -#define DMA_ABORT_INTERRUPT_ABORT_1_ABORT 1 -#define DMA_ABORT_INTERRUPT_ABORT_0 4:4 -#define DMA_ABORT_INTERRUPT_ABORT_0_ENABLE 0 -#define DMA_ABORT_INTERRUPT_ABORT_0_ABORT 1 -#define DMA_ABORT_INTERRUPT_INT_1 1:1 -#define DMA_ABORT_INTERRUPT_INT_1_CLEAR 0 -#define DMA_ABORT_INTERRUPT_INT_1_FINISHED 1 -#define DMA_ABORT_INTERRUPT_INT_0 0:0 -#define DMA_ABORT_INTERRUPT_INT_0_CLEAR 0 -#define DMA_ABORT_INTERRUPT_INT_0_FINISHED 1 - - - - +#define DMA_ABORT_INTERRUPT_ABORT_1 BIT(5) +#define DMA_ABORT_INTERRUPT_ABORT_0 BIT(4) +#define DMA_ABORT_INTERRUPT_INT_1 BIT(1) +#define DMA_ABORT_INTERRUPT_INT_0 BIT(0) /* Default i2c CLK and Data GPIO. 
These are the default i2c pins */ #define DEFAULT_I2C_SCL 30 @@ -2357,16 +1447,12 @@ #define GPIO_DATA_SM750LE 0x020018 -#define GPIO_DATA_SM750LE_1 1:1 -#define GPIO_DATA_SM750LE_0 0:0 +#define GPIO_DATA_SM750LE_1 BIT(1) +#define GPIO_DATA_SM750LE_0 BIT(0) #define GPIO_DATA_DIRECTION_SM750LE 0x02001C -#define GPIO_DATA_DIRECTION_SM750LE_1 1:1 -#define GPIO_DATA_DIRECTION_SM750LE_1_INPUT 0 -#define GPIO_DATA_DIRECTION_SM750LE_1_OUTPUT 1 -#define GPIO_DATA_DIRECTION_SM750LE_0 0:0 -#define GPIO_DATA_DIRECTION_SM750LE_0_INPUT 0 -#define GPIO_DATA_DIRECTION_SM750LE_0_OUTPUT 1 +#define GPIO_DATA_DIRECTION_SM750LE_1 BIT(1) +#define GPIO_DATA_DIRECTION_SM750LE_0 BIT(0) #endif diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c index 241b77b927ee..67f36e71da7e 100644 --- a/drivers/staging/sm750fb/ddk750_sii164.c +++ b/drivers/staging/sm750fb/ddk750_sii164.c @@ -14,8 +14,8 @@ #define i2cWriteReg sm750_hw_i2c_write_reg #define i2cReadReg sm750_hw_i2c_read_reg #else - #define i2cWriteReg swI2CWriteReg - #define i2cReadReg swI2CReadReg + #define i2cWriteReg sm750_sw_i2c_write_reg + #define i2cReadReg sm750_sw_i2c_read_reg #endif /* SII164 Vendor and Device ID */ @@ -236,7 +236,7 @@ long sii164InitChip( } /* Return -1 if initialization fails. */ - return (-1); + return -1; } diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h index f2610c90eeb4..664ad089f753 100644 --- a/drivers/staging/sm750fb/ddk750_sii164.h +++ b/drivers/staging/sm750fb/ddk750_sii164.h @@ -39,7 +39,10 @@ unsigned char sii164IsConnected(void); unsigned char sii164CheckInterrupt(void); void sii164ClearInterrupt(void); #endif -/* below register definination is used for Silicon Image SiI164 DVI controller chip */ +/* + * below register definition is used for + * Silicon Image SiI164 DVI controller chip + */ /* * Vendor ID registers */ diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index c78421b5b0e7..6ed004e40855 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -13,8 +13,6 @@ #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/screen_info.h> -#include <linux/vmalloc.h> -#include <linux/pagemap.h> #include <linux/console.h> #include <asm/fb.h> #include "sm750.h" @@ -189,7 +187,7 @@ static void lynxfb_ops_fillrect(struct fb_info *info, * If not use spin_lock,system will die if user load driver * and immediately unload driver frequently (dual) */ - if (sm750_dev->dual) + if (sm750_dev->fb_count > 1) spin_lock(&sm750_dev->slock); sm750_dev->accel.de_fillrect(&sm750_dev->accel, @@ -197,7 +195,7 @@ static void lynxfb_ops_fillrect(struct fb_info *info, region->dx, region->dy, region->width, region->height, color, rop); - if (sm750_dev->dual) + if (sm750_dev->fb_count > 1) spin_unlock(&sm750_dev->slock); } @@ -223,7 +221,7 @@ static void lynxfb_ops_copyarea(struct fb_info *info, * If not use spin_lock, system will die if user load driver * and immediately unload driver frequently (dual) */ - if (sm750_dev->dual) + if (sm750_dev->fb_count > 1) spin_lock(&sm750_dev->slock); sm750_dev->accel.de_copyarea(&sm750_dev->accel, @@ -231,7 +229,7 @@ static void lynxfb_ops_copyarea(struct fb_info *info, base, pitch, Bpp, region->dx, region->dy, region->width, region->height, HW_ROP2_COPY); - if (sm750_dev->dual) + if (sm750_dev->fb_count > 1) spin_unlock(&sm750_dev->slock); } @@ -272,7 +270,7 @@ static void lynxfb_ops_imageblit(struct fb_info *info, * If not use spin_lock, system will 
die if user load driver * and immediately unload driver frequently (dual) */ - if (sm750_dev->dual) + if (sm750_dev->fb_count > 1) spin_lock(&sm750_dev->slock); sm750_dev->accel.de_imageblit(&sm750_dev->accel, @@ -281,7 +279,7 @@ static void lynxfb_ops_imageblit(struct fb_info *info, image->dx, image->dy, image->width, image->height, fgcol, bgcol, HW_ROP2_COPY); - if (sm750_dev->dual) + if (sm750_dev->fb_count > 1) spin_unlock(&sm750_dev->slock); } @@ -319,7 +317,7 @@ static int lynxfb_ops_set_par(struct fb_info *info) var = &info->var; fix = &info->fix; - /* fix structur is not so FIX ... */ + /* fix structure is not so FIX ... */ line_length = var->xres_virtual * var->bits_per_pixel / 8; line_length = ALIGN(line_length, crtc->line_pad); fix->line_length = line_length; @@ -420,14 +418,16 @@ static int lynxfb_suspend(struct pci_dev *pdev, pm_message_t mesg) ret = pci_save_state(pdev); if (ret) { - pr_err("error:%d occurred in pci_save_state\n", ret); + dev_err(&pdev->dev, + "error:%d occurred in pci_save_state\n", ret); return ret; } - pci_disable_device(pdev); ret = pci_set_power_state(pdev, pci_choose_state(pdev, mesg)); if (ret) { - pr_err("error:%d occurred in pci_set_power_state\n", ret); + dev_err(&pdev->dev, + "error:%d occurred in pci_set_power_state\n", + ret); return ret; } } @@ -455,7 +455,8 @@ static int lynxfb_resume(struct pci_dev *pdev) ret = pci_set_power_state(pdev, PCI_D0); if (ret) { - pr_err("error:%d occurred in pci_set_power_state\n", ret); + dev_err(&pdev->dev, + "error:%d occurred in pci_set_power_state\n", ret); return ret; } @@ -463,7 +464,9 @@ static int lynxfb_resume(struct pci_dev *pdev) pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) { - pr_err("error:%d occurred in pci_enable_device\n", ret); + dev_err(&pdev->dev, + "error:%d occurred in pci_enable_device\n", + ret); return ret; } pci_set_master(pdev); @@ -650,8 +653,10 @@ static int sm750fb_set_drv(struct lynxfb_par *par) output = &par->output; crtc = &par->crtc; - crtc->vidmem_size = (sm750_dev->dual) ? 
sm750_dev->vidmem_size >> 1 : - sm750_dev->vidmem_size; + crtc->vidmem_size = sm750_dev->vidmem_size; + if (sm750_dev->fb_count > 1) + crtc->vidmem_size >>= 1; + /* setup crtc and output member */ sm750_dev->hwCursor = g_hwcursor; @@ -981,7 +986,7 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src) NO_PARAM: if (sm750_dev->revid != SM750LE_REVISION_ID) { - if (sm750_dev->dual) { + if (sm750_dev->fb_count > 1) { if (swap) sm750_dev->dataflow = sm750_dual_swap; else @@ -1000,35 +1005,75 @@ NO_PARAM: } } +static void sm750fb_frambuffer_release(struct sm750_dev *sm750_dev) +{ + struct fb_info *fb_info; + + while (sm750_dev->fb_count) { + fb_info = sm750_dev->fbinfo[sm750_dev->fb_count - 1]; + unregister_framebuffer(fb_info); + framebuffer_release(fb_info); + sm750_dev->fb_count--; + } +} + +static int sm750fb_frambuffer_alloc(struct sm750_dev *sm750_dev, int fbidx) +{ + struct fb_info *fb_info; + struct lynxfb_par *par; + int err; + + fb_info = framebuffer_alloc(sizeof(struct lynxfb_par), + &sm750_dev->pdev->dev); + if (!fb_info) + return -ENOMEM; + + sm750_dev->fbinfo[fbidx] = fb_info; + par = fb_info->par; + par->dev = sm750_dev; + + err = lynxfb_set_fbinfo(fb_info, fbidx); + if (err) + goto release_fb; + + err = register_framebuffer(fb_info); + if (err < 0) + goto release_fb; + + sm750_dev->fb_count++; + + return 0; + +release_fb: + framebuffer_release(fb_info); + return err; +} + static int lynxfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct fb_info *info[] = {NULL, NULL}; struct sm750_dev *sm750_dev = NULL; + int max_fb; int fbidx; + int err; /* enable device */ - if (pci_enable_device(pdev)) { - pr_err("can not enable device.\n"); - goto err_enable; - } + err = pcim_enable_device(pdev); + if (err) + return err; - sm750_dev = kzalloc(sizeof(*sm750_dev), GFP_KERNEL); - if (!sm750_dev) { - pr_err("Could not allocate memory for share.\n"); - goto err_share; - } + err = -ENOMEM; + sm750_dev = devm_kzalloc(&pdev->dev, sizeof(*sm750_dev), GFP_KERNEL); + if (!sm750_dev) + return err; sm750_dev->fbinfo[0] = sm750_dev->fbinfo[1] = NULL; sm750_dev->devid = pdev->device; sm750_dev->revid = pdev->revision; - - pr_info("share->revid = %02x\n", sm750_dev->revid); sm750_dev->pdev = pdev; sm750_dev->mtrr_off = g_nomtrr; sm750_dev->mtrr.vram = 0; sm750_dev->accel_off = g_noaccel; - sm750_dev->dual = g_dualview; spin_lock_init(&sm750_dev->slock); if (!sm750_dev->accel_off) { @@ -1042,19 +1087,15 @@ static int lynxfb_pci_probe(struct pci_dev *pdev, sm750_dev->accel.de_fillrect = hw_fillrect; sm750_dev->accel.de_copyarea = hw_copyarea; sm750_dev->accel.de_imageblit = hw_imageblit; - pr_info("enable 2d acceleration\n"); - } else { - pr_info("disable 2d acceleration\n"); } /* call chip specific setup routine */ sm750fb_setup(sm750_dev, g_settings); /* call chip specific mmap routine */ - if (hw_sm750_map(sm750_dev, pdev)) { - pr_err("Memory map failed\n"); - goto err_map; - } + err = hw_sm750_map(sm750_dev, pdev); + if (err) + return err; if (!sm750_dev->mtrr_off) sm750_dev->mtrr.vram = arch_phys_wc_add(sm750_dev->vidmem_start, @@ -1062,107 +1103,38 @@ static int lynxfb_pci_probe(struct pci_dev *pdev, memset_io(sm750_dev->pvMem, 0, sm750_dev->vidmem_size); - pr_info("sm%3x mmio address = %p\n", sm750_dev->devid, - sm750_dev->pvReg); - pci_set_drvdata(pdev, sm750_dev); /* call chipInit routine */ hw_sm750_inithw(sm750_dev, pdev); - /* allocate frame buffer info structor according to g_dualview */ - fbidx = 0; -ALLOC_FB: - info[fbidx] = 
framebuffer_alloc(sizeof(struct lynxfb_par), &pdev->dev); - if (!info[fbidx]) { - pr_err("Could not allocate framebuffer #%d.\n", fbidx); - if (fbidx == 0) - goto err_info0_alloc; - else - goto err_info1_alloc; - } else { - struct lynxfb_par *par; - int errno; - - pr_info("framebuffer #%d alloc okay\n", fbidx); - sm750_dev->fbinfo[fbidx] = info[fbidx]; - par = info[fbidx]->par; - par->dev = sm750_dev; - - /* set fb_info structure */ - if (lynxfb_set_fbinfo(info[fbidx], fbidx)) { - pr_err("Failed to initial fb_info #%d.\n", fbidx); - if (fbidx == 0) - goto err_info0_set; - else - goto err_info1_set; - } - - /* register frame buffer */ - pr_info("Ready to register framebuffer #%d.\n", fbidx); - errno = register_framebuffer(info[fbidx]); - if (errno < 0) { - pr_err("Failed to register fb_info #%d. err %d\n", - fbidx, - errno); - if (fbidx == 0) - goto err_register0; - else - goto err_register1; - } - pr_info("Accomplished register framebuffer #%d.\n", fbidx); + /* allocate frame buffer info structures according to g_dualview */ + max_fb = g_dualview ? 2 : 1; + for (fbidx = 0; fbidx < max_fb; fbidx++) { + err = sm750fb_frambuffer_alloc(sm750_dev, fbidx); + if (err) + goto release_fb; } - /* no dual view by far */ - fbidx++; - if (sm750_dev->dual && fbidx < 2) - goto ALLOC_FB; - return 0; -err_register1: -err_info1_set: - framebuffer_release(info[1]); -err_info1_alloc: - unregister_framebuffer(info[0]); -err_register0: -err_info0_set: - framebuffer_release(info[0]); -err_info0_alloc: -err_map: - kfree(sm750_dev); -err_share: -err_enable: - return -ENODEV; +release_fb: + sm750fb_frambuffer_release(sm750_dev); + return err; } static void lynxfb_pci_remove(struct pci_dev *pdev) { - struct fb_info *info; struct sm750_dev *sm750_dev; - struct lynxfb_par *par; - int cnt; - cnt = 2; sm750_dev = pci_get_drvdata(pdev); - while (cnt-- > 0) { - info = sm750_dev->fbinfo[cnt]; - if (!info) - continue; - par = info->par; - - unregister_framebuffer(info); - /* release frame buffer */ - framebuffer_release(info); - } + sm750fb_frambuffer_release(sm750_dev); arch_phys_wc_del(sm750_dev->mtrr.vram); iounmap(sm750_dev->pvReg); iounmap(sm750_dev->pvMem); kfree(g_settings); - kfree(sm750_dev); - pci_set_drvdata(pdev, NULL); } static int __init lynxfb_setup(char *options) diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h index b0a93cdc7292..8e70ce0d6da4 100644 --- a/drivers/staging/sm750fb/sm750.h +++ b/drivers/staging/sm750fb/sm750.h @@ -53,7 +53,7 @@ struct lynx_accel { /* base virtual address of de data port */ volatile unsigned char __iomem *dpPortBase; - /* function fointers */ + /* function pointers */ void (*de_init)(struct lynx_accel *); int (*de_wait)(void);/* see if hardware ready to work */ @@ -79,7 +79,7 @@ struct sm750_dev { struct fb_info *fbinfo[2]; struct lynx_accel accel; int accel_off; - int dual; + int fb_count; int mtrr_off; struct{ int vram; diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c index 43e59725920c..9aa4066ac86d 100644 --- a/drivers/staging/sm750fb/sm750_accel.c +++ b/drivers/staging/sm750fb/sm750_accel.c @@ -17,7 +17,6 @@ #include "sm750.h" #include "sm750_accel.h" -#include "sm750_help.h" static inline void write_dpr(struct lynx_accel *accel, int offset, u32 regValue) { writel(regValue, accel->dprBase + offset); @@ -41,20 +40,16 @@ void hw_de_init(struct lynx_accel *accel) write_dpr(accel, DE_MASKS, 0xFFFFFFFF); /* dpr1c */ - reg = FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY, NORMAL)| - FIELD_VALUE(0, 
DE_STRETCH_FORMAT, PATTERN_Y, 0)| - FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X, 0)| - FIELD_SET(0, DE_STRETCH_FORMAT, ADDRESSING, XY)| - FIELD_VALUE(0, DE_STRETCH_FORMAT, SOURCE_HEIGHT, 3); + reg = 0x3; - clr = FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_XY)& - FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_Y)& - FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_X)& - FIELD_CLEAR(DE_STRETCH_FORMAT, ADDRESSING)& - FIELD_CLEAR(DE_STRETCH_FORMAT, SOURCE_HEIGHT); + clr = DE_STRETCH_FORMAT_PATTERN_XY | DE_STRETCH_FORMAT_PATTERN_Y_MASK | + DE_STRETCH_FORMAT_PATTERN_X_MASK | + DE_STRETCH_FORMAT_ADDRESSING_MASK | + DE_STRETCH_FORMAT_SOURCE_HEIGHT_MASK; - /* DE_STRETCH bpp format need be initilized in setMode routine */ - write_dpr(accel, DE_STRETCH_FORMAT, (read_dpr(accel, DE_STRETCH_FORMAT) & clr) | reg); + /* DE_STRETCH bpp format need be initialized in setMode routine */ + write_dpr(accel, DE_STRETCH_FORMAT, + (read_dpr(accel, DE_STRETCH_FORMAT) & ~clr) | reg); /* disable clipping and transparent */ write_dpr(accel, DE_CLIP_TL, 0); /* dpr2c */ @@ -63,16 +58,11 @@ void hw_de_init(struct lynx_accel *accel) write_dpr(accel, DE_COLOR_COMPARE_MASK, 0); /* dpr24 */ write_dpr(accel, DE_COLOR_COMPARE, 0); - reg = FIELD_SET(0, DE_CONTROL, TRANSPARENCY, DISABLE)| - FIELD_SET(0, DE_CONTROL, TRANSPARENCY_MATCH, OPAQUE)| - FIELD_SET(0, DE_CONTROL, TRANSPARENCY_SELECT, SOURCE); - - clr = FIELD_CLEAR(DE_CONTROL, TRANSPARENCY)& - FIELD_CLEAR(DE_CONTROL, TRANSPARENCY_MATCH)& - FIELD_CLEAR(DE_CONTROL, TRANSPARENCY_SELECT); + clr = DE_CONTROL_TRANSPARENCY | DE_CONTROL_TRANSPARENCY_MATCH | + DE_CONTROL_TRANSPARENCY_SELECT; /* dpr0c */ - write_dpr(accel, DE_CONTROL, (read_dpr(accel, DE_CONTROL)&clr)|reg); + write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr); } /* set2dformat only be called from setmode functions @@ -85,7 +75,9 @@ void hw_set2dformat(struct lynx_accel *accel, int fmt) /* fmt=0,1,2 for 8,16,32,bpp on sm718/750/502 */ reg = read_dpr(accel, DE_STRETCH_FORMAT); - reg = FIELD_VALUE(reg, DE_STRETCH_FORMAT, PIXEL_FORMAT, fmt); + reg &= ~DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK; + reg |= ((fmt << DE_STRETCH_FORMAT_PIXEL_FORMAT_SHIFT) & + DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK); write_dpr(accel, DE_STRETCH_FORMAT, reg); } @@ -105,31 +97,28 @@ int hw_fillrect(struct lynx_accel *accel, write_dpr(accel, DE_WINDOW_DESTINATION_BASE, base); /* dpr40 */ write_dpr(accel, DE_PITCH, - FIELD_VALUE(0, DE_PITCH, DESTINATION, pitch/Bpp)| - FIELD_VALUE(0, DE_PITCH, SOURCE, pitch/Bpp)); /* dpr10 */ + ((pitch / Bpp << DE_PITCH_DESTINATION_SHIFT) & + DE_PITCH_DESTINATION_MASK) | + (pitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */ write_dpr(accel, DE_WINDOW_WIDTH, - FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, pitch/Bpp)| - FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, pitch/Bpp)); /* dpr44 */ + ((pitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) & + DE_WINDOW_WIDTH_DST_MASK) | + (pitch / Bpp & DE_WINDOW_WIDTH_SRC_MASK)); /* dpr44 */ write_dpr(accel, DE_FOREGROUND, color); /* DPR14 */ write_dpr(accel, DE_DESTINATION, - FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE)| - FIELD_VALUE(0, DE_DESTINATION, X, x)| - FIELD_VALUE(0, DE_DESTINATION, Y, y)); /* dpr4 */ + ((x << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) | + (y & DE_DESTINATION_Y_MASK)); /* dpr4 */ write_dpr(accel, DE_DIMENSION, - FIELD_VALUE(0, DE_DIMENSION, X, width)| - FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr8 */ + ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) | + (height & DE_DIMENSION_Y_ET_MASK)); /* dpr8 */ - deCtrl = - FIELD_SET(0, DE_CONTROL, STATUS, START)| - 
FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)| - FIELD_SET(0, DE_CONTROL, LAST_PIXEL, ON)| - FIELD_SET(0, DE_CONTROL, COMMAND, RECTANGLE_FILL)| - FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2)| - FIELD_VALUE(0, DE_CONTROL, ROP, rop); /* dpr0xc */ + deCtrl = DE_CONTROL_STATUS | DE_CONTROL_LAST_PIXEL | + DE_CONTROL_COMMAND_RECTANGLE_FILL | DE_CONTROL_ROP_SELECT | + (rop & DE_CONTROL_ROP_MASK); /* dpr0xc */ write_dpr(accel, DE_CONTROL, deCtrl); return 0; @@ -237,18 +226,18 @@ unsigned int rop2) /* ROP value */ Note that input pitch is BYTE value, but the 2D Pitch register uses pixel values. Need Byte to pixel conversion. */ - { - write_dpr(accel, DE_PITCH, - FIELD_VALUE(0, DE_PITCH, DESTINATION, (dPitch/Bpp)) | - FIELD_VALUE(0, DE_PITCH, SOURCE, (sPitch/Bpp))); /* dpr10 */ - } + write_dpr(accel, DE_PITCH, + ((dPitch / Bpp << DE_PITCH_DESTINATION_SHIFT) & + DE_PITCH_DESTINATION_MASK) | + (sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */ /* Screen Window width in Pixels. 2D engine uses this value to calculate the linear address in frame buffer for a given point. */ write_dpr(accel, DE_WINDOW_WIDTH, - FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/Bpp)) | - FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, (sPitch/Bpp))); /* dpr3c */ + ((dPitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) & + DE_WINDOW_WIDTH_DST_MASK) | + (sPitch / Bpp & DE_WINDOW_WIDTH_SRC_MASK)); /* dpr3c */ if (accel->de_wait() != 0) return -1; @@ -256,24 +245,18 @@ unsigned int rop2) /* ROP value */ { write_dpr(accel, DE_SOURCE, - FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) | - FIELD_VALUE(0, DE_SOURCE, X_K1, sx) | - FIELD_VALUE(0, DE_SOURCE, Y_K2, sy)); /* dpr0 */ + ((sx << DE_SOURCE_X_K1_SHIFT) & DE_SOURCE_X_K1_MASK) | + (sy & DE_SOURCE_Y_K2_MASK)); /* dpr0 */ write_dpr(accel, DE_DESTINATION, - FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) | - FIELD_VALUE(0, DE_DESTINATION, X, dx) | - FIELD_VALUE(0, DE_DESTINATION, Y, dy)); /* dpr04 */ + ((dx << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) | + (dy & DE_DESTINATION_Y_MASK)); /* dpr04 */ write_dpr(accel, DE_DIMENSION, - FIELD_VALUE(0, DE_DIMENSION, X, width) | - FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */ - - de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) | - FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) | - FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) | - ((nDirection == RIGHT_TO_LEFT) ? - FIELD_SET(0, DE_CONTROL, DIRECTION, RIGHT_TO_LEFT) - : FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)) | - FIELD_SET(0, DE_CONTROL, STATUS, START); + ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) | + (height & DE_DIMENSION_Y_ET_MASK)); /* dpr08 */ + + de_ctrl = (rop2 & DE_CONTROL_ROP_MASK) | DE_CONTROL_ROP_SELECT | + ((nDirection == RIGHT_TO_LEFT) ? 
DE_CONTROL_DIRECTION : 0) | + DE_CONTROL_COMMAND_BITBLT | DE_CONTROL_STATUS; write_dpr(accel, DE_CONTROL, de_ctrl); /* dpr0c */ } @@ -287,10 +270,8 @@ static unsigned int deGetTransparency(struct lynx_accel *accel) de_ctrl = read_dpr(accel, DE_CONTROL); - de_ctrl &= - FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) | - FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT)| - FIELD_MASK(DE_CONTROL_TRANSPARENCY); + de_ctrl &= (DE_CONTROL_TRANSPARENCY_MATCH | + DE_CONTROL_TRANSPARENCY_SELECT | DE_CONTROL_TRANSPARENCY); return de_ctrl; } @@ -305,7 +286,7 @@ int hw_imageblit(struct lynx_accel *accel, u32 dx, u32 dy, /* Starting coordinate of destination surface */ u32 width, - u32 height, /* width and height of rectange in pixel value */ + u32 height, /* width and height of rectangle in pixel value */ u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data */ u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */ u32 rop2) /* ROP value */ @@ -338,42 +319,39 @@ int hw_imageblit(struct lynx_accel *accel, Note that input pitch is BYTE value, but the 2D Pitch register uses pixel values. Need Byte to pixel conversion. */ - { - write_dpr(accel, DE_PITCH, - FIELD_VALUE(0, DE_PITCH, DESTINATION, dPitch/bytePerPixel) | - FIELD_VALUE(0, DE_PITCH, SOURCE, dPitch/bytePerPixel)); /* dpr10 */ - } + write_dpr(accel, DE_PITCH, + ((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) & + DE_PITCH_DESTINATION_MASK) | + (dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */ /* Screen Window width in Pixels. 2D engine uses this value to calculate the linear address in frame buffer for a given point. */ write_dpr(accel, DE_WINDOW_WIDTH, - FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/bytePerPixel)) | - FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, (dPitch/bytePerPixel))); + ((dPitch / bytePerPixel << DE_WINDOW_WIDTH_DST_SHIFT) & + DE_WINDOW_WIDTH_DST_MASK) | + (dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK)); /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, and Y_K2 field is not used. For mono bitmap, use startBit for X_K1. 
*/ write_dpr(accel, DE_SOURCE, - FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) | - FIELD_VALUE(0, DE_SOURCE, X_K1_MONO, startBit)); /* dpr00 */ + (startBit << DE_SOURCE_X_K1_SHIFT) & + DE_SOURCE_X_K1_MONO_MASK); /* dpr00 */ write_dpr(accel, DE_DESTINATION, - FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) | - FIELD_VALUE(0, DE_DESTINATION, X, dx) | - FIELD_VALUE(0, DE_DESTINATION, Y, dy)); /* dpr04 */ + ((dx << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) | + (dy & DE_DESTINATION_Y_MASK)); /* dpr04 */ write_dpr(accel, DE_DIMENSION, - FIELD_VALUE(0, DE_DIMENSION, X, width) | - FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */ + ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) | + (height & DE_DIMENSION_Y_ET_MASK)); /* dpr08 */ write_dpr(accel, DE_FOREGROUND, fColor); write_dpr(accel, DE_BACKGROUND, bColor); - de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) | - FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) | - FIELD_SET(0, DE_CONTROL, COMMAND, HOST_WRITE) | - FIELD_SET(0, DE_CONTROL, HOST, MONO) | - FIELD_SET(0, DE_CONTROL, STATUS, START); + de_ctrl = (rop2 & DE_CONTROL_ROP_MASK) | + DE_CONTROL_ROP_SELECT | DE_CONTROL_COMMAND_HOST_WRITE | + DE_CONTROL_HOST | DE_CONTROL_STATUS; write_dpr(accel, DE_CONTROL, de_ctrl | deGetTransparency(accel)); diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h index f252e47d5ee9..d59d005e0add 100644 --- a/drivers/staging/sm750fb/sm750_accel.h +++ b/drivers/staging/sm750fb/sm750_accel.h @@ -21,212 +21,162 @@ #define DE_PORT_ADDR_TYPE3 0x100000 #define DE_SOURCE 0x0 -#define DE_SOURCE_WRAP 31:31 -#define DE_SOURCE_WRAP_DISABLE 0 -#define DE_SOURCE_WRAP_ENABLE 1 -#define DE_SOURCE_X_K1 29:16 -#define DE_SOURCE_Y_K2 15:0 -#define DE_SOURCE_X_K1_MONO 20:16 +#define DE_SOURCE_WRAP BIT(31) +#define DE_SOURCE_X_K1_SHIFT 16 +#define DE_SOURCE_X_K1_MASK (0x3fff << 16) +#define DE_SOURCE_X_K1_MONO_MASK (0x1f << 16) +#define DE_SOURCE_Y_K2_MASK 0xffff #define DE_DESTINATION 0x4 -#define DE_DESTINATION_WRAP 31:31 -#define DE_DESTINATION_WRAP_DISABLE 0 -#define DE_DESTINATION_WRAP_ENABLE 1 -#define DE_DESTINATION_X 28:16 -#define DE_DESTINATION_Y 15:0 +#define DE_DESTINATION_WRAP BIT(31) +#define DE_DESTINATION_X_SHIFT 16 +#define DE_DESTINATION_X_MASK (0x1fff << 16) +#define DE_DESTINATION_Y_MASK 0xffff #define DE_DIMENSION 0x8 -#define DE_DIMENSION_X 28:16 -#define DE_DIMENSION_Y_ET 15:0 +#define DE_DIMENSION_X_SHIFT 16 +#define DE_DIMENSION_X_MASK (0x1fff << 16) +#define DE_DIMENSION_Y_ET_MASK 0x1fff #define DE_CONTROL 0xC -#define DE_CONTROL_STATUS 31:31 -#define DE_CONTROL_STATUS_STOP 0 -#define DE_CONTROL_STATUS_START 1 -#define DE_CONTROL_PATTERN 30:30 -#define DE_CONTROL_PATTERN_MONO 0 -#define DE_CONTROL_PATTERN_COLOR 1 -#define DE_CONTROL_UPDATE_DESTINATION_X 29:29 -#define DE_CONTROL_UPDATE_DESTINATION_X_DISABLE 0 -#define DE_CONTROL_UPDATE_DESTINATION_X_ENABLE 1 -#define DE_CONTROL_QUICK_START 28:28 -#define DE_CONTROL_QUICK_START_DISABLE 0 -#define DE_CONTROL_QUICK_START_ENABLE 1 -#define DE_CONTROL_DIRECTION 27:27 -#define DE_CONTROL_DIRECTION_LEFT_TO_RIGHT 0 -#define DE_CONTROL_DIRECTION_RIGHT_TO_LEFT 1 -#define DE_CONTROL_MAJOR 26:26 -#define DE_CONTROL_MAJOR_X 0 -#define DE_CONTROL_MAJOR_Y 1 -#define DE_CONTROL_STEP_X 25:25 -#define DE_CONTROL_STEP_X_POSITIVE 1 -#define DE_CONTROL_STEP_X_NEGATIVE 0 -#define DE_CONTROL_STEP_Y 24:24 -#define DE_CONTROL_STEP_Y_POSITIVE 1 -#define DE_CONTROL_STEP_Y_NEGATIVE 0 -#define DE_CONTROL_STRETCH 23:23 -#define DE_CONTROL_STRETCH_DISABLE 0 -#define DE_CONTROL_STRETCH_ENABLE 1 
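For readers wondering how the removed "high:low" tokens above ever compiled: they are only meaningful when pasted into the conditional-operator helpers from sm750_help.h, which this patch deletes further down. A hedged, self-contained illustration of that trick follows; the DEMO_* names are hypothetical and exist only for this note.

/* Demo only -- not driver code.  Pasting a "29:16" token into the
 * conditional operator yields the start and end bit of the field:
 */
#define DEMO_FIELD	29:16
#define DEMO_START	(0 ? DEMO_FIELD)	/* (0 ? 29 : 16) == 16 */
#define DEMO_END	(1 ? DEMO_FIELD)	/* (1 ? 29 : 16) == 29 */
#define DEMO_MASK	(((1 << (1 + DEMO_END - DEMO_START)) - 1) << DEMO_START)
/* DEMO_MASK == 0x3fff0000, i.e. the explicit (0x3fff << 16) spelled out above */
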
-#define DE_CONTROL_HOST 22:22 -#define DE_CONTROL_HOST_COLOR 0 -#define DE_CONTROL_HOST_MONO 1 -#define DE_CONTROL_LAST_PIXEL 21:21 -#define DE_CONTROL_LAST_PIXEL_OFF 0 -#define DE_CONTROL_LAST_PIXEL_ON 1 -#define DE_CONTROL_COMMAND 20:16 -#define DE_CONTROL_COMMAND_BITBLT 0 -#define DE_CONTROL_COMMAND_RECTANGLE_FILL 1 -#define DE_CONTROL_COMMAND_DE_TILE 2 -#define DE_CONTROL_COMMAND_TRAPEZOID_FILL 3 -#define DE_CONTROL_COMMAND_ALPHA_BLEND 4 -#define DE_CONTROL_COMMAND_RLE_STRIP 5 -#define DE_CONTROL_COMMAND_SHORT_STROKE 6 -#define DE_CONTROL_COMMAND_LINE_DRAW 7 -#define DE_CONTROL_COMMAND_HOST_WRITE 8 -#define DE_CONTROL_COMMAND_HOST_READ 9 -#define DE_CONTROL_COMMAND_HOST_WRITE_BOTTOM_UP 10 -#define DE_CONTROL_COMMAND_ROTATE 11 -#define DE_CONTROL_COMMAND_FONT 12 -#define DE_CONTROL_COMMAND_TEXTURE_LOAD 15 -#define DE_CONTROL_ROP_SELECT 15:15 -#define DE_CONTROL_ROP_SELECT_ROP3 0 -#define DE_CONTROL_ROP_SELECT_ROP2 1 -#define DE_CONTROL_ROP2_SOURCE 14:14 -#define DE_CONTROL_ROP2_SOURCE_BITMAP 0 -#define DE_CONTROL_ROP2_SOURCE_PATTERN 1 -#define DE_CONTROL_MONO_DATA 13:12 -#define DE_CONTROL_MONO_DATA_NOT_PACKED 0 -#define DE_CONTROL_MONO_DATA_8_PACKED 1 -#define DE_CONTROL_MONO_DATA_16_PACKED 2 -#define DE_CONTROL_MONO_DATA_32_PACKED 3 -#define DE_CONTROL_REPEAT_ROTATE 11:11 -#define DE_CONTROL_REPEAT_ROTATE_DISABLE 0 -#define DE_CONTROL_REPEAT_ROTATE_ENABLE 1 -#define DE_CONTROL_TRANSPARENCY_MATCH 10:10 -#define DE_CONTROL_TRANSPARENCY_MATCH_OPAQUE 0 -#define DE_CONTROL_TRANSPARENCY_MATCH_TRANSPARENT 1 -#define DE_CONTROL_TRANSPARENCY_SELECT 9:9 -#define DE_CONTROL_TRANSPARENCY_SELECT_SOURCE 0 -#define DE_CONTROL_TRANSPARENCY_SELECT_DESTINATION 1 -#define DE_CONTROL_TRANSPARENCY 8:8 -#define DE_CONTROL_TRANSPARENCY_DISABLE 0 -#define DE_CONTROL_TRANSPARENCY_ENABLE 1 -#define DE_CONTROL_ROP 7:0 +#define DE_CONTROL_STATUS BIT(31) +#define DE_CONTROL_PATTERN BIT(30) +#define DE_CONTROL_UPDATE_DESTINATION_X BIT(29) +#define DE_CONTROL_QUICK_START BIT(28) +#define DE_CONTROL_DIRECTION BIT(27) +#define DE_CONTROL_MAJOR BIT(26) +#define DE_CONTROL_STEP_X BIT(25) +#define DE_CONTROL_STEP_Y BIT(24) +#define DE_CONTROL_STRETCH BIT(23) +#define DE_CONTROL_HOST BIT(22) +#define DE_CONTROL_LAST_PIXEL BIT(21) +#define DE_CONTROL_COMMAND_SHIFT 16 +#define DE_CONTROL_COMMAND_MASK (0x1f << 16) +#define DE_CONTROL_COMMAND_BITBLT (0x0 << 16) +#define DE_CONTROL_COMMAND_RECTANGLE_FILL (0x1 << 16) +#define DE_CONTROL_COMMAND_DE_TILE (0x2 << 16) +#define DE_CONTROL_COMMAND_TRAPEZOID_FILL (0x3 << 16) +#define DE_CONTROL_COMMAND_ALPHA_BLEND (0x4 << 16) +#define DE_CONTROL_COMMAND_RLE_STRIP (0x5 << 16) +#define DE_CONTROL_COMMAND_SHORT_STROKE (0x6 << 16) +#define DE_CONTROL_COMMAND_LINE_DRAW (0x7 << 16) +#define DE_CONTROL_COMMAND_HOST_WRITE (0x8 << 16) +#define DE_CONTROL_COMMAND_HOST_READ (0x9 << 16) +#define DE_CONTROL_COMMAND_HOST_WRITE_BOTTOM_UP (0xa << 16) +#define DE_CONTROL_COMMAND_ROTATE (0xb << 16) +#define DE_CONTROL_COMMAND_FONT (0xc << 16) +#define DE_CONTROL_COMMAND_TEXTURE_LOAD (0xe << 16) +#define DE_CONTROL_ROP_SELECT BIT(15) +#define DE_CONTROL_ROP2_SOURCE BIT(14) +#define DE_CONTROL_MONO_DATA_SHIFT 12 +#define DE_CONTROL_MONO_DATA_MASK (0x3 << 12) +#define DE_CONTROL_MONO_DATA_NOT_PACKED (0x0 << 12) +#define DE_CONTROL_MONO_DATA_8_PACKED (0x1 << 12) +#define DE_CONTROL_MONO_DATA_16_PACKED (0x2 << 12) +#define DE_CONTROL_MONO_DATA_32_PACKED (0x3 << 12) +#define DE_CONTROL_REPEAT_ROTATE BIT(11) +#define DE_CONTROL_TRANSPARENCY_MATCH BIT(10) +#define DE_CONTROL_TRANSPARENCY_SELECT BIT(9) 
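The _SHIFT/_MASK pairs introduced above also take over the job the old FIELD_GET() helper used to do. A minimal sketch of reading the 2D command field back out of a DE_CONTROL value; the function is hypothetical and not part of the patch.

/* Sketch only: extract the 5-bit 2D command from a DE_CONTROL value. */
static inline u32 de_control_get_command(u32 de_ctrl)
{
	return (de_ctrl & DE_CONTROL_COMMAND_MASK) >> DE_CONTROL_COMMAND_SHIFT;
}
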
+#define DE_CONTROL_TRANSPARENCY BIT(8) +#define DE_CONTROL_ROP_MASK 0xff /* Pseudo fields. */ -#define DE_CONTROL_SHORT_STROKE_DIR 27:24 -#define DE_CONTROL_SHORT_STROKE_DIR_225 0 -#define DE_CONTROL_SHORT_STROKE_DIR_135 1 -#define DE_CONTROL_SHORT_STROKE_DIR_315 2 -#define DE_CONTROL_SHORT_STROKE_DIR_45 3 -#define DE_CONTROL_SHORT_STROKE_DIR_270 4 -#define DE_CONTROL_SHORT_STROKE_DIR_90 5 -#define DE_CONTROL_SHORT_STROKE_DIR_180 8 -#define DE_CONTROL_SHORT_STROKE_DIR_0 10 -#define DE_CONTROL_ROTATION 25:24 -#define DE_CONTROL_ROTATION_0 0 -#define DE_CONTROL_ROTATION_270 1 -#define DE_CONTROL_ROTATION_90 2 -#define DE_CONTROL_ROTATION_180 3 +#define DE_CONTROL_SHORT_STROKE_DIR_MASK (0xf << 24) +#define DE_CONTROL_SHORT_STROKE_DIR_225 (0x0 << 24) +#define DE_CONTROL_SHORT_STROKE_DIR_135 (0x1 << 24) +#define DE_CONTROL_SHORT_STROKE_DIR_315 (0x2 << 24) +#define DE_CONTROL_SHORT_STROKE_DIR_45 (0x3 << 24) +#define DE_CONTROL_SHORT_STROKE_DIR_270 (0x4 << 24) +#define DE_CONTROL_SHORT_STROKE_DIR_90 (0x5 << 24) +#define DE_CONTROL_SHORT_STROKE_DIR_180 (0x8 << 24) +#define DE_CONTROL_SHORT_STROKE_DIR_0 (0xa << 24) +#define DE_CONTROL_ROTATION_MASK (0x3 << 24) +#define DE_CONTROL_ROTATION_0 (0x0 << 24) +#define DE_CONTROL_ROTATION_270 (0x1 << 24) +#define DE_CONTROL_ROTATION_90 (0x2 << 24) +#define DE_CONTROL_ROTATION_180 (0x3 << 24) #define DE_PITCH 0x000010 -#define DE_PITCH_DESTINATION 28:16 -#define DE_PITCH_SOURCE 12:0 +#define DE_PITCH_DESTINATION_SHIFT 16 +#define DE_PITCH_DESTINATION_MASK (0x1fff << 16) +#define DE_PITCH_SOURCE_MASK 0x1fff #define DE_FOREGROUND 0x000014 -#define DE_FOREGROUND_COLOR 31:0 +#define DE_FOREGROUND_COLOR_MASK 0xffffffff #define DE_BACKGROUND 0x000018 -#define DE_BACKGROUND_COLOR 31:0 +#define DE_BACKGROUND_COLOR_MASK 0xffffffff #define DE_STRETCH_FORMAT 0x00001C -#define DE_STRETCH_FORMAT_PATTERN_XY 30:30 -#define DE_STRETCH_FORMAT_PATTERN_XY_NORMAL 0 -#define DE_STRETCH_FORMAT_PATTERN_XY_OVERWRITE 1 -#define DE_STRETCH_FORMAT_PATTERN_Y 29:27 -#define DE_STRETCH_FORMAT_PATTERN_X 25:23 -#define DE_STRETCH_FORMAT_PIXEL_FORMAT 21:20 -#define DE_STRETCH_FORMAT_PIXEL_FORMAT_8 0 -#define DE_STRETCH_FORMAT_PIXEL_FORMAT_16 1 -#define DE_STRETCH_FORMAT_PIXEL_FORMAT_32 2 -#define DE_STRETCH_FORMAT_PIXEL_FORMAT_24 3 - -#define DE_STRETCH_FORMAT_ADDRESSING 19:16 -#define DE_STRETCH_FORMAT_ADDRESSING_XY 0 -#define DE_STRETCH_FORMAT_ADDRESSING_LINEAR 15 -#define DE_STRETCH_FORMAT_SOURCE_HEIGHT 11:0 +#define DE_STRETCH_FORMAT_PATTERN_XY BIT(30) +#define DE_STRETCH_FORMAT_PATTERN_Y_SHIFT 27 +#define DE_STRETCH_FORMAT_PATTERN_Y_MASK (0x7 << 27) +#define DE_STRETCH_FORMAT_PATTERN_X_SHIFT 23 +#define DE_STRETCH_FORMAT_PATTERN_X_MASK (0x7 << 23) +#define DE_STRETCH_FORMAT_PIXEL_FORMAT_SHIFT 20 +#define DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK (0x3 << 20) +#define DE_STRETCH_FORMAT_PIXEL_FORMAT_8 (0x0 << 20) +#define DE_STRETCH_FORMAT_PIXEL_FORMAT_16 (0x1 << 20) +#define DE_STRETCH_FORMAT_PIXEL_FORMAT_32 (0x2 << 20) +#define DE_STRETCH_FORMAT_PIXEL_FORMAT_24 (0x3 << 20) +#define DE_STRETCH_FORMAT_ADDRESSING_SHIFT 16 +#define DE_STRETCH_FORMAT_ADDRESSING_MASK (0xf << 16) +#define DE_STRETCH_FORMAT_ADDRESSING_XY (0x0 << 16) +#define DE_STRETCH_FORMAT_ADDRESSING_LINEAR (0xf << 16) +#define DE_STRETCH_FORMAT_SOURCE_HEIGHT_MASK 0xfff #define DE_COLOR_COMPARE 0x000020 -#define DE_COLOR_COMPARE_COLOR 23:0 +#define DE_COLOR_COMPARE_COLOR_MASK 0xffffff #define DE_COLOR_COMPARE_MASK 0x000024 -#define DE_COLOR_COMPARE_MASK_MASKS 23:0 +#define DE_COLOR_COMPARE_MASK_MASK 0xffffff #define DE_MASKS 
0x000028 -#define DE_MASKS_BYTE_MASK 31:16 -#define DE_MASKS_BIT_MASK 15:0 +#define DE_MASKS_BYTE_MASK (0xffff << 16) +#define DE_MASKS_BIT_MASK 0xffff #define DE_CLIP_TL 0x00002C -#define DE_CLIP_TL_TOP 31:16 -#define DE_CLIP_TL_STATUS 13:13 -#define DE_CLIP_TL_STATUS_DISABLE 0 -#define DE_CLIP_TL_STATUS_ENABLE 1 -#define DE_CLIP_TL_INHIBIT 12:12 -#define DE_CLIP_TL_INHIBIT_OUTSIDE 0 -#define DE_CLIP_TL_INHIBIT_INSIDE 1 -#define DE_CLIP_TL_LEFT 11:0 +#define DE_CLIP_TL_TOP_MASK (0xffff << 16) +#define DE_CLIP_TL_STATUS BIT(13) +#define DE_CLIP_TL_INHIBIT BIT(12) +#define DE_CLIP_TL_LEFT_MASK 0xfff #define DE_CLIP_BR 0x000030 -#define DE_CLIP_BR_BOTTOM 31:16 -#define DE_CLIP_BR_RIGHT 12:0 +#define DE_CLIP_BR_BOTTOM_MASK (0xffff << 16) +#define DE_CLIP_BR_RIGHT_MASK 0x1fff #define DE_MONO_PATTERN_LOW 0x000034 -#define DE_MONO_PATTERN_LOW_PATTERN 31:0 +#define DE_MONO_PATTERN_LOW_PATTERN_MASK 0xffffffff #define DE_MONO_PATTERN_HIGH 0x000038 -#define DE_MONO_PATTERN_HIGH_PATTERN 31:0 +#define DE_MONO_PATTERN_HIGH_PATTERN_MASK 0xffffffff #define DE_WINDOW_WIDTH 0x00003C -#define DE_WINDOW_WIDTH_DESTINATION 28:16 -#define DE_WINDOW_WIDTH_SOURCE 12:0 +#define DE_WINDOW_WIDTH_DST_SHIFT 16 +#define DE_WINDOW_WIDTH_DST_MASK (0x1fff << 16) +#define DE_WINDOW_WIDTH_SRC_MASK 0x1fff #define DE_WINDOW_SOURCE_BASE 0x000040 -#define DE_WINDOW_SOURCE_BASE_EXT 27:27 -#define DE_WINDOW_SOURCE_BASE_EXT_LOCAL 0 -#define DE_WINDOW_SOURCE_BASE_EXT_EXTERNAL 1 -#define DE_WINDOW_SOURCE_BASE_CS 26:26 -#define DE_WINDOW_SOURCE_BASE_CS_0 0 -#define DE_WINDOW_SOURCE_BASE_CS_1 1 -#define DE_WINDOW_SOURCE_BASE_ADDRESS 25:0 +#define DE_WINDOW_SOURCE_BASE_EXT BIT(27) +#define DE_WINDOW_SOURCE_BASE_CS BIT(26) +#define DE_WINDOW_SOURCE_BASE_ADDRESS_MASK 0x3ffffff #define DE_WINDOW_DESTINATION_BASE 0x000044 -#define DE_WINDOW_DESTINATION_BASE_EXT 27:27 -#define DE_WINDOW_DESTINATION_BASE_EXT_LOCAL 0 -#define DE_WINDOW_DESTINATION_BASE_EXT_EXTERNAL 1 -#define DE_WINDOW_DESTINATION_BASE_CS 26:26 -#define DE_WINDOW_DESTINATION_BASE_CS_0 0 -#define DE_WINDOW_DESTINATION_BASE_CS_1 1 -#define DE_WINDOW_DESTINATION_BASE_ADDRESS 25:0 +#define DE_WINDOW_DESTINATION_BASE_EXT BIT(27) +#define DE_WINDOW_DESTINATION_BASE_CS BIT(26) +#define DE_WINDOW_DESTINATION_BASE_ADDRESS_MASK 0x3ffffff #define DE_ALPHA 0x000048 -#define DE_ALPHA_VALUE 7:0 +#define DE_ALPHA_VALUE_MASK 0xff #define DE_WRAP 0x00004C -#define DE_WRAP_X 31:16 -#define DE_WRAP_Y 15:0 +#define DE_WRAP_X_MASK (0xffff << 16) +#define DE_WRAP_Y_MASK 0xffff #define DE_STATUS 0x000050 -#define DE_STATUS_CSC 1:1 -#define DE_STATUS_CSC_CLEAR 0 -#define DE_STATUS_CSC_NOT_ACTIVE 0 -#define DE_STATUS_CSC_ACTIVE 1 -#define DE_STATUS_2D 0:0 -#define DE_STATUS_2D_CLEAR 0 -#define DE_STATUS_2D_NOT_ACTIVE 0 -#define DE_STATUS_2D_ACTIVE 1 - - +#define DE_STATUS_CSC BIT(1) +#define DE_STATUS_2D BIT(0) /* blt direction */ #define TOP_TO_BOTTOM 0 @@ -268,7 +218,7 @@ int hw_imageblit(struct lynx_accel *accel, u32 dx, u32 dy, /* Starting coordinate of destination surface */ u32 width, - u32 height, /* width and height of rectange in pixel value */ + u32 height, /* width and height of rectangle in pixel value */ u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data */ u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */ u32 rop2); diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c index 3b7ce9275f51..d622d65b6cee 100644 --- a/drivers/staging/sm750fb/sm750_cursor.c +++ 
b/drivers/staging/sm750fb/sm750_cursor.c @@ -16,45 +16,34 @@ #include <linux/screen_info.h> #include "sm750.h" -#include "sm750_help.h" #include "sm750_cursor.h" -#define PEEK32(addr) \ -readl(cursor->mmio + (addr)) #define POKE32(addr, data) \ writel((data), cursor->mmio + (addr)) /* cursor control for voyager and 718/750*/ #define HWC_ADDRESS 0x0 -#define HWC_ADDRESS_ENABLE 31:31 -#define HWC_ADDRESS_ENABLE_DISABLE 0 -#define HWC_ADDRESS_ENABLE_ENABLE 1 -#define HWC_ADDRESS_EXT 27:27 -#define HWC_ADDRESS_EXT_LOCAL 0 -#define HWC_ADDRESS_EXT_EXTERNAL 1 -#define HWC_ADDRESS_CS 26:26 -#define HWC_ADDRESS_CS_0 0 -#define HWC_ADDRESS_CS_1 1 -#define HWC_ADDRESS_ADDRESS 25:0 +#define HWC_ADDRESS_ENABLE BIT(31) +#define HWC_ADDRESS_EXT BIT(27) +#define HWC_ADDRESS_CS BIT(26) +#define HWC_ADDRESS_ADDRESS_MASK 0x3ffffff #define HWC_LOCATION 0x4 -#define HWC_LOCATION_TOP 27:27 -#define HWC_LOCATION_TOP_INSIDE 0 -#define HWC_LOCATION_TOP_OUTSIDE 1 -#define HWC_LOCATION_Y 26:16 -#define HWC_LOCATION_LEFT 11:11 -#define HWC_LOCATION_LEFT_INSIDE 0 -#define HWC_LOCATION_LEFT_OUTSIDE 1 -#define HWC_LOCATION_X 10:0 +#define HWC_LOCATION_TOP BIT(27) +#define HWC_LOCATION_Y_SHIFT 16 +#define HWC_LOCATION_Y_MASK (0x7ff << 16) +#define HWC_LOCATION_LEFT BIT(11) +#define HWC_LOCATION_X_MASK 0x7ff #define HWC_COLOR_12 0x8 -#define HWC_COLOR_12_2_RGB565 31:16 -#define HWC_COLOR_12_1_RGB565 15:0 +#define HWC_COLOR_12_2_RGB565_SHIFT 16 +#define HWC_COLOR_12_2_RGB565_MASK (0xffff << 16) +#define HWC_COLOR_12_1_RGB565_MASK 0xffff #define HWC_COLOR_3 0xC -#define HWC_COLOR_3_RGB565 15:0 +#define HWC_COLOR_3_RGB565_MASK 0xffff /* hw_cursor_xxx works for voyager,718 and 750 */ @@ -62,9 +51,7 @@ void hw_cursor_enable(struct lynx_cursor *cursor) { u32 reg; - reg = FIELD_VALUE(0, HWC_ADDRESS, ADDRESS, cursor->offset)| - FIELD_SET(0, HWC_ADDRESS, EXT, LOCAL)| - FIELD_SET(0, HWC_ADDRESS, ENABLE, ENABLE); + reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE; POKE32(HWC_ADDRESS, reg); } void hw_cursor_disable(struct lynx_cursor *cursor) @@ -83,14 +70,17 @@ void hw_cursor_setPos(struct lynx_cursor *cursor, { u32 reg; - reg = FIELD_VALUE(0, HWC_LOCATION, Y, y)| - FIELD_VALUE(0, HWC_LOCATION, X, x); + reg = (((y << HWC_LOCATION_Y_SHIFT) & HWC_LOCATION_Y_MASK) | + (x & HWC_LOCATION_X_MASK)); POKE32(HWC_LOCATION, reg); } void hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg) { - POKE32(HWC_COLOR_12, (fg<<16)|(bg&0xffff)); + u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) & + HWC_COLOR_12_2_RGB565_MASK; + + POKE32(HWC_COLOR_12, reg | (bg & HWC_COLOR_12_1_RGB565_MASK)); POKE32(HWC_COLOR_3, 0xffe0); } @@ -115,15 +105,6 @@ void hw_cursor_setData(struct lynx_cursor *cursor, pstart = cursor->vstart; pbuffer = pstart; -/* - if(odd &1){ - hw_cursor_setData2(cursor,rop,pcol,pmsk); - } - odd++; - if(odd > 0xfffffff0) - odd=0; -*/ - for (i = 0; i < count; i++) { color = *pcol++; mask = *pmsk++; @@ -143,8 +124,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor, iowrite16(data, pbuffer); /* assume pitch is 1,2,4,8,...*/ - if ((i+1) % pitch == 0) - { + if ((i + 1) % pitch == 0) { /* need a return */ pstart += offset; pbuffer = pstart; diff --git a/drivers/staging/sm750fb/sm750_help.h b/drivers/staging/sm750fb/sm750_help.h deleted file mode 100644 index c070cf25a7d6..000000000000 --- a/drivers/staging/sm750fb/sm750_help.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef LYNX_HELP_H__ -#define LYNX_HELP_H__ - -/* Internal macros */ -#define _F_START(f) (0 ? f) -#define _F_END(f) (1 ? 
f) -#define _F_SIZE(f) (1 + _F_END(f) - _F_START(f)) -#define _F_MASK(f) (((1 << _F_SIZE(f)) - 1) << _F_START(f)) -#define _F_NORMALIZE(v, f) (((v) & _F_MASK(f)) >> _F_START(f)) -#define _F_DENORMALIZE(v, f) (((v) << _F_START(f)) & _F_MASK(f)) - -/* Global macros */ -#define FIELD_GET(x, reg, field) \ -( \ - _F_NORMALIZE((x), reg ## _ ## field) \ -) - -#define FIELD_SET(x, reg, field, value) \ -( \ - (x & ~_F_MASK(reg ## _ ## field)) \ - | _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \ -) - -#define FIELD_VALUE(x, reg, field, value) \ -( \ - (x & ~_F_MASK(reg ## _ ## field)) \ - | _F_DENORMALIZE(value, reg ## _ ## field) \ -) - -#define FIELD_CLEAR(reg, field) \ -( \ - ~_F_MASK(reg ## _ ## field) \ -) - -/* Field Macros */ -#define FIELD_START(field) (0 ? field) -#define FIELD_END(field) (1 ? field) -#define FIELD_SIZE(field) (1 + FIELD_END(field) - FIELD_START(field)) -#define FIELD_MASK(field) (((1 << (FIELD_SIZE(field)-1)) | ((1 << (FIELD_SIZE(field)-1)) - 1)) << FIELD_START(field)) - -static inline unsigned int absDiff(unsigned int a, unsigned int b) -{ - if (a < b) - return b-a; - else - return a-b; -} - -/* n / d + 1 / 2 = (2n + d) / 2d */ -#define roundedDiv(num, denom) ((2 * (num) + (denom)) / (2 * (denom))) -#define MHz(x) ((x) * 1000000) - - - - -#endif diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c index 41822c6c0380..2daeedd88c30 100644 --- a/drivers/staging/sm750fb/sm750_hw.c +++ b/drivers/staging/sm750fb/sm750_hw.c @@ -1,4 +1,3 @@ -#include <linux/version.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> @@ -108,65 +107,62 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev) /* for sm718,open pci burst */ if (sm750_dev->devid == 0x718) { POKE32(SYSTEM_CTRL, - FIELD_SET(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, PCI_BURST, ON)); + PEEK32(SYSTEM_CTRL) | SYSTEM_CTRL_PCI_BURST); } if (getChipType() != SM750LE) { + unsigned int val; /* does user need CRT ?*/ if (sm750_dev->nocrt) { POKE32(MISC_CTRL, - FIELD_SET(PEEK32(MISC_CTRL), - MISC_CTRL, - DAC_POWER, OFF)); + PEEK32(MISC_CTRL) | MISC_CTRL_DAC_POWER_OFF); /* shut off dpms */ - POKE32(SYSTEM_CTRL, - FIELD_SET(PEEK32(SYSTEM_CTRL), - SYSTEM_CTRL, - DPMS, VNHN)); + val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK; + val |= SYSTEM_CTRL_DPMS_VPHN; + POKE32(SYSTEM_CTRL, val); } else { POKE32(MISC_CTRL, - FIELD_SET(PEEK32(MISC_CTRL), - MISC_CTRL, - DAC_POWER, ON)); + PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF); /* turn on dpms */ - POKE32(SYSTEM_CTRL, - FIELD_SET(PEEK32(SYSTEM_CTRL), - SYSTEM_CTRL, - DPMS, VPHP)); + val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK; + val |= SYSTEM_CTRL_DPMS_VPHP; + POKE32(SYSTEM_CTRL, val); } + val = PEEK32(PANEL_DISPLAY_CTRL) & + ~(PANEL_DISPLAY_CTRL_DUAL_DISPLAY | + PANEL_DISPLAY_CTRL_DOUBLE_PIXEL); switch (sm750_dev->pnltype) { - case sm750_doubleTFT: case sm750_24TFT: + break; + case sm750_doubleTFT: + val |= PANEL_DISPLAY_CTRL_DOUBLE_PIXEL; + break; case sm750_dualTFT: - POKE32(PANEL_DISPLAY_CTRL, - FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL), - PANEL_DISPLAY_CTRL, - TFT_DISP, - sm750_dev->pnltype)); - break; + val |= PANEL_DISPLAY_CTRL_DUAL_DISPLAY; + break; } + POKE32(PANEL_DISPLAY_CTRL, val); } else { - /* for 750LE ,no DVI chip initilization makes Monitor no signal */ + /* for 750LE ,no DVI chip initialization makes Monitor no signal */ /* Set up GPIO for software I2C to program DVI chip in the Xilinx SP605 board, in order to have video signal. 
*/ - sm750_sw_i2c_init(0, 1); + sm750_sw_i2c_init(0, 1); - - /* Customer may NOT use CH7301 DVI chip, which has to be - initialized differently. - */ - if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) { + /* Customer may NOT use CH7301 DVI chip, which has to be + initialized differently. + */ + if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) { /* The following register values for CH7301 are from Chrontel app note and our experiment. */ pr_info("yes,CH7301 DVI chip found\n"); - sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16); - sm750_sw_i2c_write_reg(0xec, 0x21, 0x9); - sm750_sw_i2c_write_reg(0xec, 0x49, 0xC0); + sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16); + sm750_sw_i2c_write_reg(0xec, 0x21, 0x9); + sm750_sw_i2c_write_reg(0xec, 0x49, 0xC0); pr_info("okay,CH7301 DVI chip setup done\n"); - } + } } /* init 2d engine */ @@ -310,53 +306,51 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc, if (crtc->channel != sm750_secondary) { /* set pitch, offset ,width,start address ,etc... */ POKE32(PANEL_FB_ADDRESS, - FIELD_SET(0, PANEL_FB_ADDRESS, STATUS, CURRENT)| - FIELD_SET(0, PANEL_FB_ADDRESS, EXT, LOCAL)| - FIELD_VALUE(0, PANEL_FB_ADDRESS, ADDRESS, crtc->oScreen)); + crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK); reg = var->xres * (var->bits_per_pixel >> 3); /* crtc->channel is not equal to par->index on numeric,be aware of that */ reg = ALIGN(reg, crtc->line_pad); - - POKE32(PANEL_FB_WIDTH, - FIELD_VALUE(0, PANEL_FB_WIDTH, WIDTH, reg)| - FIELD_VALUE(0, PANEL_FB_WIDTH, OFFSET, fix->line_length)); - - POKE32(PANEL_WINDOW_WIDTH, - FIELD_VALUE(0, PANEL_WINDOW_WIDTH, WIDTH, var->xres - 1)| - FIELD_VALUE(0, PANEL_WINDOW_WIDTH, X, var->xoffset)); - - POKE32(PANEL_WINDOW_HEIGHT, - FIELD_VALUE(0, PANEL_WINDOW_HEIGHT, HEIGHT, var->yres_virtual - 1)| - FIELD_VALUE(0, PANEL_WINDOW_HEIGHT, Y, var->yoffset)); + reg = (reg << PANEL_FB_WIDTH_WIDTH_SHIFT) & + PANEL_FB_WIDTH_WIDTH_MASK; + reg |= (fix->line_length & PANEL_FB_WIDTH_OFFSET_MASK); + POKE32(PANEL_FB_WIDTH, reg); + + reg = ((var->xres - 1) << PANEL_WINDOW_WIDTH_WIDTH_SHIFT) & + PANEL_WINDOW_WIDTH_WIDTH_MASK; + reg |= (var->xoffset & PANEL_WINDOW_WIDTH_X_MASK); + POKE32(PANEL_WINDOW_WIDTH, reg); + + reg = ((var->yres_virtual - 1) << + PANEL_WINDOW_HEIGHT_HEIGHT_SHIFT); + reg &= PANEL_WINDOW_HEIGHT_HEIGHT_MASK; + reg |= (var->yoffset & PANEL_WINDOW_HEIGHT_Y_MASK); + POKE32(PANEL_WINDOW_HEIGHT, reg); POKE32(PANEL_PLANE_TL, 0); - POKE32(PANEL_PLANE_BR, - FIELD_VALUE(0, PANEL_PLANE_BR, BOTTOM, var->yres - 1)| - FIELD_VALUE(0, PANEL_PLANE_BR, RIGHT, var->xres - 1)); + reg = ((var->yres - 1) << PANEL_PLANE_BR_BOTTOM_SHIFT) & + PANEL_PLANE_BR_BOTTOM_MASK; + reg |= ((var->xres - 1) & PANEL_PLANE_BR_RIGHT_MASK); + POKE32(PANEL_PLANE_BR, reg); /* set pixel format */ reg = PEEK32(PANEL_DISPLAY_CTRL); - POKE32(PANEL_DISPLAY_CTRL, - FIELD_VALUE(reg, - PANEL_DISPLAY_CTRL, FORMAT, - (var->bits_per_pixel >> 4) - )); + POKE32(PANEL_DISPLAY_CTRL, reg | (var->bits_per_pixel >> 4)); } else { /* not implemented now */ POKE32(CRT_FB_ADDRESS, crtc->oScreen); reg = var->xres * (var->bits_per_pixel >> 3); /* crtc->channel is not equal to par->index on numeric,be aware of that */ - reg = ALIGN(reg, crtc->line_pad); - - POKE32(CRT_FB_WIDTH, - FIELD_VALUE(0, CRT_FB_WIDTH, WIDTH, reg)| - FIELD_VALUE(0, CRT_FB_WIDTH, OFFSET, fix->line_length)); + reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT; + reg &= CRT_FB_WIDTH_WIDTH_MASK; + reg |= (fix->line_length & CRT_FB_WIDTH_OFFSET_MASK); + POKE32(CRT_FB_WIDTH, reg); /* SET PIXEL FORMAT */ reg = PEEK32(CRT_DISPLAY_CTRL); - reg = 
FIELD_VALUE(reg, CRT_DISPLAY_CTRL, FORMAT, var->bits_per_pixel >> 4); + reg |= ((var->bits_per_pixel >> 4) & + CRT_DISPLAY_CTRL_FORMAT_MASK); POKE32(CRT_DISPLAY_CTRL, reg); } @@ -382,31 +376,36 @@ int hw_sm750le_setBLANK(struct lynxfb_output *output, int blank) switch (blank) { case FB_BLANK_UNBLANK: dpms = CRT_DISPLAY_CTRL_DPMS_0; - crtdb = CRT_DISPLAY_CTRL_BLANK_OFF; + crtdb = 0; break; case FB_BLANK_NORMAL: dpms = CRT_DISPLAY_CTRL_DPMS_0; - crtdb = CRT_DISPLAY_CTRL_BLANK_ON; + crtdb = CRT_DISPLAY_CTRL_BLANK; break; case FB_BLANK_VSYNC_SUSPEND: dpms = CRT_DISPLAY_CTRL_DPMS_2; - crtdb = CRT_DISPLAY_CTRL_BLANK_ON; + crtdb = CRT_DISPLAY_CTRL_BLANK; break; case FB_BLANK_HSYNC_SUSPEND: dpms = CRT_DISPLAY_CTRL_DPMS_1; - crtdb = CRT_DISPLAY_CTRL_BLANK_ON; + crtdb = CRT_DISPLAY_CTRL_BLANK; break; case FB_BLANK_POWERDOWN: dpms = CRT_DISPLAY_CTRL_DPMS_3; - crtdb = CRT_DISPLAY_CTRL_BLANK_ON; + crtdb = CRT_DISPLAY_CTRL_BLANK; break; default: return -EINVAL; } if (output->paths & sm750_crt) { - POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, DPMS, dpms)); - POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb)); + unsigned int val; + + val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_DPMS_MASK; + POKE32(CRT_DISPLAY_CTRL, val | dpms); + + val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_BLANK; + POKE32(CRT_DISPLAY_CTRL, val | crtdb); } return 0; } @@ -419,42 +418,45 @@ int hw_sm750_setBLANK(struct lynxfb_output *output, int blank) switch (blank) { case FB_BLANK_UNBLANK: - pr_info("flag = FB_BLANK_UNBLANK\n"); + pr_debug("flag = FB_BLANK_UNBLANK\n"); dpms = SYSTEM_CTRL_DPMS_VPHP; - pps = PANEL_DISPLAY_CTRL_DATA_ENABLE; - crtdb = CRT_DISPLAY_CTRL_BLANK_OFF; + pps = PANEL_DISPLAY_CTRL_DATA; break; case FB_BLANK_NORMAL: - pr_info("flag = FB_BLANK_NORMAL\n"); + pr_debug("flag = FB_BLANK_NORMAL\n"); dpms = SYSTEM_CTRL_DPMS_VPHP; - pps = PANEL_DISPLAY_CTRL_DATA_DISABLE; - crtdb = CRT_DISPLAY_CTRL_BLANK_ON; + crtdb = CRT_DISPLAY_CTRL_BLANK; break; case FB_BLANK_VSYNC_SUSPEND: dpms = SYSTEM_CTRL_DPMS_VNHP; - pps = PANEL_DISPLAY_CTRL_DATA_DISABLE; - crtdb = CRT_DISPLAY_CTRL_BLANK_ON; + crtdb = CRT_DISPLAY_CTRL_BLANK; break; case FB_BLANK_HSYNC_SUSPEND: dpms = SYSTEM_CTRL_DPMS_VPHN; - pps = PANEL_DISPLAY_CTRL_DATA_DISABLE; - crtdb = CRT_DISPLAY_CTRL_BLANK_ON; + crtdb = CRT_DISPLAY_CTRL_BLANK; break; case FB_BLANK_POWERDOWN: dpms = SYSTEM_CTRL_DPMS_VNHN; - pps = PANEL_DISPLAY_CTRL_DATA_DISABLE; - crtdb = CRT_DISPLAY_CTRL_BLANK_ON; + crtdb = CRT_DISPLAY_CTRL_BLANK; break; } if (output->paths & sm750_crt) { + unsigned int val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK; - POKE32(SYSTEM_CTRL, FIELD_VALUE(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, DPMS, dpms)); - POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb)); + POKE32(SYSTEM_CTRL, val | dpms); + + val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_BLANK; + POKE32(CRT_DISPLAY_CTRL, val | crtdb); } - if (output->paths & sm750_panel) - POKE32(PANEL_DISPLAY_CTRL, FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, DATA, pps)); + if (output->paths & sm750_panel) { + unsigned int val = PEEK32(PANEL_DISPLAY_CTRL); + + val &= ~PANEL_DISPLAY_CTRL_DATA; + val |= pps; + POKE32(PANEL_DISPLAY_CTRL, val); + } return 0; } @@ -468,21 +470,21 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev) if (getChipType() == SM750LE) { reg = PEEK32(DE_STATE1); - reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, ON); + reg |= DE_STATE1_DE_ABORT; POKE32(DE_STATE1, reg); 
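The DE-engine reset in this hunk pulses the abort bit: one read-modify-write sets it, a second clears it again, and the same pattern is repeated for SM750LE (DE_STATE1) and the other chips (SYSTEM_CTRL). A hedged sketch of that pattern as a generic helper; the driver itself keeps the two writes open-coded, so this helper is hypothetical.

/* Sketch only: pulse an abort/reset bit -- set it, then clear it. */
static void pulse_reg_bit(void __iomem *reg, u32 bit)
{
	writel(readl(reg) | bit, reg);
	writel(readl(reg) & ~bit, reg);
}
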
reg = PEEK32(DE_STATE1); - reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, OFF); + reg &= ~DE_STATE1_DE_ABORT; POKE32(DE_STATE1, reg); } else { /* engine reset */ reg = PEEK32(SYSTEM_CTRL); - reg = FIELD_SET(reg, SYSTEM_CTRL, DE_ABORT, ON); + reg |= SYSTEM_CTRL_DE_ABORT; POKE32(SYSTEM_CTRL, reg); reg = PEEK32(SYSTEM_CTRL); - reg = FIELD_SET(reg, SYSTEM_CTRL, DE_ABORT, OFF); + reg &= ~SYSTEM_CTRL_DE_ABORT; POKE32(SYSTEM_CTRL, reg); } @@ -493,15 +495,15 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev) int hw_sm750le_deWait(void) { int i = 0x10000000; + unsigned int mask = DE_STATE2_DE_STATUS_BUSY | DE_STATE2_DE_FIFO_EMPTY | + DE_STATE2_DE_MEM_FIFO_EMPTY; while (i--) { - unsigned int dwVal = PEEK32(DE_STATE2); + unsigned int val = PEEK32(DE_STATE2); - if ((FIELD_GET(dwVal, DE_STATE2, DE_STATUS) == DE_STATE2_DE_STATUS_IDLE) && - (FIELD_GET(dwVal, DE_STATE2, DE_FIFO) == DE_STATE2_DE_FIFO_EMPTY) && - (FIELD_GET(dwVal, DE_STATE2, DE_MEM_FIFO) == DE_STATE2_DE_MEM_FIFO_EMPTY)) { + if ((val & mask) == + (DE_STATE2_DE_FIFO_EMPTY | DE_STATE2_DE_MEM_FIFO_EMPTY)) return 0; - } } /* timeout error */ return -1; @@ -511,15 +513,16 @@ int hw_sm750le_deWait(void) int hw_sm750_deWait(void) { int i = 0x10000000; + unsigned int mask = SYSTEM_CTRL_DE_STATUS_BUSY | + SYSTEM_CTRL_DE_FIFO_EMPTY | + SYSTEM_CTRL_DE_MEM_FIFO_EMPTY; while (i--) { - unsigned int dwVal = PEEK32(SYSTEM_CTRL); + unsigned int val = PEEK32(SYSTEM_CTRL); - if ((FIELD_GET(dwVal, SYSTEM_CTRL, DE_STATUS) == SYSTEM_CTRL_DE_STATUS_IDLE) && - (FIELD_GET(dwVal, SYSTEM_CTRL, DE_FIFO) == SYSTEM_CTRL_DE_FIFO_EMPTY) && - (FIELD_GET(dwVal, SYSTEM_CTRL, DE_MEM_FIFO) == SYSTEM_CTRL_DE_MEM_FIFO_EMPTY)) { + if ((val & mask) == + (SYSTEM_CTRL_DE_FIFO_EMPTY | SYSTEM_CTRL_DE_MEM_FIFO_EMPTY)) return 0; - } } /* timeout error */ return -1; @@ -541,12 +544,12 @@ int hw_sm750_pan_display(struct lynxfb_crtc *crtc, total += crtc->oScreen; if (crtc->channel == sm750_primary) { POKE32(PANEL_FB_ADDRESS, - FIELD_VALUE(PEEK32(PANEL_FB_ADDRESS), - PANEL_FB_ADDRESS, ADDRESS, total)); + PEEK32(PANEL_FB_ADDRESS) | + (total & PANEL_FB_ADDRESS_ADDRESS_MASK)); } else { POKE32(CRT_FB_ADDRESS, - FIELD_VALUE(PEEK32(CRT_FB_ADDRESS), - CRT_FB_ADDRESS, ADDRESS, total)); + PEEK32(CRT_FB_ADDRESS) | + (total & CRT_FB_ADDRESS_ADDRESS_MASK)); } return 0; } diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c index 8565c2343968..723d5df44221 100644 --- a/drivers/staging/speakup/buffers.c +++ b/drivers/staging/speakup/buffers.c @@ -27,7 +27,7 @@ void speakup_start_ttys(void) for (i = 0; i < MAX_NR_CONSOLES; i++) { if (speakup_console[i] && speakup_console[i]->tty_stopped) continue; - if ((vc_cons[i].d != NULL) && (vc_cons[i].d->port.tty != NULL)) + if ((vc_cons[i].d) && (vc_cons[i].d->port.tty)) start_tty(vc_cons[i].d->port.tty); } } @@ -38,7 +38,7 @@ static void speakup_stop_ttys(void) int i; for (i = 0; i < MAX_NR_CONSOLES; i++) - if ((vc_cons[i].d != NULL) && (vc_cons[i].d->port.tty != NULL)) + if ((vc_cons[i].d && (vc_cons[i].d->port.tty))) stop_tty(vc_cons[i].d->port.tty); } diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c index d1ffdf4c0c4b..84989711ae67 100644 --- a/drivers/staging/speakup/devsynth.c +++ b/drivers/staging/speakup/devsynth.c @@ -76,9 +76,9 @@ void speakup_register_devsynth(void) if (misc_registered != 0) return; /* zero it so if register fails, deregister will not ref invalid ptrs */ - if (misc_register(&synth_device)) + if (misc_register(&synth_device)) { pr_warn("Couldn't initialize miscdevice 
/dev/synth.\n"); - else { + } else { pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n", MISC_MAJOR, SYNTH_MINOR); misc_registered = 1; diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index 5e1f16c36b49..8f058b42f68d 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c @@ -12,10 +12,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/slab.h> @@ -28,7 +24,7 @@ #define PRESSED 1 #define RELEASED 0 -static DEFINE_PER_CPU(bool, reporting_keystroke); +static DEFINE_PER_CPU(int, reporting_keystroke); static struct input_dev *virt_keyboard; diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c index 12f880ed4ddf..8960079e4d60 100644 --- a/drivers/staging/speakup/i18n.c +++ b/drivers/staging/speakup/i18n.c @@ -393,10 +393,7 @@ static const int num_groups = ARRAY_SIZE(all_groups); char *spk_msg_get(enum msg_index_t index) { - char *ch; - - ch = speakup_msgs[index]; - return ch; + return speakup_msgs[index]; } /* diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c index 02d5c706aee7..ce94cb13e256 100644 --- a/drivers/staging/speakup/keyhelp.c +++ b/drivers/staging/speakup/keyhelp.c @@ -14,10 +14,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/keyboard.h> @@ -74,7 +70,7 @@ static void build_key_data(void) for (i = 0; i < nstates; i++, kp++) { if (!*kp) continue; - if ((state_tbl[i]&16) != 0 && *kp == SPK_KEY) + if ((state_tbl[i] & 16) != 0 && *kp == SPK_KEY) continue; counters[*kp]++; } @@ -83,7 +79,7 @@ static void build_key_data(void) if (counters[i] == 0) continue; key_offsets[i] = offset; - offset += (counters[i]+1); + offset += (counters[i] + 1); if (offset >= MAXKEYS) break; } @@ -97,7 +93,7 @@ static void build_key_data(void) ch1 = *kp++; if (!ch1) continue; - if ((state_tbl[i]&16) != 0 && ch1 == SPK_KEY) + if ((state_tbl[i] & 16) != 0 && ch1 == SPK_KEY) continue; key = (state_tbl[i] << 8) + ch; counters[ch1]--; @@ -130,14 +126,14 @@ static int help_init(void) int i; int num_funcs = MSG_FUNCNAMES_END - MSG_FUNCNAMES_START + 1; - state_tbl = spk_our_keys[0]+SHIFT_TBL_SIZE+2; + state_tbl = spk_our_keys[0] + SHIFT_TBL_SIZE + 2; for (i = 0; i < num_funcs; i++) { char *cur_funcname = spk_msg_get(MSG_FUNCNAMES_START + i); if (start == *cur_funcname) continue; start = *cur_funcname; - letter_offsets[(start&31)-1] = i; + letter_offsets[(start & 31) - 1] = i; } return 0; } @@ -160,12 +156,12 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key) ch |= 32; /* lower case */ if (ch < 'a' || ch > 'z') return -1; - if (letter_offsets[ch-'a'] == -1) { + if (letter_offsets[ch - 'a'] == -1) { synth_printf(spk_msg_get(MSG_NO_COMMAND), ch); synth_printf("\n"); return 1; } - cur_item = letter_offsets[ch-'a']; + cur_item = letter_offsets[ch - 'a']; } else if (type == KT_CUR) { if (ch == 0 && (MSG_FUNCNAMES_START + cur_item + 1) <= @@ -186,7 +182,7 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key) name = NULL; if ((type != KT_SPKUP) && (key > 0) && (key <= num_key_names)) { synth_printf("%s\n", - spk_msg_get(MSG_KEYNAMES_START + key-1)); + spk_msg_get(MSG_KEYNAMES_START + key - 1)); return 1; } for (i = 0; funcvals[i] != 0 && !name; i++) { @@ -195,7 +191,7 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key) } if (!name) return -1; - kp = spk_our_keys[key]+1; + kp = spk_our_keys[key] + 1; for (i = 0; i < nstates; i++) { if (ch == kp[i]) break; diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c index fdfeb42b2b8f..528cbdce4227 100644 --- a/drivers/staging/speakup/kobjects.c +++ b/drivers/staging/speakup/kobjects.c @@ -862,66 +862,66 @@ static struct kobj_attribute version_attribute = __ATTR_RO(version); static struct kobj_attribute delimiters_attribute = - __ATTR(delimiters, S_IWUSR|S_IRUGO, punc_show, punc_store); + __ATTR(delimiters, S_IWUSR | S_IRUGO, punc_show, punc_store); static struct kobj_attribute ex_num_attribute = - __ATTR(ex_num, S_IWUSR|S_IRUGO, punc_show, punc_store); + __ATTR(ex_num, S_IWUSR | S_IRUGO, punc_show, punc_store); static struct kobj_attribute punc_all_attribute = - __ATTR(punc_all, S_IWUSR|S_IRUGO, punc_show, punc_store); + __ATTR(punc_all, S_IWUSR | S_IRUGO, punc_show, punc_store); static struct kobj_attribute punc_most_attribute = - __ATTR(punc_most, S_IWUSR|S_IRUGO, punc_show, punc_store); + __ATTR(punc_most, S_IWUSR | S_IRUGO, punc_show, punc_store); static struct kobj_attribute punc_some_attribute = - __ATTR(punc_some, S_IWUSR|S_IRUGO, punc_show, punc_store); + 
__ATTR(punc_some, S_IWUSR | S_IRUGO, punc_show, punc_store); static struct kobj_attribute repeats_attribute = - __ATTR(repeats, S_IWUSR|S_IRUGO, punc_show, punc_store); + __ATTR(repeats, S_IWUSR | S_IRUGO, punc_show, punc_store); static struct kobj_attribute attrib_bleep_attribute = - __ATTR(attrib_bleep, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(attrib_bleep, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute bell_pos_attribute = - __ATTR(bell_pos, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(bell_pos, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute bleep_time_attribute = - __ATTR(bleep_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(bleep_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute bleeps_attribute = - __ATTR(bleeps, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(bleeps, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute cursor_time_attribute = - __ATTR(cursor_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(cursor_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute key_echo_attribute = - __ATTR(key_echo, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(key_echo, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute no_interrupt_attribute = - __ATTR(no_interrupt, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(no_interrupt, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute punc_level_attribute = - __ATTR(punc_level, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(punc_level, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute reading_punc_attribute = - __ATTR(reading_punc, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(reading_punc, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute say_control_attribute = - __ATTR(say_control, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(say_control, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute say_word_ctl_attribute = - __ATTR(say_word_ctl, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(say_word_ctl, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute spell_delay_attribute = - __ATTR(spell_delay, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(spell_delay, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); /* * These attributes are i18n related. 
*/ static struct kobj_attribute announcements_attribute = - __ATTR(announcements, S_IWUSR|S_IRUGO, message_show, message_store); + __ATTR(announcements, S_IWUSR | S_IRUGO, message_show, message_store); static struct kobj_attribute characters_attribute = - __ATTR(characters, S_IWUSR|S_IRUGO, chars_chartab_show, + __ATTR(characters, S_IWUSR | S_IRUGO, chars_chartab_show, chars_chartab_store); static struct kobj_attribute chartab_attribute = - __ATTR(chartab, S_IWUSR|S_IRUGO, chars_chartab_show, + __ATTR(chartab, S_IWUSR | S_IRUGO, chars_chartab_show, chars_chartab_store); static struct kobj_attribute ctl_keys_attribute = - __ATTR(ctl_keys, S_IWUSR|S_IRUGO, message_show, message_store); + __ATTR(ctl_keys, S_IWUSR | S_IRUGO, message_show, message_store); static struct kobj_attribute colors_attribute = - __ATTR(colors, S_IWUSR|S_IRUGO, message_show, message_store); + __ATTR(colors, S_IWUSR | S_IRUGO, message_show, message_store); static struct kobj_attribute formatted_attribute = - __ATTR(formatted, S_IWUSR|S_IRUGO, message_show, message_store); + __ATTR(formatted, S_IWUSR | S_IRUGO, message_show, message_store); static struct kobj_attribute function_names_attribute = - __ATTR(function_names, S_IWUSR|S_IRUGO, message_show, message_store); + __ATTR(function_names, S_IWUSR | S_IRUGO, message_show, message_store); static struct kobj_attribute key_names_attribute = - __ATTR(key_names, S_IWUSR|S_IRUGO, message_show, message_store); + __ATTR(key_names, S_IWUSR | S_IRUGO, message_show, message_store); static struct kobj_attribute states_attribute = - __ATTR(states, S_IWUSR|S_IRUGO, message_show, message_store); + __ATTR(states, S_IWUSR | S_IRUGO, message_show, message_store); /* * Create groups of attributes so that we can create and destroy them all diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 30cf973f326d..a22fb07512a1 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -16,10 +16,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
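The speakup kobjects.c changes above only add spaces around '|' in the permission masks; the declarations themselves follow the usual static sysfs attribute pattern built on struct kobj_attribute and __ATTR(). A hedged sketch of how such an attribute is typically declared, with a made-up attribute name and show/store handlers:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static int example_value;

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", example_value);
}

static ssize_t example_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	if (kstrtoint(buf, 10, &example_value))
		return -EINVAL;
	return count;
}

/* S_IWUSR | S_IRUGO == 0644: writable by root, readable by everyone */
static struct kobj_attribute example_attribute =
	__ATTR(example, S_IWUSR | S_IRUGO, example_show, example_store);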
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> @@ -1192,7 +1188,8 @@ static void do_handle_latin(struct vc_data *vc, u_char value, char up_flag) spin_lock_irqsave(&speakup_info.spinlock, flags); if (up_flag) { - spk_lastkey = spk_keydown = 0; + spk_lastkey = 0; + spk_keydown = 0; spin_unlock_irqrestore(&speakup_info.spinlock, flags); return; } @@ -1666,7 +1663,8 @@ static void cursor_done(u_long data) if (win_enabled) { if (vc->vc_x >= win_left && vc->vc_x <= win_right && vc->vc_y >= win_top && vc->vc_y <= win_bottom) { - spk_keydown = is_cursor = 0; + spk_keydown = 0; + is_cursor = 0; goto out; } } @@ -1676,7 +1674,8 @@ static void cursor_done(u_long data) } if (cursor_track == CT_Highlight) { if (speak_highlight(vc)) { - spk_keydown = is_cursor = 0; + spk_keydown = 0; + is_cursor = 0; goto out; } } @@ -1686,7 +1685,8 @@ static void cursor_done(u_long data) say_line_from_to(vc, 0, vc->vc_cols, 0); else say_char(vc); - spk_keydown = is_cursor = 0; + spk_keydown = 0; + is_cursor = 0; out: spin_unlock_irqrestore(&speakup_info.spinlock, flags); } @@ -1866,8 +1866,10 @@ static void speakup_win_set(struct vc_data *vc) static void speakup_win_clear(struct vc_data *vc) { - win_top = win_bottom = 0; - win_left = win_right = 0; + win_top = 0; + win_bottom = 0; + win_left = 0; + win_right = 0; win_start = 0; synth_printf("%s\n", spk_msg_get(MSG_WINDOW_CLEARED)); } @@ -2002,10 +2004,13 @@ static u_char key_speakup, spk_key_locked; static void speakup_lock(struct vc_data *vc) { - if (!spk_key_locked) - spk_key_locked = key_speakup = 16; - else - spk_key_locked = key_speakup = 0; + if (!spk_key_locked) { + spk_key_locked = 16; + key_speakup = 16; + } else { + spk_key_locked = 0; + key_speakup = 0; + } } typedef void (*spkup_hand) (struct vc_data *); @@ -2269,7 +2274,7 @@ static void __exit speakup_exit(void) unregister_vt_notifier(&vt_notifier_block); speakup_unregister_devsynth(); speakup_cancel_paste(); - del_timer(&cursor_timer); + del_timer_sync(&cursor_timer); kthread_stop(speakup_task); speakup_task = NULL; mutex_lock(&spk_mutex); diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c index a5bbb338f275..c2c435cc3d63 100644 --- a/drivers/staging/speakup/serialio.c +++ b/drivers/staging/speakup/serialio.c @@ -8,7 +8,8 @@ #include <linux/serial_core.h> /* WARNING: Do not change this to <linux/serial.h> without testing that - * SERIAL_PORT_DFNS does get defined to the appropriate value. */ + * SERIAL_PORT_DFNS does get defined to the appropriate value. 
+ */ #include <asm/serial.h> #ifndef SERIAL_PORT_DFNS @@ -92,8 +93,6 @@ const struct old_serial_port *spk_serial_init(int index) static irqreturn_t synth_readbuf_handler(int irq, void *dev_id) { unsigned long flags; -/*printk(KERN_ERR "in irq\n"); */ -/*pr_warn("in IRQ\n"); */ int c; spin_lock_irqsave(&speakup_info.spinlock, flags); @@ -101,8 +100,6 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id) c = inb_p(speakup_info.port_tts+UART_RX); synth->read_buff_add((u_char) c); -/*printk(KERN_ERR "c = %d\n", c); */ -/*pr_warn("C = %d\n", c); */ } spin_unlock_irqrestore(&speakup_info.spinlock, flags); return IRQ_HANDLED; @@ -175,9 +172,6 @@ int spk_wait_for_xmitr(void) while (!((inb_p(speakup_info.port_tts + UART_MSR)) & UART_MSR_CTS)) { /* CTS */ if (--tmout == 0) { - /* pr_warn("%s: timed out (cts)\n", - * synth->long_name); - */ timeouts++; return 0; } diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c index f418893928ec..efb791bb642b 100644 --- a/drivers/staging/speakup/speakup_acntpc.c +++ b/drivers/staging/speakup/speakup_acntpc.c @@ -14,11 +14,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * this code is specificly written as a driver for the speakup screenreview * package and is not a general device driver. * This driver is for the Aicom Acent PC internal synthesizer. diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c index af2690f38950..34f45d3549b2 100644 --- a/drivers/staging/speakup/speakup_acntsa.c +++ b/drivers/staging/speakup/speakup_acntsa.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * this code is specificly written as a driver for the speakup screenreview * package and is not a general device driver. */ diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c index 51788f7d4480..3cbc8a7ad1ef 100644 --- a/drivers/staging/speakup/speakup_apollo.c +++ b/drivers/staging/speakup/speakup_apollo.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * this code is specificly written as a driver for the speakup screenreview * package and is not a general device driver. */ diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c index a9a687232955..7a12b8408b67 100644 --- a/drivers/staging/speakup/speakup_audptr.c +++ b/drivers/staging/speakup/speakup_audptr.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c index 80f8358d4199..570f0c21745e 100644 --- a/drivers/staging/speakup/speakup_bns.c +++ b/drivers/staging/speakup/speakup_bns.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * this code is specificly written as a driver for the speakup screenreview * package and is not a general device driver. */ diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c index e0b5db9bb46e..1a5cf3d0a559 100644 --- a/drivers/staging/speakup/speakup_decext.c +++ b/drivers/staging/speakup/speakup_decext.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ @@ -71,30 +67,30 @@ static struct var_t vars[] = { * These attributes will appear in /sys/accessibility/speakup/decext. 
*/ static struct kobj_attribute caps_start_attribute = - __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = - __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = - __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = - __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = - __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = - __ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(voice, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = - __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = - __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = - __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = - __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = - __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = - __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store); + __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c index 4893fef3f894..d6479bd2163b 100644 --- a/drivers/staging/speakup/speakup_decpc.c +++ b/drivers/staging/speakup/speakup_decpc.c @@ -24,10 +24,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/jiffies.h> #include <linux/sched.h> diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c index 09063b82326f..764656759fbf 100644 --- a/drivers/staging/speakup/speakup_dectlk.c +++ b/drivers/staging/speakup/speakup_dectlk.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c index 345efd3344b0..38aa4013bf62 100644 --- a/drivers/staging/speakup/speakup_dtlk.c +++ b/drivers/staging/speakup/speakup_dtlk.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * package it's not a general device driver. * This driver is for the RC Systems DoubleTalk PC internal synthesizer. diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c index f66811269475..87d2a8002b47 100644 --- a/drivers/staging/speakup/speakup_dummy.c +++ b/drivers/staging/speakup/speakup_dummy.c @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c index 6ea027365664..5e2170bf4a8b 100644 --- a/drivers/staging/speakup/speakup_keypc.c +++ b/drivers/staging/speakup/speakup_keypc.c @@ -13,10 +13,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * package it's not a general device driver. * This driver is for the Keynote Gold internal synthesizer. diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c index cc4806be806b..b474e8b65f9a 100644 --- a/drivers/staging/speakup/speakup_ltlk.c +++ b/drivers/staging/speakup/speakup_ltlk.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index b2eb5b133a5d..6b1d0f538bbd 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -14,9 +14,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * this code is specificly written as a driver for the speakup screenreview * package and is not a general device driver. diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c index 1007a6168c3c..e449f2770c1f 100644 --- a/drivers/staging/speakup/speakup_spkout.c +++ b/drivers/staging/speakup/speakup_spkout.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c index 6c21e7112210..fd98d4ffcb3e 100644 --- a/drivers/staging/speakup/speakup_txprt.c +++ b/drivers/staging/speakup/speakup_txprt.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * specificly written as a driver for the speakup screenreview * s not a general device driver. */ diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h index 9bb281d36556..98c4b6f0344a 100644 --- a/drivers/staging/speakup/spk_priv.h +++ b/drivers/staging/speakup/spk_priv.h @@ -16,10 +16,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _SPEAKUP_PRIVATE_H #define _SPEAKUP_PRIVATE_H diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h index 3116ef78c196..130e9cb0118b 100644 --- a/drivers/staging/speakup/spk_priv_keyinfo.h +++ b/drivers/staging/speakup/spk_priv_keyinfo.h @@ -16,10 +16,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef _SPEAKUP_KEYINFO_H diff --git a/drivers/staging/speakup/spkguide.txt b/drivers/staging/speakup/spkguide.txt index b699de3c649f..c23549c54c3c 100644 --- a/drivers/staging/speakup/spkguide.txt +++ b/drivers/staging/speakup/spkguide.txt @@ -1179,7 +1179,6 @@ if desired. Copyright (C) 2000,2001,2002 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index 01eddab93c66..4f462c35fdd9 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c @@ -179,7 +179,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth) { if (synth->alive) return 1; - if (!synth->alive && spk_wait_for_xmitr() > 0) { + if (spk_wait_for_xmitr() > 0) { /* restart */ synth->alive = 1; synth_printf("%s", synth->init); diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c index ab4fe8de415f..e1393d2a2b0f 100644 --- a/drivers/staging/speakup/varhandlers.c +++ b/drivers/staging/speakup/varhandlers.c @@ -176,7 +176,6 @@ struct punc_var_t *spk_get_punc_var(enum var_id_t var_id) int spk_set_num_var(int input, struct st_var_header *var, int how) { int val; - short ret = 0; int *p_val = var->p_val; int l; char buf[32]; @@ -186,50 +185,51 @@ int spk_set_num_var(int input, struct st_var_header *var, int how) if (!var_data) return -ENODATA; - if (how == E_NEW_DEFAULT) { + val = var_data->u.n.value; + switch (how) { + case E_NEW_DEFAULT: if (input < var_data->u.n.low || input > var_data->u.n.high) return -ERANGE; var_data->u.n.default_val = input; return 0; - } - if (how == E_DEFAULT) { + case E_DEFAULT: val = var_data->u.n.default_val; - ret = -ERESTART; - } else { - if (how == E_SET) - val = input; - else - val = var_data->u.n.value; - if (how == E_INC) - val += input; - else if (how == E_DEC) - val -= input; - if (val < var_data->u.n.low || val > var_data->u.n.high) - return -ERANGE; + break; + case E_SET: + val = input; + break; + case E_INC: + val += input; + break; + case E_DEC: + val -= input; + break; } + + if (val < var_data->u.n.low || val > var_data->u.n.high) + return -ERANGE; + var_data->u.n.value = val; if (var->var_type == VAR_TIME && p_val != NULL) { *p_val = msecs_to_jiffies(val); - return ret; + return 0; } if (p_val != NULL) *p_val = val; if (var->var_id == PUNC_LEVEL) { spk_punc_mask = spk_punc_masks[val]; - return ret; + return 0; } if (var_data->u.n.multiplier != 0) val *= var_data->u.n.multiplier; val += var_data->u.n.offset; if (var->var_id < FIRST_SYNTH_VAR || !synth) - return ret; - if (synth->synth_adjust) { - int status = synth->synth_adjust(var); + return 0; + if (synth->synth_adjust) + return synth->synth_adjust(var); - return (status != 0) ? 
status : ret; - } if (!var_data->u.n.synth_fmt) - return ret; + return 0; if (var->var_id == PITCH) cp = spk_pitch_buff; else @@ -240,7 +240,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how) l = sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]); synth_printf("%s", cp); - return ret; + return 0; } int spk_set_string_var(const char *page, struct st_var_header *var, int len) diff --git a/drivers/staging/staging.c b/drivers/staging/staging.c deleted file mode 100644 index 233e589c0932..000000000000 --- a/drivers/staging/staging.c +++ /dev/null @@ -1,19 +0,0 @@ -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/module.h> - -static int __init staging_init(void) -{ - return 0; -} - -static void __exit staging_exit(void) -{ -} - -module_init(staging_init); -module_exit(staging_exit); - -MODULE_AUTHOR("Greg Kroah-Hartman"); -MODULE_DESCRIPTION("Staging Core"); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c index 824d460911ec..774958a8ce02 100644 --- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c +++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c @@ -334,7 +334,7 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata, * 10 = finger present but data may not be accurate, * 11 = reserved for product use. */ - finger_registers = (fingers_supported + 3)/4; + finger_registers = (fingers_supported + 3) / 4; data_base_addr = rfi->fn_desc.data_base_addr; retval = synaptics_rmi4_i2c_block_read(pdata, data_base_addr, values, finger_registers); @@ -350,7 +350,7 @@ static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata, data_reg_blk_size = rfi->size_of_data_register_block; for (finger = 0; finger < fingers_supported; finger++) { /* determine which data byte the finger status is in */ - reg = finger/4; + reg = finger / 4; /* bit shift to get finger's status */ finger_shift = (finger % 4) * 2; finger_status = (values[reg] >> finger_shift) & 3; @@ -566,7 +566,7 @@ static int synpatics_rmi4_touchpad_detect(struct synaptics_rmi4_data *pdata, } pdata->fingers_supported = rfi->num_of_data_points; /* Need to get interrupt info for handling interrupts */ - rfi->index_to_intr_reg = (interruptcount + 7)/8; + rfi->index_to_intr_reg = (interruptcount + 7) / 8; if (rfi->index_to_intr_reg != 0) rfi->index_to_intr_reg -= 1; /* @@ -870,7 +870,7 @@ static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata) * Descriptor structure. * Describes the number of i2c devices on the bus that speak RMI. 
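The varhandlers.c hunk above turns the if/else chain on `how` into a switch, moves the low/high range check after it so every case shares one bounds test, and makes the function report plain 0 on success instead of the old -ERESTART sentinel. A compact stand-alone C model of the resulting control flow, with invented limits and without the synth-specific handling:

#include <stdio.h>

enum how { E_DEFAULT, E_SET, E_INC, E_DEC };

static int set_num_var(int input, enum how how, int *value)
{
	const int low = 0, high = 100, default_val = 50;
	int val = *value;

	switch (how) {
	case E_DEFAULT:
		val = default_val;
		break;
	case E_SET:
		val = input;
		break;
	case E_INC:
		val += input;
		break;
	case E_DEC:
		val -= input;
		break;
	}

	if (val < low || val > high)	/* single range check for all cases */
		return -1;

	*value = val;
	return 0;
}

int main(void)
{
	int v = 50;

	set_num_var(10, E_INC, &v);
	printf("v = %d\n", v);		/* prints 60 */
	return 0;
}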
*/ -static struct synaptics_rmi4_platform_data synaptics_rmi4_platformdata = { +static const struct synaptics_rmi4_platform_data synaptics_rmi4_platformdata = { .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED), .x_flip = false, .y_flip = true, @@ -912,7 +912,7 @@ static int synaptics_rmi4_probe return -ENOMEM; rmi4_data->input_dev = input_allocate_device(); - if (rmi4_data->input_dev == NULL) { + if (!rmi4_data->input_dev) { retval = -ENOMEM; goto err_input; } @@ -1039,7 +1039,6 @@ static int synaptics_rmi4_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM /** * synaptics_rmi4_suspend() - suspend the touch screen controller * @dev: pointer to device structure @@ -1047,7 +1046,7 @@ static int synaptics_rmi4_remove(struct i2c_client *client) * This function is used to suspend the * touch panel controller and returns integer */ -static int synaptics_rmi4_suspend(struct device *dev) +static int __maybe_unused synaptics_rmi4_suspend(struct device *dev) { /* Touch sleep mode */ int retval; @@ -1081,7 +1080,7 @@ static int synaptics_rmi4_suspend(struct device *dev) * This function is used to resume the touch panel * controller and returns integer. */ -static int synaptics_rmi4_resume(struct device *dev) +static int __maybe_unused synaptics_rmi4_resume(struct device *dev) { int retval; unsigned char intr_status; @@ -1112,8 +1111,6 @@ static int synaptics_rmi4_resume(struct device *dev) return 0; } -#endif - static SIMPLE_DEV_PM_OPS(synaptics_rmi4_dev_pm_ops, synaptics_rmi4_suspend, synaptics_rmi4_resume); diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS index c9cef0b91531..cc46e37e64c1 100644 --- a/drivers/staging/unisys/MAINTAINERS +++ b/drivers/staging/unisys/MAINTAINERS @@ -1,5 +1,5 @@ Unisys s-Par drivers -M: Ben Romer <sparmaintainer@unisys.com> +M: David Kershner <sparmaintainer@unisys.com> S: Maintained F: Documentation/s-Par/overview.txt F: Documentation/s-Par/proc-entries.txt diff --git a/drivers/staging/unisys/include/guestlinuxdebug.h b/drivers/staging/unisys/include/guestlinuxdebug.h index 82ee565395ba..b81287f5e2c3 100644 --- a/drivers/staging/unisys/include/guestlinuxdebug.h +++ b/drivers/staging/unisys/include/guestlinuxdebug.h @@ -17,9 +17,10 @@ #define __GUESTLINUXDEBUG_H__ /* -* This file contains supporting interface for "vmcallinterface.h", particularly -* regarding adding additional structure and functionality to linux -* ISSUE_IO_VMCALL_POSTCODE_SEVERITY */ + * This file contains supporting interface for "vmcallinterface.h", particularly + * regarding adding additional structure and functionality to linux + * ISSUE_IO_VMCALL_POSTCODE_SEVERITY + */ /******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/ enum driver_pc { /* POSTCODE driver identifier tuples */ @@ -133,9 +134,9 @@ enum event_pc { /* POSTCODE event identifier tuples */ #define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR #define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING -#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT /* TODO-> Info currently - * doesn't show, so we - * set info=warning */ +/* TODO-> Info currently doesn't show, so we set info=warning */ +#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT + /* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR); * Please also note that the resulting postcode is in hex, so if you are * searching for the __LINE__ number, convert it first to decimal. 
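The synaptics_i2c_rmi4.c hunk above removes the #ifdef CONFIG_PM guard and instead tags the suspend/resume callbacks __maybe_unused: SIMPLE_DEV_PM_OPS drops its references to them when sleep support is compiled out, so the unused functions are discarded by the compiler rather than hidden by the preprocessor. A skeletal illustration of the pattern, with placeholder driver names:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	/* put the hardware into its low-power state here */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* bring the hardware back up here */
	return 0;
}

/* expands to an empty dev_pm_ops when CONFIG_PM_SLEEP is not set */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);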
The line diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h index 162ca187a66b..880d9f04cbcf 100644 --- a/drivers/staging/unisys/include/iochannel.h +++ b/drivers/staging/unisys/include/iochannel.h @@ -575,7 +575,7 @@ struct spar_io_channel_protocol { * room) */ static inline u16 -add_physinfo_entries(u32 inp_pfn, u16 inp_off, u32 inp_len, u16 index, +add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index, u16 max_pi_arr_entries, struct phys_info pi_arr[]) { u32 len; @@ -589,21 +589,19 @@ add_physinfo_entries(u32 inp_pfn, u16 inp_off, u32 inp_len, u16 index, pi_arr[index].pi_pfn = inp_pfn; pi_arr[index].pi_off = (u16)inp_off; pi_arr[index].pi_len = (u16)inp_len; - return index + 1; + return index + 1; } - /* this entry spans multiple pages */ - for (len = inp_len, i = 0; len; - len -= pi_arr[index + i].pi_len, i++) { + /* this entry spans multiple pages */ + for (len = inp_len, i = 0; len; + len -= pi_arr[index + i].pi_len, i++) { if (index + i >= max_pi_arr_entries) return 0; pi_arr[index + i].pi_pfn = inp_pfn + i; if (i == 0) { pi_arr[index].pi_off = inp_off; pi_arr[index].pi_len = firstlen; - } - - else { + } else { pi_arr[index + i].pi_off = 0; pi_arr[index + i].pi_len = (u16)MINNUM(len, (u32)PI_PAGE_SIZE); diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h index ec25366b127c..03e36fb6a5a0 100644 --- a/drivers/staging/unisys/visorbus/controlvmchannel.h +++ b/drivers/staging/unisys/visorbus/controlvmchannel.h @@ -55,22 +55,25 @@ #define CONTROLVM_CRASHMSG_MAX 2 struct spar_segment_state { - u16 enabled:1; /* Bit 0: May enter other states */ - u16 active:1; /* Bit 1: Assigned to active partition */ - u16 alive:1; /* Bit 2: Configure message sent to - * service/server */ - u16 revoked:1; /* Bit 3: similar to partition state - * ShuttingDown */ - u16 allocated:1; /* Bit 4: memory (device/port number) - * has been selected by Command */ - u16 known:1; /* Bit 5: has been introduced to the - * service/guest partition */ - u16 ready:1; /* Bit 6: service/Guest partition has - * responded to introduction */ - u16 operating:1; /* Bit 7: resource is configured and - * operating */ - /* Note: don't use high bit unless we need to switch to ushort - * which is non-compliant */ + /* Bit 0: May enter other states */ + u16 enabled:1; + /* Bit 1: Assigned to active partition */ + u16 active:1; + /* Bit 2: Configure message sent to service/server */ + u16 alive:1; + /* Bit 3: similar to partition state ShuttingDown */ + u16 revoked:1; + /* Bit 4: memory (device/port number) has been selected by Command */ + u16 allocated:1; + /* Bit 5: has been introduced to the service/guest partition */ + u16 known:1; + /* Bit 6: service/Guest partition has responded to introduction */ + u16 ready:1; + /* Bit 7: resource is configured and operating */ + u16 operating:1; +/* Note: don't use high bit unless we need to switch to ushort + * which is non-compliant + */ }; static const struct spar_segment_state segment_state_running = { @@ -177,53 +180,53 @@ struct controlvm_message_header { /* For requests, indicates the message type. */ /* For responses, indicates the type of message we are responding to. 
*/ - u32 message_size; /* Includes size of this struct + size - * of message */ - u32 segment_index; /* Index of segment containing Vm - * message/information */ - u32 completion_status; /* Error status code or result of - * message completion */ + /* Includes size of this struct + size of message */ + u32 message_size; + /* Index of segment containing Vm message/information */ + u32 segment_index; + /* Error status code or result of message completion */ + u32 completion_status; struct { - u32 failed:1; /* =1 in a response to * signify - * failure */ - u32 response_expected:1; /* =1 in all messages that expect a - * response (Control ignores this - * bit) */ - u32 server:1; /* =1 in all bus & device-related - * messages where the message - * receiver is to act as the bus or - * device server */ - u32 test_message:1; /* =1 for testing use only - * (Control and Command ignore this - * bit) */ - u32 partial_completion:1; /* =1 if there are forthcoming - * responses/acks associated - * with this message */ - u32 preserve:1; /* =1 this is to let us know to - * preserve channel contents - * (for running guests)*/ - u32 writer_in_diag:1; /* =1 the DiagWriter is active in the - * Diagnostic Partition*/ + /* =1 in a response to signify failure */ + u32 failed:1; + /* =1 in all messages that expect a response */ + u32 response_expected:1; + /* =1 in all bus & device-related messages where the message + * receiver is to act as the bus or device server + */ + u32 server:1; + /* =1 for testing use only (Control and Command ignore this */ + u32 test_message:1; + /* =1 if there are forthcoming responses/acks associated + * with this message + */ + u32 partial_completion:1; + /* =1 this is to let us know to preserve channel contents */ + u32 preserve:1; + /* =1 the DiagWriter is active in the Diagnostic Partition */ + u32 writer_in_diag:1; } flags; - u32 reserved; /* Natural alignment */ - u64 message_handle; /* Identifies the particular message instance, - * and is used to match particular */ + /* Natural alignment */ + u32 reserved; + /* Identifies the particular message instance */ + u64 message_handle; /* request instances with the corresponding response instance. */ - u64 payload_vm_offset; /* Offset of payload area from start of this - * instance of ControlVm segment */ - u32 payload_max_bytes; /* Maximum bytes allocated in payload - * area of ControlVm segment */ - u32 payload_bytes; /* Actual number of bytes of payload - * area to copy between IO/Command; */ + /* Offset of payload area from start of this instance */ + u64 payload_vm_offset; + /* Maximum bytes allocated in payload area of ControlVm segment */ + u32 payload_max_bytes; + /* Actual number of bytes of payload area to copy between IO/Command */ + u32 payload_bytes; /* if non-zero, there is a payload to copy. 
*/ }; struct controlvm_packet_device_create { u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */ u32 dev_no; /* bus-relative (0..n-1) device number */ - u64 channel_addr; /* Guest physical address of the channel, which - * can be dereferenced by the receiver of this - * ControlVm command */ + /* Guest physical address of the channel, which can be dereferenced by + * the receiver of this ControlVm command + */ + u64 channel_addr; u64 channel_bytes; /* specifies size of the channel in bytes */ uuid_le data_type_uuid; /* specifies format of data in channel */ uuid_le dev_inst_uuid; /* instance guid for the device */ @@ -231,8 +234,8 @@ struct controlvm_packet_device_create { }; /* for CONTROLVM_DEVICE_CREATE */ struct controlvm_packet_device_configure { - u32 bus_no; /* bus # (0..n-1) from the msg - * receiver's perspective */ + /* bus # (0..n-1) from the msg receiver's perspective */ + u32 bus_no; /* Control uses header SegmentIndex field to access bus number... */ u32 dev_no; /* bus-relative (0..n-1) device number */ } ; /* for CONTROLVM_DEVICE_CONFIGURE */ @@ -251,50 +254,50 @@ struct controlvm_message_device_configure { struct controlvm_message_packet { union { struct { - u32 bus_no; /* bus # (0..n-1) from the msg - * receiver's perspective */ - u32 dev_count; /* indicates the max number of - * devices on this bus */ - u64 channel_addr; /* Guest physical address of - * the channel, which can be - * dereferenced by the receiver - * of this ControlVm command */ + /* bus # (0..n-1) from the msg receiver's perspective */ + u32 bus_no; + /* indicates the max number of devices on this bus */ + u32 dev_count; + /* Guest physical address of the channel, which can be + * dereferenced by the receiver of this ControlVm command + */ + u64 channel_addr; u64 channel_bytes; /* size of the channel */ - uuid_le bus_data_type_uuid; /* indicates format of - * data in bus channel*/ + /* indicates format of data in bus channel*/ + uuid_le bus_data_type_uuid; uuid_le bus_inst_uuid; /* instance uuid for the bus */ } create_bus; /* for CONTROLVM_BUS_CREATE */ struct { - u32 bus_no; /* bus # (0..n-1) from the msg - * receiver's perspective */ + /* bus # (0..n-1) from the msg receiver's perspective */ + u32 bus_no; u32 reserved; /* Natural alignment purposes */ } destroy_bus; /* for CONTROLVM_BUS_DESTROY */ struct { - u32 bus_no; /* bus # (0..n-1) from the receiver's - * perspective */ + /* bus # (0..n-1) from the receiver's perspective */ + u32 bus_no; u32 reserved1; /* for alignment purposes */ - u64 guest_handle; /* This is used to convert - * guest physical address to - * physical address */ + /* This is used to convert guest physical address to physical address */ + u64 guest_handle; u64 recv_bus_irq_handle; /* specifies interrupt info. It is used by SP * to register to receive interrupts from the * CP. This interrupt is used for bus level * notifications. The corresponding - * sendBusInterruptHandle is kept in CP. */ + * sendBusInterruptHandle is kept in CP. 
+ */ } configure_bus; /* for CONTROLVM_BUS_CONFIGURE */ /* for CONTROLVM_DEVICE_CREATE */ struct controlvm_packet_device_create create_device; struct { - u32 bus_no; /* bus # (0..n-1) from the msg - * receiver's perspective */ + /* bus # (0..n-1) from the msg receiver's perspective */ + u32 bus_no; u32 dev_no; /* bus-relative (0..n-1) device # */ } destroy_device; /* for CONTROLVM_DEVICE_DESTROY */ /* for CONTROLVM_DEVICE_CONFIGURE */ struct controlvm_packet_device_configure configure_device; struct { - u32 bus_no; /* bus # (0..n-1) from the msg - * receiver's perspective */ + /* bus # (0..n-1) from the msg receiver's perspective */ + u32 bus_no; u32 dev_no; /* bus-relative (0..n-1) device # */ } reconfigure_device; /* for CONTROLVM_DEVICE_RECONFIGURE */ struct { @@ -307,8 +310,8 @@ struct controlvm_message_packet { u32 dev_no; struct spar_segment_state state; struct { - u32 phys_device:1; /* =1 if message is for - * a physical device */ + /* =1 if message is for a physical device */ + u32 phys_device:1; } flags; u8 reserved[2]; /* Natural alignment purposes */ } device_change_state; /* for CONTROLVM_DEVICE_CHANGESTATE */ @@ -320,9 +323,10 @@ struct controlvm_message_packet { } device_change_state_event; /* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */ struct { - u32 bus_count; /* indicates the max number of busses */ - u32 switch_count; /* indicates the max number of - * switches if a service partition */ + /* indicates the max number of busses */ + u32 bus_count; + /* indicates the max number of switches */ + u32 switch_count; enum ultra_chipset_feature features; u32 platform_number; /* Platform Number */ } init_chipset; /* for CONTROLVM_CHIPSET_INIT */ @@ -330,11 +334,12 @@ struct controlvm_message_packet { u32 options; /* reserved */ u32 test; /* bit 0 set to run embedded selftest */ } chipset_selftest; /* for CONTROLVM_CHIPSET_SELFTEST */ - u64 addr; /* a physical address of something, that can be - * dereferenced by the receiver of this - * ControlVm command (depends on command id) */ - u64 handle; /* a handle of something (depends on command - * id) */ + /* a physical address of something, that can be dereferenced + * by the receiver of this ControlVm command + */ + u64 addr; + /* a handle of something (depends on command id) */ + u64 handle; }; }; @@ -357,8 +362,8 @@ struct spar_controlvm_channel_protocol { u64 gp_nvram; /* guest phys addr of NVRAM channel */ u64 request_payload_offset; /* Offset to request payload area */ u64 event_payload_offset; /* Offset to event payload area */ - u32 request_payload_bytes; /* Bytes available in request payload - * area */ + /* Bytes available in request payload area */ + u32 request_payload_bytes; u32 event_payload_bytes;/* Bytes available in event payload area */ u32 control_channel_bytes; u32 nvram_channel_bytes; /* Bytes in PartitionNvram segment */ @@ -384,41 +389,37 @@ struct spar_controlvm_channel_protocol { u64 virtual_guest_image_size; u64 prototype_control_channel_offset; u64 virtual_guest_partition_handle; - - u16 restore_action; /* Restore Action field to restore the guest - * partition */ - u16 dump_action; /* For Windows guests it shows if the visordisk - * is running in dump mode */ + /* Restore Action field to restore the guest partition */ + u16 restore_action; + /* For Windows guests it shows if the visordisk is in dump mode */ + u16 dump_action; u16 nvram_fail_count; u16 saved_crash_message_count; /* = CONTROLVM_CRASHMSG_MAX */ - u32 saved_crash_message_offset; /* Offset to request payload area needed - * for crash dump */ - u32 
installation_error; /* Type of error encountered during - * installation */ + /* Offset to request payload area needed for crash dump */ + u32 saved_crash_message_offset; + /* Type of error encountered during installation */ + u32 installation_error; u32 installation_text_id; /* Id of string to display */ - u16 installation_remaining_steps;/* Number of remaining installation - * steps (for progress bars) */ - u8 tool_action; /* ULTRA_TOOL_ACTIONS Installation Action - * field */ + /* Number of remaining installation steps (for progress bars) */ + u16 installation_remaining_steps; + /* ULTRA_TOOL_ACTIONS Installation Action field */ + u8 tool_action; u8 reserved; /* alignment */ struct efi_spar_indication efi_spar_ind; struct efi_spar_indication efi_spar_ind_supported; u32 sp_reserved; - u8 reserved2[28]; /* Force signals to begin on 128-byte cache - * line */ - struct signal_queue_header request_queue;/* Service or guest partition - * uses this queue to send - * requests to Control */ - struct signal_queue_header response_queue;/* Control uses this queue to - * respond to service or guest - * partition requests */ - struct signal_queue_header event_queue; /* Control uses this queue to - * send events to service or - * guest partition */ - struct signal_queue_header event_ack_queue;/* Service or guest partition - * uses this queue to ack - * Control events */ - + /* Force signals to begin on 128-byte cache line */ + u8 reserved2[28]; + /* guest partition uses this queue to send requests to Control */ + struct signal_queue_header request_queue; + /* Control uses this queue to respond to service or guest + * partition requests + */ + struct signal_queue_header response_queue; + /* Control uses this queue to send events to guest partition */ + struct signal_queue_header event_queue; + /* Service or guest partition uses this queue to ack Control events */ + struct signal_queue_header event_ack_queue; /* Request fixed-size message pool - does not include payload */ struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX]; diff --git a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h index f59fd8a523c4..abdab4ad0b36 100644 --- a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h +++ b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h @@ -62,7 +62,7 @@ vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax) p++; remain--; chars++; - } else if (p == NULL) { + } else if (!p) { chars++; } nonprintable_streak = 0; @@ -72,7 +72,7 @@ vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax) p++; remain--; chars++; - } else if (p == NULL) { + } else if (!p) { chars++; } } else { @@ -124,7 +124,8 @@ vbuschannel_itoa(char *p, int remain, int num) } if (remain < digits) { /* not enough room left at <p> to hold number, so fill with - * '?' */ + * '?' 
+ */ for (i = 0; i < remain; i++, p++) *p = '?'; return remain; diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c index eac97d22278a..533bb5b3d284 100644 --- a/drivers/staging/unisys/visorbus/visorbus_main.c +++ b/drivers/staging/unisys/visorbus/visorbus_main.c @@ -221,7 +221,6 @@ visorbus_release_busdevice(struct device *xdev) { struct visor_device *dev = dev_get_drvdata(xdev); - dev_set_drvdata(xdev, NULL); kfree(dev); } @@ -701,12 +700,10 @@ DRIVER_ATTR_version(struct device_driver *xdrv, char *buf) static int register_driver_attributes(struct visor_driver *drv) { - int rc; struct driver_attribute version = __ATTR(version, S_IRUGO, DRIVER_ATTR_version, NULL); drv->version_attr = version; - rc = driver_create_file(&drv->driver, &drv->version_attr); - return rc; + return driver_create_file(&drv->driver, &drv->version_attr); } static void @@ -771,7 +768,7 @@ visordriver_probe_device(struct device *xdev) get_device(&dev->device); if (!drv->probe) { up(&dev->visordriver_callback_lock); - rc = -1; + rc = -ENODEV; goto away; } rc = drv->probe(dev); @@ -973,7 +970,7 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts); static int create_visor_device(struct visor_device *dev) { - int rc = -1; + int rc; u32 chipset_bus_no = dev->chipset_bus_no; u32 chipset_dev_no = dev->chipset_dev_no; @@ -995,6 +992,7 @@ create_visor_device(struct visor_device *dev) if (!dev->periodic_work) { POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no, DIAG_SEVERITY_ERR); + rc = -EINVAL; goto away; } @@ -1032,14 +1030,15 @@ create_visor_device(struct visor_device *dev) if (rc < 0) { POSTCODE_LINUX_3(DEVICE_REGISTER_FAILURE_PC, chipset_dev_no, DIAG_SEVERITY_ERR); - goto away_register; + goto away_unregister; } list_add_tail(&dev->list_all, &list_all_device_instances); return 0; -away_register: +away_unregister: device_unregister(&dev->device); + away: put_device(&dev->device); return rc; @@ -1058,23 +1057,21 @@ static int get_vbus_header_info(struct visorchannel *chan, struct spar_vbus_headerinfo *hdr_info) { - int rc = -1; - if (!SPAR_VBUS_CHANNEL_OK_CLIENT(visorchannel_get_header(chan))) - goto away; + return -EINVAL; + if (visorchannel_read(chan, sizeof(struct channel_header), hdr_info, sizeof(*hdr_info)) < 0) { - goto away; + return -EIO; } if (hdr_info->struct_bytes < sizeof(struct spar_vbus_headerinfo)) - goto away; + return -EINVAL; + if (hdr_info->device_info_struct_bytes < sizeof(struct ultra_vbus_deviceinfo)) { - goto away; + return -EINVAL; } - rc = 0; -away: - return rc; + return 0; } /* Write the contents of <info> to the struct @@ -1197,17 +1194,14 @@ fix_vbus_dev_info(struct visor_device *visordev) static int create_bus_instance(struct visor_device *dev) { - int rc; int id = dev->chipset_bus_no; struct spar_vbus_headerinfo *hdr_info; POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO); hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL); - if (!hdr_info) { - rc = -1; - goto away; - } + if (!hdr_info) + return -ENOMEM; dev_set_name(&dev->device, "visorbus%d", id); dev->device.bus = &visorbus_type; @@ -1217,8 +1211,8 @@ create_bus_instance(struct visor_device *dev) if (device_register(&dev->device) < 0) { POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id, POSTCODE_SEVERITY_ERR); - rc = -1; - goto away_mem; + kfree(hdr_info); + return -ENODEV; } if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) { @@ -1234,11 +1228,6 @@ create_bus_instance(struct visor_device *dev) list_add_tail(&dev->list_all, &list_all_bus_instances); 
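The visorbus_main.c hunks around this point drop the "rc = -1; goto away;" idiom in favour of early returns carrying specific errno values (-ENOMEM, -ENODEV, -EINVAL, -EIO) and unwind labels named after what they undo. A generic user-space C sketch of the resulting error-path shape, not the driver's actual code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct example_dev {
	void *hdr_info;
};

static int register_with_bus(struct example_dev *dev)
{
	(void)dev;
	return 0;			/* pretend registration succeeds */
}

/*
 * Early returns with specific errno values; each failure path undoes
 * exactly what was set up before it instead of funnelling through one
 * catch-all label with rc = -1.
 */
static int create_instance(struct example_dev *dev)
{
	dev->hdr_info = calloc(1, 64);
	if (!dev->hdr_info)
		return -ENOMEM;

	if (register_with_bus(dev) < 0) {
		free(dev->hdr_info);
		dev->hdr_info = NULL;
		return -ENODEV;
	}

	return 0;
}

int main(void)
{
	struct example_dev dev = { 0 };

	printf("create_instance: %d\n", create_instance(&dev));
	free(dev.hdr_info);
	return 0;
}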
dev_set_drvdata(&dev->device, dev); return 0; - -away_mem: - kfree(hdr_info); -away: - return rc; } /** Remove a device instance for the visor bus itself. @@ -1328,7 +1317,7 @@ chipset_bus_destroy(struct visor_device *dev) static void chipset_device_create(struct visor_device *dev_info) { - int rc = -1; + int rc; u32 bus_no = dev_info->chipset_bus_no; u32 dev_no = dev_info->chipset_dev_no; @@ -1371,9 +1360,9 @@ pause_state_change_complete(struct visor_device *dev, int status) return; /* Notify the chipset driver that the pause is complete, which - * will presumably want to send some sort of response to the - * initiator. - */ + * will presumably want to send some sort of response to the + * initiator. + */ (*chipset_responders.device_pause) (dev, status); } @@ -1405,7 +1394,7 @@ resume_state_change_complete(struct visor_device *dev, int status) static void initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause) { - int rc = -1, x; + int rc; struct visor_driver *drv = NULL; void (*notify_func)(struct visor_device *dev, int response) = NULL; @@ -1414,14 +1403,18 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause) else notify_func = chipset_responders.device_resume; if (!notify_func) - goto away; + return; drv = to_visor_driver(dev->device.driver); - if (!drv) - goto away; + if (!drv) { + (*notify_func)(dev, -ENODEV); + return; + } - if (dev->pausing || dev->resuming) - goto away; + if (dev->pausing || dev->resuming) { + (*notify_func)(dev, -EBUSY); + return; + } /* Note that even though both drv->pause() and drv->resume * specify a callback function, it is NOT necessary for us to @@ -1431,11 +1424,13 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause) * visorbus while child function drivers are still running. */ if (is_pause) { - if (!drv->pause) - goto away; + if (!drv->pause) { + (*notify_func)(dev, -EINVAL); + return; + } dev->pausing = true; - x = drv->pause(dev, pause_state_change_complete); + rc = drv->pause(dev, pause_state_change_complete); } else { /* This should be done at BUS resume time, but an * existing problem prevents us from ever getting a bus @@ -1444,24 +1439,20 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause) * would never even get here in that case. 
*/ fix_vbus_dev_info(dev); - if (!drv->resume) - goto away; + if (!drv->resume) { + (*notify_func)(dev, -EINVAL); + return; + } dev->resuming = true; - x = drv->resume(dev, resume_state_change_complete); + rc = drv->resume(dev, resume_state_change_complete); } - if (x < 0) { + if (rc < 0) { if (is_pause) dev->pausing = false; else dev->resuming = false; - goto away; - } - rc = 0; -away: - if (rc < 0) { - if (notify_func) - (*notify_func)(dev, rc); + (*notify_func)(dev, -EINVAL); } } diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c index 891b8db7c5ec..b68a904ac617 100644 --- a/drivers/staging/unisys/visorbus/visorchannel.c +++ b/drivers/staging/unisys/visorbus/visorchannel.c @@ -73,7 +73,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, channel = kzalloc(sizeof(*channel), gfp); if (!channel) - goto cleanup; + return NULL; channel->needs_lock = needs_lock; spin_lock_init(&channel->insert_lock); @@ -89,14 +89,14 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, if (!channel->requested) { if (uuid_le_cmp(guid, spar_video_guid)) { /* Not the video channel we care about this */ - goto cleanup; + goto err_destroy_channel; } } channel->mapped = memremap(physaddr, size, MEMREMAP_WB); if (!channel->mapped) { release_mem_region(physaddr, size); - goto cleanup; + goto err_destroy_channel; } channel->physaddr = physaddr; @@ -105,7 +105,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, err = visorchannel_read(channel, 0, &channel->chan_hdr, sizeof(struct channel_header)); if (err) - goto cleanup; + goto err_destroy_channel; /* we had better be a CLIENT of this channel */ if (channel_bytes == 0) @@ -122,7 +122,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, if (!channel->requested) { if (uuid_le_cmp(guid, spar_video_guid)) { /* Different we care about this */ - goto cleanup; + goto err_destroy_channel; } } @@ -130,7 +130,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, MEMREMAP_WB); if (!channel->mapped) { release_mem_region(channel->physaddr, channel_bytes); - goto cleanup; + goto err_destroy_channel; } channel->nbytes = channel_bytes; @@ -139,7 +139,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, channel->guid = guid; return channel; -cleanup: +err_destroy_channel: visorchannel_destroy(channel); return NULL; } @@ -293,14 +293,14 @@ visorchannel_clear(struct visorchannel *channel, ulong offset, u8 ch, err = visorchannel_write(channel, offset + written, buf, thisbytes); if (err) - goto cleanup; + goto out_free_page; written += thisbytes; nbytes -= thisbytes; } err = 0; -cleanup: +out_free_page: free_page((unsigned long)buf); return err; } @@ -461,7 +461,7 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg) if (!sig_read_header(channel, queue, &sig_hdr)) return false; - sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots); + sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots; if (sig_hdr.head == sig_hdr.tail) { sig_hdr.num_overflows++; visorchannel_write(channel, @@ -521,7 +521,7 @@ visorchannel_signalqueue_slots_avail(struct visorchannel *channel, u32 queue) tail = sig_hdr.tail; if (head < tail) head = head + sig_hdr.max_slots; - slots_used = (head - tail); + slots_used = head - tail; slots_avail = sig_hdr.max_signals - slots_used; return (int)slots_avail; } diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c index 
07594f43853d..5fbda7b218c7 100644 --- a/drivers/staging/unisys/visorbus/visorchipset.c +++ b/drivers/staging/unisys/visorbus/visorchipset.c @@ -43,11 +43,10 @@ #define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100 -#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128) +#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128) #define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000 - #define UNISYS_SPAR_LEAF_ID 0x40000000 /* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */ @@ -62,6 +61,7 @@ static int visorchipset_major; static int visorchipset_visorbusregwait = 1; /* default is on */ static int visorchipset_holdchipsetready; static unsigned long controlvm_payload_bytes_buffered; +static u32 dump_vhba_bus; static int visorchipset_open(struct inode *inode, struct file *file) @@ -86,8 +86,8 @@ visorchipset_release(struct inode *inode, struct file *file) */ #define MIN_IDLE_SECONDS 10 static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST; -static unsigned long most_recent_message_jiffies; /* when we got our last - * controlvm message */ +/* when we got our last controlvm message */ +static unsigned long most_recent_message_jiffies; static int visorbusregistered; #define MAX_CHIPSET_EVENTS 2 @@ -103,7 +103,6 @@ struct parser_context { }; static struct delayed_work periodic_controlvm_work; -static struct workqueue_struct *periodic_controlvm_workqueue; static DEFINE_SEMAPHORE(notifier_lock); static struct cdev file_cdev; @@ -120,7 +119,8 @@ static struct visorchannel *controlvm_channel; struct visor_controlvm_payload_info { u8 *ptr; /* pointer to base address of payload pool */ u64 offset; /* offset from beginning of controlvm - * channel to beginning of payload * pool */ + * channel to beginning of payload * pool + */ u32 bytes; /* number of bytes in payload pool */ }; @@ -184,7 +184,8 @@ struct putfile_request { * - this list is added to when controlvm messages come in that supply * file data * - this list is removed from via the hotplug program that is actually - * consuming these buffers to write as file data */ + * consuming these buffers to write as file data + */ struct list_head input_buffer_list; spinlock_t req_list_lock; /* lock for input_buffer_list */ @@ -352,7 +353,6 @@ static void controlvm_respond_physdev_changestate( struct controlvm_message_header *msg_hdr, int response, struct spar_segment_state state); - static void parser_done(struct parser_context *ctx); static struct parser_context * @@ -377,7 +377,7 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry) rc = NULL; goto cleanup; } - ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY); + ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY); if (!ctx) { if (retry) *retry = true; @@ -397,24 +397,16 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry) rc = NULL; goto cleanup; } - p = __va((unsigned long) (addr)); + p = __va((unsigned long)(addr)); memcpy(ctx->data, p, bytes); } else { - void *mapping; - - if (!request_mem_region(addr, bytes, "visorchipset")) { - rc = NULL; - goto cleanup; - } + void *mapping = memremap(addr, bytes, MEMREMAP_WB); - mapping = memremap(addr, bytes, MEMREMAP_WB); if (!mapping) { - release_mem_region(addr, bytes); rc = NULL; goto cleanup; } memcpy(ctx->data, mapping, bytes); - release_mem_region(addr, bytes); memunmap(mapping); } @@ -437,7 +429,7 @@ parser_id_get(struct parser_context *ctx) { struct spar_controlvm_parameters_header *phdr = NULL; - if (ctx == NULL) + if (!ctx) return NULL_UUID_LE; phdr = 
(struct spar_controlvm_parameters_header *)(ctx->data); return phdr->id; @@ -460,8 +452,9 @@ parser_param_start(struct parser_context *ctx, { struct spar_controlvm_parameters_header *phdr = NULL; - if (ctx == NULL) - goto Away; + if (!ctx) + return; + phdr = (struct spar_controlvm_parameters_header *)(ctx->data); switch (which_string) { case PARSERSTRING_INITIATOR: @@ -483,9 +476,6 @@ parser_param_start(struct parser_context *ctx, default: break; } - -Away: - return; } static void parser_done(struct parser_context *ctx) @@ -520,16 +510,15 @@ parser_string_get(struct parser_context *ctx) } if (value_length < 0) /* '\0' was not included in the length */ value_length = nscan; - value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY); - if (value == NULL) + value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY); + if (!value) return NULL; if (value_length > 0) memcpy(value, pscan, value_length); - ((u8 *) (value))[value_length] = '\0'; + ((u8 *)(value))[value_length] = '\0'; return value; } - static ssize_t toolaction_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -537,8 +526,8 @@ static ssize_t toolaction_show(struct device *dev, u8 tool_action; visorchannel_read(controlvm_channel, - offsetof(struct spar_controlvm_channel_protocol, - tool_action), &tool_action, sizeof(u8)); + offsetof(struct spar_controlvm_channel_protocol, + tool_action), &tool_action, sizeof(u8)); return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action); } @@ -706,6 +695,7 @@ static int match_visorbus_dev_by_id(struct device *dev, void *data) return 0; } + struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no, struct visor_device *from) { @@ -788,13 +778,15 @@ chipset_init(struct controlvm_message *inmsg) POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO); /* Set features to indicate we support parahotplug (if Command - * also supports it). */ + * also supports it). + */ features = inmsg->cmd.init_chipset. features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG; /* Set the "reply" bit so Command knows this is a - * features-aware driver. */ + * features-aware driver. 
+ */ features |= ULTRA_CHIPSET_FEATURE_REPLY; cleanup: @@ -813,7 +805,7 @@ controlvm_init_response(struct controlvm_message *msg, msg->hdr.payload_max_bytes = 0; if (response < 0) { msg->hdr.flags.failed = 1; - msg->hdr.completion_status = (u32) (-response); + msg->hdr.completion_status = (u32)(-response); } } @@ -868,11 +860,64 @@ enum crash_obj_type { }; static void +save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ) +{ + u32 local_crash_msg_offset; + u16 local_crash_msg_count; + + if (visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + saved_crash_message_count), + &local_crash_msg_count, sizeof(u16)) < 0) { + POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { + POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC, + local_crash_msg_count, + POSTCODE_SEVERITY_ERR); + return; + } + + if (visorchannel_read(controlvm_channel, + offsetof(struct spar_controlvm_channel_protocol, + saved_crash_message_offset), + &local_crash_msg_offset, sizeof(u32)) < 0) { + POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + + if (typ == CRASH_BUS) { + if (visorchannel_write(controlvm_channel, + local_crash_msg_offset, + msg, + sizeof(struct controlvm_message)) < 0) { + POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + } else { + local_crash_msg_offset += sizeof(struct controlvm_message); + if (visorchannel_write(controlvm_channel, + local_crash_msg_offset, + msg, + sizeof(struct controlvm_message)) < 0) { + POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC, + POSTCODE_SEVERITY_ERR); + return; + } + } +} + +static void bus_responder(enum controlvm_id cmd_id, struct controlvm_message_header *pending_msg_hdr, int response) { - if (pending_msg_hdr == NULL) + if (!pending_msg_hdr) return; /* no controlvm response needed */ if (pending_msg_hdr->id != (u32)cmd_id) @@ -890,7 +935,7 @@ device_changestate_responder(enum controlvm_id cmd_id, u32 bus_no = p->chipset_bus_no; u32 dev_no = p->chipset_dev_no; - if (p->pending_msg_hdr == NULL) + if (!p->pending_msg_hdr) return; /* no controlvm response needed */ if (p->pending_msg_hdr->id != cmd_id) return; @@ -911,7 +956,7 @@ device_responder(enum controlvm_id cmd_id, struct controlvm_message_header *pending_msg_hdr, int response) { - if (pending_msg_hdr == NULL) + if (!pending_msg_hdr) return; /* no controlvm response needed */ if (pending_msg_hdr->id != (u32)cmd_id) @@ -1127,6 +1172,10 @@ bus_create(struct controlvm_message *inmsg) goto cleanup; } bus_info->visorchannel = visorchannel; + if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) { + dump_vhba_bus = bus_no; + save_crash_message(inmsg, CRASH_BUS); + } POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO); @@ -1177,7 +1226,7 @@ bus_configure(struct controlvm_message *inmsg, POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, POSTCODE_SEVERITY_ERR); rc = -CONTROLVM_RESP_ERROR_BUS_INVALID; - } else if (bus_info->pending_msg_hdr != NULL) { + } else if (bus_info->pending_msg_hdr) { POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no, POSTCODE_SEVERITY_ERR); rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT; @@ -1263,6 +1312,10 @@ my_device_create(struct controlvm_message *inmsg) } dev_info->visorchannel = visorchannel; dev_info->channel_type_guid = cmd->create_device.data_type_uuid; + if (uuid_le_cmp(cmd->create_device.data_type_uuid, + spar_vhba_channel_protocol_uuid) == 0) + 
save_crash_message(inmsg, CRASH_DEV); + POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no, POSTCODE_SEVERITY_INFO); cleanup: @@ -1913,8 +1966,7 @@ cleanup: poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST; } - queue_delayed_work(periodic_controlvm_workqueue, - &periodic_controlvm_work, poll_jiffies); + schedule_delayed_work(&periodic_controlvm_work, poll_jiffies); } static void @@ -2011,8 +2063,7 @@ cleanup: poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW; - queue_delayed_work(periodic_controlvm_workqueue, - &periodic_controlvm_work, poll_jiffies); + schedule_delayed_work(&periodic_controlvm_work, poll_jiffies); } static void @@ -2197,7 +2248,7 @@ static inline int issue_vmcall_update_physical_time(u64 adjustment) static long visorchipset_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - s64 adjustment; + u64 adjustment; s64 vrtc_offset; switch (cmd) { @@ -2262,7 +2313,6 @@ visorchipset_init(struct acpi_device *acpi_device) { int rc = 0; u64 addr; - int tmp_sz = sizeof(struct spar_controlvm_channel_protocol); uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID; addr = controlvm_get_channel_address(); @@ -2272,8 +2322,10 @@ visorchipset_init(struct acpi_device *acpi_device) memset(&busdev_notifiers, 0, sizeof(busdev_notifiers)); memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info)); - controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz, + controlvm_channel = visorchannel_create_with_lock(addr, 0, GFP_KERNEL, uuid); + if (!controlvm_channel) + return -ENODEV; if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT( visorchannel_get_header(controlvm_channel))) { initialize_controlvm_payload(); @@ -2299,29 +2351,15 @@ visorchipset_init(struct acpi_device *acpi_device) else INIT_DELAYED_WORK(&periodic_controlvm_work, controlvm_periodic_work); - periodic_controlvm_workqueue = - create_singlethread_workqueue("visorchipset_controlvm"); - if (!periodic_controlvm_workqueue) { - POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC, - DIAG_SEVERITY_ERR); - rc = -ENOMEM; - goto cleanup; - } most_recent_message_jiffies = jiffies; poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST; - rc = queue_delayed_work(periodic_controlvm_workqueue, - &periodic_controlvm_work, poll_jiffies); - if (rc < 0) { - POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC, - DIAG_SEVERITY_ERR); - goto cleanup; - } + schedule_delayed_work(&periodic_controlvm_work, poll_jiffies); visorchipset_platform_device.dev.devt = major_dev; if (platform_device_register(&visorchipset_platform_device) < 0) { POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR); - rc = -1; + rc = -ENODEV; goto cleanup; } POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO); @@ -2351,10 +2389,7 @@ visorchipset_exit(struct acpi_device *acpi_device) visorbus_exit(); - cancel_delayed_work(&periodic_controlvm_work); - flush_workqueue(periodic_controlvm_workqueue); - destroy_workqueue(periodic_controlvm_workqueue); - periodic_controlvm_workqueue = NULL; + cancel_delayed_work_sync(&periodic_controlvm_work); destroy_controlvm_payload_info(&controlvm_payload_info); memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header)); diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c index d5178b44ba8c..e93bb1dbfd97 100644 --- a/drivers/staging/unisys/visorhba/visorhba_main.c +++ b/drivers/staging/unisys/visorhba/visorhba_main.c @@ -167,7 +167,7 @@ static int visor_thread_start(struct visor_thread_info *thrinfo, { /* used to stop the thread */ init_completion(&thrinfo->has_stopped); 
- thrinfo->task = kthread_run(threadfn, thrcontext, name); + thrinfo->task = kthread_run(threadfn, thrcontext, "%s", name); if (IS_ERR(thrinfo->task)) { thrinfo->id = 0; return PTR_ERR(thrinfo->task); @@ -323,9 +323,9 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype, goto err_del_scsipending_ent; if (tasktype == TASK_MGMT_ABORT_TASK) - scsicmd->result = (DID_ABORT << 16); + scsicmd->result = DID_ABORT << 16; else - scsicmd->result = (DID_RESET << 16); + scsicmd->result = DID_RESET << 16; scsicmd->scsi_done(scsicmd); @@ -1062,7 +1062,7 @@ static int visorhba_resume(struct visor_device *dev, return -EINVAL; if (devdata->serverdown && !devdata->serverchangingstate) - devdata->serverchangingstate = 1; + devdata->serverchangingstate = true; visor_thread_start(&devdata->threadinfo, process_incoming_rsps, devdata, "vhba_incming"); diff --git a/drivers/staging/unisys/visorinput/Kconfig b/drivers/staging/unisys/visorinput/Kconfig index 3476d419d32c..655cd62433de 100644 --- a/drivers/staging/unisys/visorinput/Kconfig +++ b/drivers/staging/unisys/visorinput/Kconfig @@ -4,7 +4,7 @@ config UNISYS_VISORINPUT tristate "Unisys visorinput driver" - depends on UNISYSSPAR && UNISYS_VISORBUS && FB + depends on UNISYSSPAR && UNISYS_VISORBUS && FB && INPUT ---help--- The Unisys s-Par visorinput driver provides a virtualized system console (keyboard and mouse) that is accessible through the diff --git a/drivers/staging/unisys/visorinput/ultrainputreport.h b/drivers/staging/unisys/visorinput/ultrainputreport.h index 3e6a52f4b6bf..1bc3d2064080 100644 --- a/drivers/staging/unisys/visorinput/ultrainputreport.h +++ b/drivers/staging/unisys/visorinput/ultrainputreport.h @@ -29,33 +29,40 @@ enum ultra_inputaction { inputaction_mouse_button_up = 3, /* arg1: 1=left,2=center,3=right */ inputaction_mouse_button_click = 4, /* arg1: 1=left,2=center,3=right */ inputaction_mouse_button_dclick = 5, /* arg1: 1=left,2=center, - 3=right */ + * 3=right + */ inputaction_wheel_rotate_away = 6, /* arg1: wheel rotation away from - user */ + * user + */ inputaction_wheel_rotate_toward = 7, /* arg1: wheel rotation toward - user */ + * user + */ inputaction_set_max_xy = 8, /* set screen maxXY; arg1=x, arg2=y */ inputaction_key_down = 64, /* arg1: scancode, as follows: - If arg1 <= 0xff, it's a 1-byte - scancode and arg1 is that scancode. - If arg1 > 0xff, it's a 2-byte - scanecode, with the 1st byte in the - low 8 bits, and the 2nd byte in the - high 8 bits. E.g., the right ALT key - would appear as x'38e0'. */ + * If arg1 <= 0xff, it's a 1-byte + * scancode and arg1 is that scancode. + * If arg1 > 0xff, it's a 2-byte + * scanecode, with the 1st byte in the + * low 8 bits, and the 2nd byte in the + * high 8 bits. E.g., the right ALT key + * would appear as x'38e0'. 
+ */ inputaction_key_up = 65, /* arg1: scancode (in same format as - inputaction_keyDown) */ + * inputaction_keyDown) + */ inputaction_set_locking_key_state = 66, /* arg1: scancode (in same format - as inputaction_keyDown); - MUST refer to one of the - locking keys, like capslock, - numlock, or scrolllock - arg2: 1 iff locking key should be - in the LOCKED position - (e.g., light is ON) */ + * as inputaction_keyDown); + * MUST refer to one of the + * locking keys, like capslock, + * numlock, or scrolllock + * arg2: 1 iff locking key should be + * in the LOCKED position + * (e.g., light is ON) + */ inputaction_key_down_up = 67, /* arg1: scancode (in same format - as inputaction_keyDown) */ + * as inputaction_keyDown) + */ inputaction_last }; diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c index 38d4d5b884df..13c0316112ac 100644 --- a/drivers/staging/unisys/visorinput/visorinput.c +++ b/drivers/staging/unisys/visorinput/visorinput.c @@ -222,8 +222,9 @@ static int visorinput_open(struct input_dev *visorinput_dev) struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev); if (!devdata) { - pr_err("%s input_get_drvdata(%p) returned NULL\n", - __func__, visorinput_dev); + dev_err(&visorinput_dev->dev, + "%s input_get_drvdata(%p) returned NULL\n", + __func__, visorinput_dev); return -EINVAL; } dev_dbg(&visorinput_dev->dev, "%s opened\n", __func__); @@ -236,8 +237,9 @@ static void visorinput_close(struct input_dev *visorinput_dev) struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev); if (!devdata) { - pr_err("%s input_get_drvdata(%p) returned NULL\n", - __func__, visorinput_dev); + dev_err(&visorinput_dev->dev, + "%s input_get_drvdata(%p) returned NULL\n", + __func__, visorinput_dev); return; } dev_dbg(&visorinput_dev->dev, "%s closed\n", __func__); diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c index 05194707278a..be0d057346c3 100644 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@ -36,6 +36,7 @@ * = 163840 bytes */ #define MAX_BUF 163840 +#define NAPI_WEIGHT 64 static int visornic_probe(struct visor_device *dev); static void visornic_remove(struct visor_device *dev); @@ -58,8 +59,6 @@ static const struct file_operations debugfs_enable_ints_fops = { .write = enable_ints_write, }; -static struct workqueue_struct *visornic_timeout_reset_workqueue; - /* GUIDS for director channel type supported by this driver. 
*/ static struct visor_channeltype_descriptor visornic_channel_types[] = { /* Note that the only channel type we expect to be reported by the @@ -376,8 +375,8 @@ visornic_serverdown(struct visornic_devdata *devdata, __func__); spin_unlock_irqrestore(&devdata->priv_lock, flags); return -EINVAL; - } else - spin_unlock_irqrestore(&devdata->priv_lock, flags); + } + spin_unlock_irqrestore(&devdata->priv_lock, flags); return 0; } @@ -761,9 +760,8 @@ static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata) if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) return devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done; - else - return (ULONG_MAX - devdata->chstat.got_xmit_done - + devdata->chstat.sent_xmit + 1); + return (ULONG_MAX - devdata->chstat.got_xmit_done + + devdata->chstat.sent_xmit + 1); } /** @@ -1028,7 +1026,7 @@ visornic_set_multi(struct net_device *netdev) cmdrsp->net.type = NET_RCV_PROMISC; cmdrsp->net.enbdis.context = netdev; cmdrsp->net.enbdis.enable = - (netdev->flags & IFF_PROMISC); + netdev->flags & IFF_PROMISC; visorchannel_signalinsert(devdata->dev->visorchannel, IOCHAN_TO_IOPART, cmdrsp); @@ -1069,7 +1067,7 @@ visornic_xmit_timeout(struct net_device *netdev) spin_unlock_irqrestore(&devdata->priv_lock, flags); return; } - queue_work(visornic_timeout_reset_workqueue, &devdata->timeout_reset); + schedule_work(&devdata->timeout_reset); spin_unlock_irqrestore(&devdata->priv_lock, flags); } @@ -1218,8 +1216,9 @@ visornic_rx(struct uiscmdrsp *cmdrsp) /* length rcvd is greater than firstfrag in this skb rcv buf */ skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */ skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that - will be in - frag_list */ + * will be in + * frag_list + */ } else { /* data fits in this skb - no chaining - do * PRECAUTIONARY check @@ -1315,12 +1314,14 @@ visornic_rx(struct uiscmdrsp *cmdrsp) } if (found_mc) break; /* accept packet, dest - matches a multicast - address */ + * matches a multicast + * address + */ } } else if (skb->pkt_type == PACKET_HOST) { break; /* accept packet, h_dest must match vnic - mac address */ + * mac address + */ } else if (skb->pkt_type == PACKET_OTHERHOST) { /* something is not right */ dev_err(&devdata->netdev->dev, @@ -1363,7 +1364,6 @@ devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev) { if (!devdata) return NULL; - memset(devdata, '\0', sizeof(struct visornic_devdata)); devdata->dev = dev; devdata->incarnation_id = get_jiffies_64(); return devdata; @@ -1613,14 +1613,15 @@ drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata) */ static void service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata, - int *rx_work_done) + int *rx_work_done, int budget) { unsigned long flags; struct net_device *netdev; + while (*rx_work_done < budget) { /* TODO: CLIENT ACQUIRE -- Don't really need this at the - * moment */ - for (;;) { + * moment + */ if (!visorchannel_signalremove(devdata->dev->visorchannel, IOCHAN_FROM_IOPART, cmdrsp)) @@ -1709,7 +1710,7 @@ static int visornic_poll(struct napi_struct *napi, int budget) int rx_count = 0; send_rcv_posts_if_needed(devdata); - service_resp_queue(devdata->cmdrsp, devdata, &rx_count); + service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget); /* * If there aren't any more packets to receive @@ -1768,7 +1769,7 @@ static int visornic_probe(struct visor_device *dev) } netdev->netdev_ops = &visornic_dev_ops; - netdev->watchdog_timeo = (5 * HZ); + netdev->watchdog_timeo = 5 * 
HZ; SET_NETDEV_DEV(netdev, &dev->device); /* Get MAC adddress from channel and read it into the device. */ @@ -1893,6 +1894,16 @@ static int visornic_probe(struct visor_device *dev) goto cleanup_napi_add; } + /* Let's start our threads to get responses */ + netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT); + + /* + * Note: Interupts have to be enable before the while + * loop below because the napi routine is responsible for + * setting enab_dis_acked + */ + visorbus_enable_channel_interrupts(dev); + err = register_netdev(netdev); if (err) { dev_err(&dev->device, @@ -1984,7 +1995,7 @@ static void visornic_remove(struct visor_device *dev) } /* going_away prevents new items being added to the workqueues */ - flush_workqueue(visornic_timeout_reset_workqueue); + cancel_work_sync(&devdata->timeout_reset); debugfs_remove_recursive(devdata->eth_debugfs_dir); @@ -2103,21 +2114,10 @@ static int visornic_init(void) if (!ret) goto cleanup_debugfs; - /* create workqueue for tx timeout reset */ - visornic_timeout_reset_workqueue = - create_singlethread_workqueue("visornic_timeout_reset"); - if (!visornic_timeout_reset_workqueue) - goto cleanup_workqueue; - err = visorbus_register_visor_driver(&visornic_driver); if (!err) return 0; -cleanup_workqueue: - if (visornic_timeout_reset_workqueue) { - flush_workqueue(visornic_timeout_reset_workqueue); - destroy_workqueue(visornic_timeout_reset_workqueue); - } cleanup_debugfs: debugfs_remove_recursive(visornic_debugfs_dir); @@ -2133,10 +2133,6 @@ static void visornic_cleanup(void) { visorbus_unregister_visor_driver(&visornic_driver); - if (visornic_timeout_reset_workqueue) { - flush_workqueue(visornic_timeout_reset_workqueue); - destroy_workqueue(visornic_timeout_reset_workqueue); - } debugfs_remove_recursive(visornic_debugfs_dir); } diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c index 4f3cdbcedb3e..28a45689e2f4 100644 --- a/drivers/staging/vme/devices/vme_pio2_core.c +++ b/drivers/staging/vme/devices/vme_pio2_core.c @@ -215,11 +215,9 @@ static int pio2_probe(struct vme_dev *vdev) u8 reg; int vec; - card = kzalloc(sizeof(*card), GFP_KERNEL); - if (!card) { - retval = -ENOMEM; - goto err_struct; - } + card = devm_kzalloc(&vdev->dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; card->id = vdev->num; card->bus = bus[card->id]; @@ -232,8 +230,7 @@ static int pio2_probe(struct vme_dev *vdev) for (i = 0; i < PIO2_VARIANT_LENGTH; i++) { if (!isdigit(card->variant[i])) { dev_err(&card->vdev->dev, "Variant invalid\n"); - retval = -EINVAL; - goto err_variant; + return -EINVAL; } } @@ -244,8 +241,7 @@ static int pio2_probe(struct vme_dev *vdev) if (card->irq_vector & ~PIO2_VME_VECTOR_MASK) { dev_err(&card->vdev->dev, "Invalid VME IRQ Vector, vector must not use lower 4 bits\n"); - retval = -EINVAL; - goto err_vector; + return -EINVAL; } /* @@ -284,8 +280,7 @@ static int pio2_probe(struct vme_dev *vdev) if (!card->window) { dev_err(&card->vdev->dev, "Unable to assign VME master resource\n"); - retval = -EIO; - goto err_window; + return -EIO; } retval = vme_master_set(card->window, 1, card->base, 0x10000, VME_A24, @@ -430,11 +425,6 @@ err_read: vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16); err_set: vme_master_free(card->window); -err_window: -err_vector: -err_variant: - kfree(card); -err_struct: return retval; } @@ -466,8 +456,6 @@ static int pio2_remove(struct vme_dev *vdev) vme_master_free(card->window); - kfree(card); - return 0; } diff --git 
a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c index b6730a8068fd..3d338122b590 100644 --- a/drivers/staging/vt6655/card.c +++ b/drivers/staging/vt6655/card.c @@ -443,7 +443,6 @@ bool CARDbRadioPowerOff(struct vnt_private *priv) MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2); MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); break; - } MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON); @@ -499,7 +498,6 @@ bool CARDbRadioPowerOn(struct vnt_private *priv) MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3)); break; - } priv->bRadioOff = false; @@ -535,11 +533,9 @@ CARDvSafeResetTx( } /* set MAC TD pointer */ - MACvSetCurrTXDescAddr(TYPE_TXDMA0, priv->PortOffset, - (priv->td0_pool_dma)); + MACvSetCurrTXDescAddr(TYPE_TXDMA0, priv, priv->td0_pool_dma); - MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv->PortOffset, - (priv->td1_pool_dma)); + MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv, priv->td1_pool_dma); /* set MAC Beacon TX pointer */ MACvSetCurrBCNTxDescAddr(priv->PortOffset, @@ -590,11 +586,9 @@ CARDvSafeResetRx( MACvRx0PerPktMode(priv->PortOffset); MACvRx1PerPktMode(priv->PortOffset); /* set MAC RD pointer */ - MACvSetCurrRx0DescAddr(priv->PortOffset, - priv->rd0_pool_dma); + MACvSetCurrRx0DescAddr(priv, priv->rd0_pool_dma); - MACvSetCurrRx1DescAddr(priv->PortOffset, - priv->rd1_pool_dma); + MACvSetCurrRx1DescAddr(priv, priv->rd1_pool_dma); } /* @@ -816,7 +810,6 @@ bool CARDbIsOFDMinBasicRate(struct vnt_private *priv) unsigned char CARDbyGetPktType(struct vnt_private *priv) { - if (priv->byBBType == BB_TYPE_11A || priv->byBBType == BB_TYPE_11B) return (unsigned char)priv->byBBType; else if (CARDbIsOFDMinBasicRate((void *)priv)) @@ -839,8 +832,6 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv) */ void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode) { - void __iomem *dwIoBase = priv->PortOffset; - switch (wLoopbackMode) { case CARD_LB_NONE: case CARD_LB_MAC: @@ -850,7 +841,7 @@ void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode break; } /* set MAC loopback */ - MACvSetLoopbackMode(dwIoBase, LOBYTE(wLoopbackMode)); + MACvSetLoopbackMode(priv, LOBYTE(wLoopbackMode)); /* set Baseband loopback */ } @@ -867,9 +858,8 @@ void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode */ bool CARDbSoftwareReset(struct vnt_private *priv) { - /* reset MAC */ - if (!MACbSafeSoftwareReset(priv->PortOffset)) + if (!MACbSafeSoftwareReset(priv)) return false; return true; diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c index 7a717828fa09..9ac1ef9d0d51 100644 --- a/drivers/staging/vt6655/channel.c +++ b/drivers/staging/vt6655/channel.c @@ -174,64 +174,63 @@ void vnt_init_bands(struct vnt_private *priv) * Return Value: true if succeeded; false if failed. 
* */ -bool set_channel(void *pDeviceHandler, struct ieee80211_channel *ch) +bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch) { - struct vnt_private *pDevice = pDeviceHandler; - bool bResult = true; + bool ret = true; - if (pDevice->byCurrentCh == ch->hw_value) - return bResult; + if (priv->byCurrentCh == ch->hw_value) + return ret; /* Set VGA to max sensitivity */ - if (pDevice->bUpdateBBVGA && - pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) { - pDevice->byBBVGACurrent = pDevice->abyBBVGA[0]; + if (priv->bUpdateBBVGA && + priv->byBBVGACurrent != priv->abyBBVGA[0]) { + priv->byBBVGACurrent = priv->abyBBVGA[0]; - BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent); + BBvSetVGAGainOffset(priv, priv->byBBVGACurrent); } /* clear NAV */ - MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV); + MACvRegBitsOn(priv->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV); /* TX_PE will reserve 3 us for MAX2829 A mode only, it is for better TX throughput */ - if (pDevice->byRFType == RF_AIROHA7230) - RFbAL7230SelectChannelPostProcess(pDevice, pDevice->byCurrentCh, + if (priv->byRFType == RF_AIROHA7230) + RFbAL7230SelectChannelPostProcess(priv, priv->byCurrentCh, ch->hw_value); - pDevice->byCurrentCh = ch->hw_value; - bResult &= RFbSelectChannel(pDevice, pDevice->byRFType, - ch->hw_value); + priv->byCurrentCh = ch->hw_value; + ret &= RFbSelectChannel(priv, priv->byRFType, + ch->hw_value); /* Init Synthesizer Table */ - if (pDevice->bEnablePSMode) - RFvWriteWakeProgSyn(pDevice, pDevice->byRFType, ch->hw_value); + if (priv->bEnablePSMode) + RFvWriteWakeProgSyn(priv, priv->byRFType, ch->hw_value); - BBvSoftwareReset(pDevice); + BBvSoftwareReset(priv); - if (pDevice->byLocalID > REV_ID_VT3253_B1) { + if (priv->byLocalID > REV_ID_VT3253_B1) { unsigned long flags; - spin_lock_irqsave(&pDevice->lock, flags); + spin_lock_irqsave(&priv->lock, flags); /* set HW default power register */ - MACvSelectPage1(pDevice->PortOffset); - RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh); - VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWRCCK, - pDevice->byCurPwr); - RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh); - VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWROFDM, - pDevice->byCurPwr); - MACvSelectPage0(pDevice->PortOffset); - - spin_unlock_irqrestore(&pDevice->lock, flags); + MACvSelectPage1(priv->PortOffset); + RFbSetPower(priv, RATE_1M, priv->byCurrentCh); + VNSvOutPortB(priv->PortOffset + MAC_REG_PWRCCK, + priv->byCurPwr); + RFbSetPower(priv, RATE_6M, priv->byCurrentCh); + VNSvOutPortB(priv->PortOffset + MAC_REG_PWROFDM, + priv->byCurPwr); + MACvSelectPage0(priv->PortOffset); + + spin_unlock_irqrestore(&priv->lock, flags); } - if (pDevice->byBBType == BB_TYPE_11B) - RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh); + if (priv->byBBType == BB_TYPE_11B) + RFbSetPower(priv, RATE_1M, priv->byCurrentCh); else - RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh); + RFbSetPower(priv, RATE_6M, priv->byCurrentCh); - return bResult; + return ret; } diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h index e2be6fca5f26..2d613e7f169c 100644 --- a/drivers/staging/vt6655/channel.h +++ b/drivers/staging/vt6655/channel.h @@ -27,6 +27,6 @@ void vnt_init_bands(struct vnt_private *); -bool set_channel(void *pDeviceHandler, struct ieee80211_channel *); +bool set_channel(struct vnt_private *, struct ieee80211_channel *); #endif /* _CHANNEL_H_ */ diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index fefbf826c622..c3eea07ca97e 100644 
--- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -211,11 +211,11 @@ static void device_init_registers(struct vnt_private *priv) unsigned char byCCKPwrdBm = 0; unsigned char byOFDMPwrdBm = 0; - MACbShutdown(priv->PortOffset); + MACbShutdown(priv); BBvSoftwareReset(priv); /* Do MACbSoftwareReset in MACvInitialize */ - MACbSoftwareReset(priv->PortOffset); + MACbSoftwareReset(priv); priv->bAES = false; @@ -229,7 +229,7 @@ static void device_init_registers(struct vnt_private *priv) priv->byTopCCKBasicRate = RATE_1M; /* init MAC */ - MACvInitialize(priv->PortOffset); + MACvInitialize(priv); /* Get Local ID */ VNSvInPortB(priv->PortOffset + MAC_REG_LOCALID, &priv->byLocalID); @@ -357,8 +357,8 @@ static void device_init_registers(struct vnt_private *priv) MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT)); /* set performance parameter by registry */ - MACvSetShortRetryLimit(priv->PortOffset, priv->byShortRetryLimit); - MACvSetLongRetryLimit(priv->PortOffset, priv->byLongRetryLimit); + MACvSetShortRetryLimit(priv, priv->byShortRetryLimit); + MACvSetLongRetryLimit(priv, priv->byLongRetryLimit); /* reset TSF counter */ VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST); @@ -742,6 +742,11 @@ static bool device_alloc_rx_buf(struct vnt_private *priv, dma_map_single(&priv->pcid->dev, skb_put(rd_info->skb, skb_tailroom(rd_info->skb)), priv->rx_buf_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(&priv->pcid->dev, rd_info->skb_dma)) { + dev_kfree_skb(rd_info->skb); + rd_info->skb = NULL; + return false; + } *((unsigned int *)&rd->rd0) = 0; /* FIX cast */ @@ -884,7 +889,7 @@ static void device_error(struct vnt_private *priv, unsigned short status) if (status & ISR_FETALERR) { dev_err(&priv->pcid->dev, "Hardware fatal error\n"); - MACbShutdown(priv->PortOffset); + MACbShutdown(priv); return; } } @@ -1012,7 +1017,7 @@ static void vnt_interrupt_process(struct vnt_private *priv) if ((priv->op_mode == NL80211_IFTYPE_AP || priv->op_mode == NL80211_IFTYPE_ADHOC) && priv->vif->bss_conf.enable_beacon) { - MACvOneShotTimer1MicroSec(priv->PortOffset, + MACvOneShotTimer1MicroSec(priv, (priv->vif->bss_conf.beacon_int - MAKE_BEACON_RESERVED) << 10); } @@ -1166,7 +1171,7 @@ static int vnt_start(struct ieee80211_hw *hw) if (!device_init_rings(priv)) return -ENOMEM; - ret = request_irq(priv->pcid->irq, &vnt_interrupt, + ret = request_irq(priv->pcid->irq, vnt_interrupt, IRQF_SHARED, "vt6655", priv); if (ret) { dev_dbg(&priv->pcid->dev, "failed to start irq\n"); @@ -1197,8 +1202,8 @@ static void vnt_stop(struct ieee80211_hw *hw) cancel_work_sync(&priv->interrupt_work); - MACbShutdown(priv->PortOffset); - MACbSoftwareReset(priv->PortOffset); + MACbShutdown(priv); + MACbSoftwareReset(priv); CARDbRadioPowerOff(priv); device_free_td0_ring(priv); @@ -1636,13 +1641,13 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) INIT_WORK(&priv->interrupt_work, vnt_interrupt_work); /* do reset */ - if (!MACbSoftwareReset(priv->PortOffset)) { + if (!MACbSoftwareReset(priv)) { dev_err(&pcid->dev, ": Failed to access MAC hardware..\n"); device_free_info(priv); return -ENODEV; } /* initial to reload eeprom */ - MACvInitialize(priv->PortOffset); + MACvInitialize(priv); MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr); /* Get RFType */ @@ -1690,7 +1695,7 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state) pci_save_state(pcid); - MACbShutdown(priv->PortOffset); + MACbShutdown(priv); pci_disable_device(pcid); pci_set_power_state(pcid, 
pci_choose_state(pcid, state)); diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c index f2b3fea90533..ffcaf25fdd8b 100644 --- a/drivers/staging/vt6655/key.c +++ b/drivers/staging/vt6655/key.c @@ -36,7 +36,7 @@ int vnt_key_init_table(struct vnt_private *priv) u32 i; for (i = 0; i < MAX_KEY_TABLE; i++) - MACvDisableKeyEntry(priv->PortOffset, i); + MACvDisableKeyEntry(priv, i); return 0; } @@ -104,7 +104,7 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr, key->key[15] |= 0x80; } - MACvSetKeyEntry(priv->PortOffset, key_mode, entry, key_inx, + MACvSetKeyEntry(priv, key_mode, entry, key_inx, bssid, (u32 *)key->key, priv->byLocalID); return 0; @@ -126,13 +126,13 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, switch (key->cipher) { case 0: for (u = 0 ; u < MAX_KEY_TABLE; u++) - MACvDisableKeyEntry(priv->PortOffset, u); + MACvDisableKeyEntry(priv, u); return ret; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: for (u = 0; u < MAX_KEY_TABLE; u++) - MACvDisableKeyEntry(priv->PortOffset, u); + MACvDisableKeyEntry(priv, u); vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY, KEY_CTL_WEP, true); diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c index 688c3be168d1..45196c6e9e12 100644 --- a/drivers/staging/vt6655/mac.c +++ b/drivers/staging/vt6655/mac.c @@ -61,7 +61,7 @@ * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * byRegOfs - Offset of MAC Register * byTestBits - Test bits * Out: @@ -70,13 +70,12 @@ * Return Value: true if all test bits On; otherwise false * */ -bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs, +bool MACbIsRegBitsOn(struct vnt_private *priv, unsigned char byRegOfs, unsigned char byTestBits) { - unsigned char byData; + void __iomem *io_base = priv->PortOffset; - VNSvInPortB(dwIoBase + byRegOfs, &byData); - return (byData & byTestBits) == byTestBits; + return (ioread8(io_base + byRegOfs) & byTestBits) == byTestBits; } /* @@ -85,7 +84,7 @@ bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs, * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * byRegOfs - Offset of MAC Register * byTestBits - Test bits * Out: @@ -94,13 +93,12 @@ bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs, * Return Value: true if all test bits Off; otherwise false * */ -bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs, +bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs, unsigned char byTestBits) { - unsigned char byData; + void __iomem *io_base = priv->PortOffset; - VNSvInPortB(dwIoBase + byRegOfs, &byData); - return !(byData & byTestBits); + return !(ioread8(io_base + byRegOfs) & byTestBits); } /* @@ -109,19 +107,18 @@ bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs, * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: * none * * Return Value: true if interrupt is disable; otherwise false * */ -bool MACbIsIntDisable(void __iomem *dwIoBase) +bool MACbIsIntDisable(struct vnt_private *priv) { - unsigned long dwData; + void __iomem *io_base = priv->PortOffset; - VNSvInPortD(dwIoBase + MAC_REG_IMR, &dwData); - if (dwData != 0) + if (ioread32(io_base + MAC_REG_IMR)) return false; return true; @@ -133,7 +130,7 @@ bool MACbIsIntDisable(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * byRetryLimit- Retry 
Limit * Out: * none @@ -141,10 +138,11 @@ bool MACbIsIntDisable(void __iomem *dwIoBase) * Return Value: none * */ -void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit) +void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit) { + void __iomem *io_base = priv->PortOffset; /* set SRT */ - VNSvOutPortB(dwIoBase + MAC_REG_SRT, byRetryLimit); + iowrite8(byRetryLimit, io_base + MAC_REG_SRT); } @@ -154,7 +152,7 @@ void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * byRetryLimit- Retry Limit * Out: * none @@ -162,10 +160,11 @@ void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit) * Return Value: none * */ -void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit) +void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit) { + void __iomem *io_base = priv->PortOffset; /* set LRT */ - VNSvOutPortB(dwIoBase + MAC_REG_LRT, byRetryLimit); + iowrite8(byRetryLimit, io_base + MAC_REG_LRT); } /* @@ -174,7 +173,7 @@ void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * byLoopbackMode - Loopback Mode * Out: * none @@ -182,16 +181,14 @@ void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit) * Return Value: none * */ -void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode) +void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode) { - unsigned char byOrgValue; + void __iomem *io_base = priv->PortOffset; byLoopbackMode <<= 6; /* set TCR */ - VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue); - byOrgValue = byOrgValue & 0x3F; - byOrgValue = byOrgValue | byLoopbackMode; - VNSvOutPortB(dwIoBase + MAC_REG_TEST, byOrgValue); + iowrite8((ioread8(io_base + MAC_REG_TEST) & 0x3f) | byLoopbackMode, + io_base + MAC_REG_TEST); } /* @@ -200,29 +197,27 @@ void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: - * pbyCxtBuf - Context buffer + * cxt_buf - Context buffer * * Return Value: none * */ -void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf) +void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf) { - int ii; + void __iomem *io_base = priv->PortOffset; /* read page0 register */ - for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE0; ii++) - VNSvInPortB((dwIoBase + ii), (pbyCxtBuf + ii)); + memcpy_fromio(cxt_buf, io_base, MAC_MAX_CONTEXT_SIZE_PAGE0); - MACvSelectPage1(dwIoBase); + MACvSelectPage1(io_base); /* read page1 register */ - for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++) - VNSvInPortB((dwIoBase + ii), - (pbyCxtBuf + MAC_MAX_CONTEXT_SIZE_PAGE0 + ii)); + memcpy_fromio(cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0, io_base, + MAC_MAX_CONTEXT_SIZE_PAGE1); - MACvSelectPage0(dwIoBase); + MACvSelectPage0(io_base); } /* @@ -231,53 +226,50 @@ void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf) * * Parameters: * In: - * dwIoBase - Base Address for MAC - * pbyCxtBuf - Context buffer + * io_base - Base Address for MAC + * cxt_buf - Context buffer * Out: * none * * Return Value: none * */ -void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf) +void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf) 
{ - int ii; + void __iomem *io_base = priv->PortOffset; - MACvSelectPage1(dwIoBase); + MACvSelectPage1(io_base); /* restore page1 */ - for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++) - VNSvOutPortB((dwIoBase + ii), - *(pbyCxtBuf + MAC_MAX_CONTEXT_SIZE_PAGE0 + ii)); + memcpy_toio(io_base, cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0, + MAC_MAX_CONTEXT_SIZE_PAGE1); - MACvSelectPage0(dwIoBase); + MACvSelectPage0(io_base); /* restore RCR,TCR,IMR... */ - for (ii = MAC_REG_RCR; ii < MAC_REG_ISR; ii++) - VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii)); + memcpy_toio(io_base + MAC_REG_RCR, cxt_buf + MAC_REG_RCR, + MAC_REG_ISR - MAC_REG_RCR); /* restore MAC Config. */ - for (ii = MAC_REG_LRT; ii < MAC_REG_PAGE1SEL; ii++) - VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii)); + memcpy_toio(io_base + MAC_REG_LRT, cxt_buf + MAC_REG_LRT, + MAC_REG_PAGE1SEL - MAC_REG_LRT); - VNSvOutPortB(dwIoBase + MAC_REG_CFG, *(pbyCxtBuf + MAC_REG_CFG)); + iowrite8(*(cxt_buf + MAC_REG_CFG), io_base + MAC_REG_CFG); /* restore PS Config. */ - for (ii = MAC_REG_PSCFG; ii < MAC_REG_BBREGCTL; ii++) - VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii)); + memcpy_toio(io_base + MAC_REG_PSCFG, cxt_buf + MAC_REG_PSCFG, + MAC_REG_BBREGCTL - MAC_REG_PSCFG); /* restore CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR */ - VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, - *(unsigned long *)(pbyCxtBuf + MAC_REG_TXDMAPTR0)); - VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, - *(unsigned long *)(pbyCxtBuf + MAC_REG_AC0DMAPTR)); - VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR, - *(unsigned long *)(pbyCxtBuf + MAC_REG_BCNDMAPTR)); - - VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, - *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR0)); - - VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, - *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR1)); + iowrite32(*(u32 *)(cxt_buf + MAC_REG_TXDMAPTR0), + io_base + MAC_REG_TXDMAPTR0); + iowrite32(*(u32 *)(cxt_buf + MAC_REG_AC0DMAPTR), + io_base + MAC_REG_AC0DMAPTR); + iowrite32(*(u32 *)(cxt_buf + MAC_REG_BCNDMAPTR), + io_base + MAC_REG_BCNDMAPTR); + iowrite32(*(u32 *)(cxt_buf + MAC_REG_RXDMAPTR0), + io_base + MAC_REG_RXDMAPTR0); + iowrite32(*(u32 *)(cxt_buf + MAC_REG_RXDMAPTR1), + io_base + MAC_REG_RXDMAPTR1); } /* @@ -286,24 +278,23 @@ void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: * none * * Return Value: true if Reset Success; otherwise false * */ -bool MACbSoftwareReset(void __iomem *dwIoBase) +bool MACbSoftwareReset(struct vnt_private *priv) { - unsigned char byData; + void __iomem *io_base = priv->PortOffset; unsigned short ww; /* turn on HOSTCR_SOFTRST, just write 0x01 to reset */ - VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR, 0x01); + iowrite8(0x01, io_base + MAC_REG_HOSTCR); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData); - if (!(byData & HOSTCR_SOFTRST)) + if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_SOFTRST)) break; } if (ww == W_MAX_TIMEOUT) @@ -317,14 +308,14 @@ bool MACbSoftwareReset(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: * none * * Return Value: true if success; otherwise false * */ -bool MACbSafeSoftwareReset(void __iomem *dwIoBase) +bool MACbSafeSoftwareReset(struct vnt_private *priv) { unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1]; bool bRetVal; @@ -334,11 +325,11 @@ bool MACbSafeSoftwareReset(void __iomem *dwIoBase) * reset, then restore 
register's value */ /* save MAC context */ - MACvSaveContext(dwIoBase, abyTmpRegData); + MACvSaveContext(priv, abyTmpRegData); /* do reset */ - bRetVal = MACbSoftwareReset(dwIoBase); + bRetVal = MACbSoftwareReset(priv); /* restore MAC context, except CR0 */ - MACvRestoreContext(dwIoBase, abyTmpRegData); + MACvRestoreContext(priv, abyTmpRegData); return bRetVal; } @@ -349,27 +340,25 @@ bool MACbSafeSoftwareReset(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: * none * * Return Value: true if success; otherwise false * */ -bool MACbSafeRxOff(void __iomem *dwIoBase) +bool MACbSafeRxOff(struct vnt_private *priv) { + void __iomem *io_base = priv->PortOffset; unsigned short ww; - unsigned long dwData; - unsigned char byData; /* turn off wow temp for turn off Rx safely */ /* Clear RX DMA0,1 */ - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_CLRRUN); - VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_CLRRUN); + iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_RXDMACTL0); + iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_RXDMACTL1); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData); - if (!(dwData & DMACTL_RUN)) + if (!(ioread32(io_base + MAC_REG_RXDMACTL0) & DMACTL_RUN)) break; } if (ww == W_MAX_TIMEOUT) { @@ -377,8 +366,7 @@ bool MACbSafeRxOff(void __iomem *dwIoBase) return false; } for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData); - if (!(dwData & DMACTL_RUN)) + if (!(ioread32(io_base + MAC_REG_RXDMACTL1) & DMACTL_RUN)) break; } if (ww == W_MAX_TIMEOUT) { @@ -387,11 +375,10 @@ bool MACbSafeRxOff(void __iomem *dwIoBase) } /* try to safe shutdown RX */ - MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_RXON); + MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_RXON); /* W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData); - if (!(byData & HOSTCR_RXONST)) + if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_RXONST)) break; } if (ww == W_MAX_TIMEOUT) { @@ -407,28 +394,26 @@ bool MACbSafeRxOff(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: * none * * Return Value: true if success; otherwise false * */ -bool MACbSafeTxOff(void __iomem *dwIoBase) +bool MACbSafeTxOff(struct vnt_private *priv) { + void __iomem *io_base = priv->PortOffset; unsigned short ww; - unsigned long dwData; - unsigned char byData; /* Clear TX DMA */ /* Tx0 */ - VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_CLRRUN); + iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_TXDMACTL0); /* AC0 */ - VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_CLRRUN); + iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_AC0DMACTL); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData); - if (!(dwData & DMACTL_RUN)) + if (!(ioread32(io_base + MAC_REG_TXDMACTL0) & DMACTL_RUN)) break; } if (ww == W_MAX_TIMEOUT) { @@ -436,8 +421,7 @@ bool MACbSafeTxOff(void __iomem *dwIoBase) return false; } for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData); - if (!(dwData & DMACTL_RUN)) + if (!(ioread32(io_base + MAC_REG_AC0DMACTL) & DMACTL_RUN)) break; } if (ww == W_MAX_TIMEOUT) { @@ -446,12 +430,11 @@ bool MACbSafeTxOff(void __iomem *dwIoBase) } /* try to safe shutdown TX */ - MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_TXON); + MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_TXON); /* 
W_MAX_TIMEOUT is the timeout period */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData); - if (!(byData & HOSTCR_TXONST)) + if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_TXONST)) break; } if (ww == W_MAX_TIMEOUT) { @@ -467,29 +450,31 @@ bool MACbSafeTxOff(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: * none * * Return Value: true if success; otherwise false * */ -bool MACbSafeStop(void __iomem *dwIoBase) +bool MACbSafeStop(struct vnt_private *priv) { - MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX); + void __iomem *io_base = priv->PortOffset; - if (!MACbSafeRxOff(dwIoBase)) { + MACvRegBitsOff(io_base, MAC_REG_TCR, TCR_AUTOBCNTX); + + if (!MACbSafeRxOff(priv)) { pr_debug(" MACbSafeRxOff == false)\n"); - MACbSafeSoftwareReset(dwIoBase); + MACbSafeSoftwareReset(priv); return false; } - if (!MACbSafeTxOff(dwIoBase)) { + if (!MACbSafeTxOff(priv)) { pr_debug(" MACbSafeTxOff == false)\n"); - MACbSafeSoftwareReset(dwIoBase); + MACbSafeSoftwareReset(priv); return false; } - MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_MACEN); + MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_MACEN); return true; } @@ -500,24 +485,25 @@ bool MACbSafeStop(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: * none * * Return Value: true if success; otherwise false * */ -bool MACbShutdown(void __iomem *dwIoBase) +bool MACbShutdown(struct vnt_private *priv) { + void __iomem *io_base = priv->PortOffset; /* disable MAC IMR */ - MACvIntDisable(dwIoBase); - MACvSetLoopbackMode(dwIoBase, MAC_LB_INTERNAL); + MACvIntDisable(io_base); + MACvSetLoopbackMode(priv, MAC_LB_INTERNAL); /* stop the adapter */ - if (!MACbSafeStop(dwIoBase)) { - MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE); + if (!MACbSafeStop(priv)) { + MACvSetLoopbackMode(priv, MAC_LB_NONE); return false; } - MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE); + MACvSetLoopbackMode(priv, MAC_LB_NONE); return true; } @@ -527,28 +513,29 @@ bool MACbShutdown(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * Out: * none * * Return Value: none * */ -void MACvInitialize(void __iomem *dwIoBase) +void MACvInitialize(struct vnt_private *priv) { + void __iomem *io_base = priv->PortOffset; /* clear sticky bits */ - MACvClearStckDS(dwIoBase); + MACvClearStckDS(io_base); /* disable force PME-enable */ - VNSvOutPortB(dwIoBase + MAC_REG_PMC1, PME_OVR); + iowrite8(PME_OVR, io_base + MAC_REG_PMC1); /* only 3253 A */ /* do reset */ - MACbSoftwareReset(dwIoBase); + MACbSoftwareReset(priv); /* reset TSF counter */ - VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST); + iowrite8(TFTCTL_TSFCNTRST, io_base + MAC_REG_TFTCTL); /* enable TSF counter */ - VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN); + iowrite8(TFTCTL_TSFCNTREN, io_base + MAC_REG_TFTCTL); } /* @@ -557,33 +544,32 @@ void MACvInitialize(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC - * dwCurrDescAddr - Descriptor Address + * io_base - Base Address for MAC + * curr_desc_addr - Descriptor Address * Out: * none * * Return Value: none * */ -void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr) +void MACvSetCurrRx0DescAddr(struct vnt_private *priv, u32 curr_desc_addr) { + void __iomem *io_base = priv->PortOffset; unsigned short ww; - unsigned char byData; - unsigned char byOrgDMACtl; + unsigned 
char org_dma_ctl; - VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL0, &byOrgDMACtl); - if (byOrgDMACtl & DMACTL_RUN) - VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL0+2, DMACTL_RUN); + org_dma_ctl = ioread8(io_base + MAC_REG_RXDMACTL0); + if (org_dma_ctl & DMACTL_RUN) + iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL0 + 2); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL0, &byData); - if (!(byData & DMACTL_RUN)) + if (!(ioread8(io_base + MAC_REG_RXDMACTL0) & DMACTL_RUN)) break; } - VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, dwCurrDescAddr); - if (byOrgDMACtl & DMACTL_RUN) - VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN); + iowrite32(curr_desc_addr, io_base + MAC_REG_RXDMAPTR0); + if (org_dma_ctl & DMACTL_RUN) + iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL0); } /* @@ -592,33 +578,32 @@ void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr * * Parameters: * In: - * dwIoBase - Base Address for MAC - * dwCurrDescAddr - Descriptor Address + * io_base - Base Address for MAC + * curr_desc_addr - Descriptor Address * Out: * none * * Return Value: none * */ -void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr) +void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr) { + void __iomem *io_base = priv->PortOffset; unsigned short ww; - unsigned char byData; - unsigned char byOrgDMACtl; + unsigned char org_dma_ctl; - VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL1, &byOrgDMACtl); - if (byOrgDMACtl & DMACTL_RUN) - VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL1+2, DMACTL_RUN); + org_dma_ctl = ioread8(io_base + MAC_REG_RXDMACTL1); + if (org_dma_ctl & DMACTL_RUN) + iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL1 + 2); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL1, &byData); - if (!(byData & DMACTL_RUN)) + if (!(ioread8(io_base + MAC_REG_RXDMACTL1) & DMACTL_RUN)) break; } - VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, dwCurrDescAddr); - if (byOrgDMACtl & DMACTL_RUN) - VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_RUN); + iowrite32(curr_desc_addr, io_base + MAC_REG_RXDMAPTR1); + if (org_dma_ctl & DMACTL_RUN) + iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL1); } @@ -628,34 +613,33 @@ void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr * * Parameters: * In: - * dwIoBase - Base Address for MAC - * dwCurrDescAddr - Descriptor Address + * io_base - Base Address for MAC + * curr_desc_addr - Descriptor Address * Out: * none * * Return Value: none * */ -void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase, - unsigned long dwCurrDescAddr) +void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv, + u32 curr_desc_addr) { + void __iomem *io_base = priv->PortOffset; unsigned short ww; - unsigned char byData; - unsigned char byOrgDMACtl; + unsigned char org_dma_ctl; - VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byOrgDMACtl); - if (byOrgDMACtl & DMACTL_RUN) - VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0+2, DMACTL_RUN); + org_dma_ctl = ioread8(io_base + MAC_REG_TXDMACTL0); + if (org_dma_ctl & DMACTL_RUN) + iowrite8(DMACTL_RUN, io_base + MAC_REG_TXDMACTL0 + 2); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byData); - if (!(byData & DMACTL_RUN)) + if (!(ioread8(io_base + MAC_REG_TXDMACTL0) & DMACTL_RUN)) break; } - VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, dwCurrDescAddr); - if (byOrgDMACtl & DMACTL_RUN) - VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_RUN); + iowrite32(curr_desc_addr, io_base + MAC_REG_TXDMAPTR0); + if 
(org_dma_ctl & DMACTL_RUN) + iowrite8(DMACTL_RUN, io_base + MAC_REG_TXDMACTL0); } /* @@ -664,8 +648,8 @@ void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase, * * Parameters: * In: - * dwIoBase - Base Address for MAC - * dwCurrDescAddr - Descriptor Address + * io_base - Base Address for MAC + * curr_desc_addr - Descriptor Address * Out: * none * @@ -673,36 +657,35 @@ void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase, * */ /* TxDMA1 = AC0DMA */ -void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase, - unsigned long dwCurrDescAddr) +void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv, + u32 curr_desc_addr) { + void __iomem *io_base = priv->PortOffset; unsigned short ww; - unsigned char byData; - unsigned char byOrgDMACtl; + unsigned char org_dma_ctl; - VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byOrgDMACtl); - if (byOrgDMACtl & DMACTL_RUN) - VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL+2, DMACTL_RUN); + org_dma_ctl = ioread8(io_base + MAC_REG_AC0DMACTL); + if (org_dma_ctl & DMACTL_RUN) + iowrite8(DMACTL_RUN, io_base + MAC_REG_AC0DMACTL + 2); for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byData); - if (!(byData & DMACTL_RUN)) + if (!(ioread8(io_base + MAC_REG_AC0DMACTL) & DMACTL_RUN)) break; } if (ww == W_MAX_TIMEOUT) pr_debug(" DBG_PORT80(0x26)\n"); - VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, dwCurrDescAddr); - if (byOrgDMACtl & DMACTL_RUN) - VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN); + iowrite32(curr_desc_addr, io_base + MAC_REG_AC0DMAPTR); + if (org_dma_ctl & DMACTL_RUN) + iowrite8(DMACTL_RUN, io_base + MAC_REG_AC0DMACTL); } -void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase, - unsigned long dwCurrDescAddr) +void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv, + u32 curr_desc_addr) { if (iTxType == TYPE_AC0DMA) - MACvSetCurrAC0DescAddrEx(dwIoBase, dwCurrDescAddr); + MACvSetCurrAC0DescAddrEx(priv, curr_desc_addr); else if (iTxType == TYPE_TXDMA0) - MACvSetCurrTx0DescAddrEx(dwIoBase, dwCurrDescAddr); + MACvSetCurrTx0DescAddrEx(priv, curr_desc_addr); } /* @@ -711,7 +694,7 @@ void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase, * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * uDelay - Delay time (timer resolution is 4 us) * Out: * none @@ -719,25 +702,26 @@ void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase, * Return Value: none * */ -void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay) +void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay) { + void __iomem *io_base = priv->PortOffset; unsigned char byValue; unsigned int uu, ii; - VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0); - VNSvOutPortD(dwIoBase + MAC_REG_TMDATA0, uDelay); - VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, (TMCTL_TMD | TMCTL_TE)); + iowrite8(0, io_base + MAC_REG_TMCTL0); + iowrite32(uDelay, io_base + MAC_REG_TMDATA0); + iowrite8((TMCTL_TMD | TMCTL_TE), io_base + MAC_REG_TMCTL0); for (ii = 0; ii < 66; ii++) { /* assume max PCI clock is 66Mhz */ for (uu = 0; uu < uDelay; uu++) { - VNSvInPortB(dwIoBase + MAC_REG_TMCTL0, &byValue); + byValue = ioread8(io_base + MAC_REG_TMCTL0); if ((byValue == 0) || (byValue & TMCTL_TSUSP)) { - VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0); + iowrite8(0, io_base + MAC_REG_TMCTL0); return; } } } - VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0); + iowrite8(0, io_base + MAC_REG_TMCTL0); } /* @@ -746,7 +730,7 @@ void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay) * * Parameters: * In: - * dwIoBase - Base 
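Every descriptor-address setter above (Rx0, Rx1, Tx0, AC0) follows the same sequence: if the DMA engine is running, strobe its stop bit, poll the control register until the RUN bit clears, program the new descriptor pointer, then restart the engine. Below is a compact sketch of that pattern using the generic accessors the patch switches to; the register offsets, the +2 stop-strobe and the poll limit mirror the driver's style, but the names and numbers here are illustrative only.

#include <linux/io.h>
#include <linux/types.h>

#define DMA_CTL_REG     0x10            /* placeholder control register */
#define DMA_PTR_REG     0x14            /* placeholder descriptor pointer */
#define DMA_CTL_RUN     0x01
#define POLL_LIMIT      1000            /* stand-in for W_MAX_TIMEOUT */

static void example_set_desc_addr(void __iomem *io_base, u32 desc_addr)
{
        u8 ctl = ioread8(io_base + DMA_CTL_REG);
        int i;

        if (ctl & DMA_CTL_RUN)
                iowrite8(DMA_CTL_RUN, io_base + DMA_CTL_REG + 2); /* request stop */

        for (i = 0; i < POLL_LIMIT; i++) {
                if (!(ioread8(io_base + DMA_CTL_REG) & DMA_CTL_RUN))
                        break;          /* engine has stopped */
        }

        iowrite32(desc_addr, io_base + DMA_PTR_REG);    /* program the new ring */

        if (ctl & DMA_CTL_RUN)
                iowrite8(DMA_CTL_RUN, io_base + DMA_CTL_REG);   /* restart DMA */
}
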
Address for MAC + * io_base - Base Address for MAC * uDelay - Delay time * Out: * none @@ -754,38 +738,41 @@ void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay) * Return Value: none * */ -void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime) +void MACvOneShotTimer1MicroSec(struct vnt_private *priv, unsigned int uDelayTime) { - VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, 0); - VNSvOutPortD(dwIoBase + MAC_REG_TMDATA1, uDelayTime); - VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, (TMCTL_TMD | TMCTL_TE)); + void __iomem *io_base = priv->PortOffset; + + iowrite8(0, io_base + MAC_REG_TMCTL1); + iowrite32(uDelayTime, io_base + MAC_REG_TMDATA1); + iowrite8((TMCTL_TMD | TMCTL_TE), io_base + MAC_REG_TMCTL1); } -void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset, - unsigned long dwData) +void MACvSetMISCFifo(struct vnt_private *priv, unsigned short offset, + u32 data) { - if (wOffset > 273) + void __iomem *io_base = priv->PortOffset; + + if (offset > 273) return; - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); - VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE); + iowrite16(offset, io_base + MAC_REG_MISCFFNDEX); + iowrite32(data, io_base + MAC_REG_MISCFFDATA); + iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL); } -bool MACbPSWakeup(void __iomem *dwIoBase) +bool MACbPSWakeup(struct vnt_private *priv) { - unsigned char byOrgValue; + void __iomem *io_base = priv->PortOffset; unsigned int ww; /* Read PSCTL */ - if (MACbIsRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PS)) + if (MACbIsRegBitsOff(priv, MAC_REG_PSCTL, PSCTL_PS)) return true; /* Disable PS */ - MACvRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PSEN); + MACvRegBitsOff(io_base, MAC_REG_PSCTL, PSCTL_PSEN); /* Check if SyncFlushOK */ for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { - VNSvInPortB(dwIoBase + MAC_REG_PSCTL, &byOrgValue); - if (byOrgValue & PSCTL_WAKEDONE) + if (ioread8(io_base + MAC_REG_PSCTL) & PSCTL_WAKEDONE) break; } if (ww == W_MAX_TIMEOUT) { @@ -801,7 +788,7 @@ bool MACbPSWakeup(void __iomem *dwIoBase) * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * * Out: * none @@ -810,57 +797,58 @@ bool MACbPSWakeup(void __iomem *dwIoBase) * */ -void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl, +void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID) { - unsigned short wOffset; - u32 dwData; + void __iomem *io_base = priv->PortOffset; + unsigned short offset; + u32 data; int ii; if (byLocalID <= 1) return; pr_debug("MACvSetKeyEntry\n"); - wOffset = MISCFIFO_KEYETRY0; - wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE); - - dwData = 0; - dwData |= wKeyCtl; - dwData <<= 16; - dwData |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5)); - pr_debug("1. wOffset: %d, Data: %X, KeyCtl:%X\n", - wOffset, dwData, wKeyCtl); - - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); - VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE); - wOffset++; - - dwData = 0; - dwData |= *(pbyAddr+3); - dwData <<= 8; - dwData |= *(pbyAddr+2); - dwData <<= 8; - dwData |= *(pbyAddr+1); - dwData <<= 8; - dwData |= *(pbyAddr+0); - pr_debug("2. 
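MACvSetMISCFifo() above shows the indexed-FIFO idiom used again by MACvSetKeyEntry() and RFvWriteWakeProgSyn(): write the slot index, then the 32-bit payload, then a write strobe to commit. A sketch of that three-step sequence follows; the register names and offsets are stand-ins, only the 273-slot bound echoes the driver's check.

#include <linux/io.h>
#include <linux/types.h>

#define REG_FIFO_INDEX  0x20            /* placeholder index register */
#define REG_FIFO_DATA   0x24            /* placeholder data register */
#define REG_FIFO_CTL    0x28            /* placeholder control register */
#define FIFO_CTL_WRITE  0x01
#define FIFO_MAX_INDEX  273             /* driver rejects larger offsets */

static void example_fifo_write(void __iomem *io_base, u16 index, u32 data)
{
        if (index > FIFO_MAX_INDEX)
                return;                                 /* out of range, silently ignored */

        iowrite16(index, io_base + REG_FIFO_INDEX);     /* select the slot */
        iowrite32(data, io_base + REG_FIFO_DATA);       /* stage the payload */
        iowrite16(FIFO_CTL_WRITE, io_base + REG_FIFO_CTL); /* commit */
}
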
wOffset: %d, Data: %X\n", wOffset, dwData); - - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); - VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE); - wOffset++; - - wOffset += (uKeyIdx * 4); + offset = MISCFIFO_KEYETRY0; + offset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE); + + data = 0; + data |= wKeyCtl; + data <<= 16; + data |= MAKEWORD(*(pbyAddr + 4), *(pbyAddr + 5)); + pr_debug("1. offset: %d, Data: %X, KeyCtl:%X\n", + offset, data, wKeyCtl); + + iowrite16(offset, io_base + MAC_REG_MISCFFNDEX); + iowrite32(data, io_base + MAC_REG_MISCFFDATA); + iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL); + offset++; + + data = 0; + data |= *(pbyAddr + 3); + data <<= 8; + data |= *(pbyAddr + 2); + data <<= 8; + data |= *(pbyAddr + 1); + data <<= 8; + data |= *pbyAddr; + pr_debug("2. offset: %d, Data: %X\n", offset, data); + + iowrite16(offset, io_base + MAC_REG_MISCFFNDEX); + iowrite32(data, io_base + MAC_REG_MISCFFDATA); + iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL); + offset++; + + offset += (uKeyIdx * 4); for (ii = 0; ii < 4; ii++) { /* always push 128 bits */ - pr_debug("3.(%d) wOffset: %d, Data: %X\n", - ii, wOffset+ii, *pdwKey); - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii); - VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++); - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE); + pr_debug("3.(%d) offset: %d, Data: %X\n", + ii, offset + ii, *pdwKey); + iowrite16(offset + ii, io_base + MAC_REG_MISCFFNDEX); + iowrite32(*pdwKey++, io_base + MAC_REG_MISCFFDATA); + iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL); } } @@ -870,7 +858,7 @@ void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl, * * Parameters: * In: - * dwIoBase - Base Address for MAC + * io_base - Base Address for MAC * * Out: * none @@ -878,14 +866,15 @@ void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl, * Return Value: none * */ -void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx) +void MACvDisableKeyEntry(struct vnt_private *priv, unsigned int uEntryIdx) { - unsigned short wOffset; + void __iomem *io_base = priv->PortOffset; + unsigned short offset; - wOffset = MISCFIFO_KEYETRY0; - wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE); + offset = MISCFIFO_KEYETRY0; + offset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE); - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); - VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, 0); - VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE); + iowrite16(offset, io_base + MAC_REG_MISCFFNDEX); + iowrite32(0, io_base + MAC_REG_MISCFFDATA); + iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL); } diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h index 8e0200a78b19..030f529c339b 100644 --- a/drivers/staging/vt6655/mac.h +++ b/drivers/staging/vt6655/mac.h @@ -890,57 +890,57 @@ do { \ #define MACvSetRFLE_LatchBase(dwIoBase) \ MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT) -bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs, +bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs, unsigned char byTestBits); -bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs, +bool MACbIsRegBitsOff(struct vnt_private *, unsigned char byRegOfs, unsigned char byTestBits); -bool MACbIsIntDisable(void __iomem *dwIoBase); +bool MACbIsIntDisable(struct vnt_private *); -void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit); +void 
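The second key-entry word in MACvSetKeyEntry() is assembled by shifting in the four low address bytes one at a time, which amounts to a little-endian 32-bit load of pbyAddr[0..3]. The sketch below shows the loop-free equivalent; get_unaligned_le32() is offered only as an illustration of the same result, it is not something this patch introduces.

#include <linux/types.h>
#include <asm/unaligned.h>

/* Shift/or form used in the driver: addr[0] ends up in the low byte. */
static u32 pack_addr_low(const u8 *addr)
{
        u32 data = 0;

        data |= addr[3];
        data <<= 8;
        data |= addr[2];
        data <<= 8;
        data |= addr[1];
        data <<= 8;
        data |= addr[0];

        return data;
}

/* Equivalent single call: a little-endian load of the same four bytes. */
static u32 pack_addr_low_alt(const u8 *addr)
{
        return get_unaligned_le32(addr);
}
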
MACvSetShortRetryLimit(struct vnt_private *, unsigned char byRetryLimit); -void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit); -void MACvGetLongRetryLimit(void __iomem *dwIoBase, +void MACvSetLongRetryLimit(struct vnt_private *, unsigned char byRetryLimit); +void MACvGetLongRetryLimit(struct vnt_private *, unsigned char *pbyRetryLimit); -void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode); - -void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf); -void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf); - -bool MACbSoftwareReset(void __iomem *dwIoBase); -bool MACbSafeSoftwareReset(void __iomem *dwIoBase); -bool MACbSafeRxOff(void __iomem *dwIoBase); -bool MACbSafeTxOff(void __iomem *dwIoBase); -bool MACbSafeStop(void __iomem *dwIoBase); -bool MACbShutdown(void __iomem *dwIoBase); -void MACvInitialize(void __iomem *dwIoBase); -void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase, - unsigned long dwCurrDescAddr); -void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase, - unsigned long dwCurrDescAddr); -void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase, - unsigned long dwCurrDescAddr); -void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase, - unsigned long dwCurrDescAddr); -void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase, - unsigned long dwCurrDescAddr); -void MACvSetCurrSyncDescAddrEx(void __iomem *dwIoBase, - unsigned long dwCurrDescAddr); -void MACvSetCurrATIMDescAddrEx(void __iomem *dwIoBase, - unsigned long dwCurrDescAddr); -void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay); -void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime); - -void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset, - unsigned long dwData); - -bool MACbPSWakeup(void __iomem *dwIoBase); - -void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl, +void MACvSetLoopbackMode(struct vnt_private *, unsigned char byLoopbackMode); + +void MACvSaveContext(struct vnt_private *, unsigned char *pbyCxtBuf); +void MACvRestoreContext(struct vnt_private *, unsigned char *pbyCxtBuf); + +bool MACbSoftwareReset(struct vnt_private *); +bool MACbSafeSoftwareReset(struct vnt_private *); +bool MACbSafeRxOff(struct vnt_private *); +bool MACbSafeTxOff(struct vnt_private *); +bool MACbSafeStop(struct vnt_private *); +bool MACbShutdown(struct vnt_private *); +void MACvInitialize(struct vnt_private *); +void MACvSetCurrRx0DescAddr(struct vnt_private *, + u32 curr_desc_addr); +void MACvSetCurrRx1DescAddr(struct vnt_private *, + u32 curr_desc_addr); +void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *, + u32 curr_desc_addr); +void MACvSetCurrTx0DescAddrEx(struct vnt_private *, + u32 curr_desc_addr); +void MACvSetCurrAC0DescAddrEx(struct vnt_private *, + u32 curr_desc_addr); +void MACvSetCurrSyncDescAddrEx(struct vnt_private *, + u32 curr_desc_addr); +void MACvSetCurrATIMDescAddrEx(struct vnt_private *, + u32 curr_desc_addr); +void MACvTimer0MicroSDelay(struct vnt_private *, unsigned int uDelay); +void MACvOneShotTimer1MicroSec(struct vnt_private *, unsigned int uDelayTime); + +void MACvSetMISCFifo(struct vnt_private *, unsigned short wOffset, + u32 dwData); + +bool MACbPSWakeup(struct vnt_private *); + +void MACvSetKeyEntry(struct vnt_private *, unsigned short wKeyCtl, unsigned int uEntryIdx, unsigned int uKeyIdx, unsigned char *pbyAddr, u32 *pdwKey, unsigned char byLocalID); -void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx); +void 
MACvDisableKeyEntry(struct vnt_private *, unsigned int uEntryIdx); #endif /* __MAC_H__ */ diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c index 06e6b9d871c4..bc8ca981a629 100644 --- a/drivers/staging/vt6655/power.c +++ b/drivers/staging/vt6655/power.c @@ -64,44 +64,43 @@ void PSvEnablePowerSaving( - void *hDeviceContext, + struct vnt_private *priv, unsigned short wListenInterval ) { - struct vnt_private *pDevice = hDeviceContext; - u16 wAID = pDevice->current_aid | BIT(14) | BIT(15); + u16 wAID = priv->current_aid | BIT(14) | BIT(15); /* set period of power up before TBTT */ - VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT); - if (pDevice->op_mode != NL80211_IFTYPE_ADHOC) { + VNSvOutPortW(priv->PortOffset + MAC_REG_PWBT, C_PWBT); + if (priv->op_mode != NL80211_IFTYPE_ADHOC) { /* set AID */ - VNSvOutPortW(pDevice->PortOffset + MAC_REG_AIDATIM, wAID); + VNSvOutPortW(priv->PortOffset + MAC_REG_AIDATIM, wAID); } else { /* set ATIM Window */ #if 0 /* TODO atim window */ - MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow); + MACvWriteATIMW(priv->PortOffset, pMgmt->wCurrATIMWindow); #endif } /* Set AutoSleep */ - MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); + MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); /* Set HWUTSF */ - MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF); + MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF); if (wListenInterval >= 2) { /* clear always listen beacon */ - MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); + MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); /* first time set listen next beacon */ - MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN); + MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN); } else { /* always listen beacon */ - MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); + MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); } /* enable power saving hw function */ - MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN); - pDevice->bEnablePSMode = true; + MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN); + priv->bEnablePSMode = true; - pDevice->bPWBitOn = true; + priv->bPWBitOn = true; pr_debug("PS:Power Saving Mode Enable...\n"); } @@ -117,23 +116,21 @@ PSvEnablePowerSaving( void PSvDisablePowerSaving( - void *hDeviceContext + struct vnt_private *priv ) { - struct vnt_private *pDevice = hDeviceContext; - /* disable power saving hw function */ - MACbPSWakeup(pDevice->PortOffset); + MACbPSWakeup(priv); /* clear AutoSleep */ - MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); + MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); /* clear HWUTSF */ - MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF); + MACvRegBitsOff(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF); /* set always listen beacon */ - MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); + MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN); - pDevice->bEnablePSMode = false; + priv->bEnablePSMode = false; - pDevice->bPWBitOn = false; + priv->bPWBitOn = false; } @@ -149,27 +146,26 @@ PSvDisablePowerSaving( bool PSbIsNextTBTTWakeUp( - void *hDeviceContext + struct vnt_private *priv ) { - struct vnt_private *pDevice = hDeviceContext; - struct ieee80211_hw *hw = pDevice->hw; + struct ieee80211_hw *hw = priv->hw; struct ieee80211_conf *conf = &hw->conf; - bool bWakeUp = false; + bool wake_up = false; if 
(conf->listen_interval > 1) { - if (!pDevice->wake_up_count) - pDevice->wake_up_count = conf->listen_interval; + if (!priv->wake_up_count) + priv->wake_up_count = conf->listen_interval; - --pDevice->wake_up_count; + --priv->wake_up_count; - if (pDevice->wake_up_count == 1) { + if (priv->wake_up_count == 1) { /* Turn on wake up to listen next beacon */ - MACvRegBitsOn(pDevice->PortOffset, + MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN); - bWakeUp = true; + wake_up = true; } } - return bWakeUp; + return wake_up; } diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h index 538e68507bb0..d82dd8d6d68b 100644 --- a/drivers/staging/vt6655/power.h +++ b/drivers/staging/vt6655/power.h @@ -29,25 +29,27 @@ #ifndef __POWER_H__ #define __POWER_H__ +#include "device.h" + #define C_PWBT 1000 /* micro sec. power up before TBTT */ #define PS_FAST_INTERVAL 1 /* Fast power saving listen interval */ #define PS_MAX_INTERVAL 4 /* MAX power saving listen interval */ void PSvDisablePowerSaving( - void *hDeviceContext + struct vnt_private * ); void PSvEnablePowerSaving( - void *hDeviceContext, + struct vnt_private *, unsigned short wListenInterval ); bool PSbIsNextTBTTWakeUp( - void *hDeviceContext + struct vnt_private * ); #endif /* __POWER_H__ */ diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c index 4c22bb318c79..ae10da21ddd0 100644 --- a/drivers/staging/vt6655/rf.c +++ b/drivers/staging/vt6655/rf.c @@ -420,9 +420,9 @@ static bool s_bAL7230Init(struct vnt_private *priv) { void __iomem *dwIoBase = priv->PortOffset; int ii; - bool bResult; + bool ret; - bResult = true; + ret = true; /* 3-wire control for normal mode */ VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0); @@ -432,21 +432,21 @@ static bool s_bAL7230Init(struct vnt_private *priv) BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */ for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++) - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]); /* PLL On */ MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); /* Calibration */ - MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */ + MACvTimer0MicroSDelay(priv, 150);/* 150us */ /* TXDCOC:active, RCK:disable */ - bResult &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); - MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */ + ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); + MACvTimer0MicroSDelay(priv, 30);/* 30us */ /* TXDCOC:disable, RCK:active */ - bResult &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); - MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */ + ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW)); + MACvTimer0MicroSDelay(priv, 30);/* 30us */ /* TXDCOC:disable, RCK:disable */ - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]); MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | SOFTPWRCTL_SWPE2 | @@ -459,7 +459,7 @@ static bool s_bAL7230Init(struct vnt_private *priv) /* 3-wire control for power saving mode */ VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */ - return bResult; + return ret; } /* Need to Pull PLLON low when writing channel registers through @@ -467,27 +467,27 @@ static bool s_bAL7230Init(struct vnt_private *priv) static bool s_bAL7230SelectChannel(struct 
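PSbIsNextTBTTWakeUp() above keeps a per-device countdown of beacon intervals and only arms the listen-next-beacon bit when one interval remains. Hardware poke aside, the bookkeeping reduces to the small helper sketched below; the function name and parameters are illustrative, the logic mirrors the rewritten driver code.

#include <linux/types.h>

/*
 * Called once per TBTT.  Returns true on the interval just before the
 * beacon we actually want to receive, so the caller can arm the wake-up
 * bit (PSCTL_LNBCN in the driver).  Nothing here touches hardware.
 */
static bool example_next_tbtt_wakeup(unsigned int listen_interval,
                                     unsigned int *wake_up_count)
{
        if (listen_interval <= 1)
                return false;           /* always listening, nothing to arm */

        if (!*wake_up_count)
                *wake_up_count = listen_interval;       /* reload countdown */

        --(*wake_up_count);

        return *wake_up_count == 1;
}
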
vnt_private *priv, unsigned char byChannel) { void __iomem *dwIoBase = priv->PortOffset; - bool bResult; + bool ret; - bResult = true; + ret = true; /* PLLON Off */ MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]); /* PLLOn On */ MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); /* Set Channel[7] = 0 to tell H/W channel is changing now. */ VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F)); - MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL7230); + MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230); /* Set Channel[7] = 1 to tell H/W channel change is done. */ VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80)); - return bResult; + return ret; } /* @@ -540,9 +540,9 @@ static bool RFbAL2230Init(struct vnt_private *priv) { void __iomem *dwIoBase = priv->PortOffset; int ii; - bool bResult; + bool ret; - bResult = true; + ret = true; /* 3-wire control for normal mode */ VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0); @@ -556,18 +556,18 @@ static bool RFbAL2230Init(struct vnt_private *priv) IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++) - bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]); - MACvTimer0MicroSDelay(dwIoBase, 30); /* delay 30 us */ + ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]); + MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */ /* PLL On */ MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3); - MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */ - bResult &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); - MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */ - bResult &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); - MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */ - bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]); + MACvTimer0MicroSDelay(priv, 150);/* 150us */ + ret &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); + MACvTimer0MicroSDelay(priv, 30);/* 30us */ + ret &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW)); + MACvTimer0MicroSDelay(priv, 30);/* 30us */ + ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]); MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 | SOFTPWRCTL_SWPE2 | @@ -577,26 +577,26 @@ static bool RFbAL2230Init(struct vnt_private *priv) /* 3-wire control for power saving mode */ VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */ - return bResult; + return ret; } static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel) { void __iomem *dwIoBase = priv->PortOffset; - bool bResult; + bool ret; - bResult = true; + ret = true; - bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]); - bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]); + ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]); + ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 
1]); /* Set Channel[7] = 0 to tell H/W channel is changing now. */ VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F)); - MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL2230); + MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230); /* Set Channel[7] = 1 to tell H/W channel change is done. */ VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80)); - return bResult; + return ret; } /* @@ -612,30 +612,28 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha * Return Value: true if succeeded; false if failed. * */ -bool RFbInit( - struct vnt_private *priv -) +bool RFbInit(struct vnt_private *priv) { - bool bResult = true; + bool ret = true; switch (priv->byRFType) { case RF_AIROHA: case RF_AL2230S: priv->byMaxPwrLevel = AL2230_PWR_IDX_LEN; - bResult = RFbAL2230Init(priv); + ret = RFbAL2230Init(priv); break; case RF_AIROHA7230: priv->byMaxPwrLevel = AL7230_PWR_IDX_LEN; - bResult = s_bAL7230Init(priv); + ret = s_bAL7230Init(priv); break; case RF_NOTHING: - bResult = true; + ret = true; break; default: - bResult = false; + ret = false; break; } - return bResult; + return ret; } /* @@ -654,26 +652,26 @@ bool RFbInit( bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel) { - bool bResult = true; + bool ret = true; switch (byRFType) { case RF_AIROHA: case RF_AL2230S: - bResult = RFbAL2230SelectChannel(priv, byChannel); + ret = RFbAL2230SelectChannel(priv, byChannel); break; /*{{ RobertYu: 20050104 */ case RF_AIROHA7230: - bResult = s_bAL7230SelectChannel(priv, byChannel); + ret = s_bAL7230SelectChannel(priv, byChannel); break; /*}} RobertYu */ case RF_NOTHING: - bResult = true; + ret = true; break; default: - bResult = false; + ret = false; break; } - return bResult; + return ret; } /* @@ -711,11 +709,11 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, return false; for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++) - MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]); + MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]); - MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]); + MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]); ii++; - MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]); + MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]); break; /* Need to check, PLLON need to be low for channel setting */ @@ -728,17 +726,17 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, if (uChannel <= CB_MAX_CHANNEL_24G) { for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++) - MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]); + MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]); } else { for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++) - MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]); + MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]); } - MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]); + MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]); ii++; - MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + 
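The rf.c functions renamed here all fold the result of every embedded register write into a single bool with ret &= IFRFbWriteEmbedded(...), so one failed write makes the whole init or channel switch report failure. A stripped-down sketch of that accumulation over an init table; the table contents and the write callback are placeholders.

#include <linux/kernel.h>
#include <linux/types.h>

static const u32 example_init_table[] = { 0x01234567, 0x89abcdef, 0x0badcafe };

static bool example_write_all(bool (*write_reg)(u32 word))
{
        bool ret = true;
        int i;

        for (i = 0; i < ARRAY_SIZE(example_init_table); i++)
                ret &= write_reg(example_init_table[i]); /* any failure sticks */

        return ret;
}
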
ii), dwAL7230ChannelTable1[uChannel-1]); + MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]); ii++; - MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]); + MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]); break; case RF_NOTHING: @@ -748,7 +746,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, return false; } - MACvSetMISCFifo(dwIoBase, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount)); + MACvSetMISCFifo(priv, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount)); return true; } @@ -772,7 +770,7 @@ bool RFbSetPower( u16 uCH ) { - bool bResult = true; + bool ret = true; unsigned char byPwr = 0; unsigned char byDec = 0; @@ -818,11 +816,11 @@ bool RFbSetPower( if (priv->byCurPwr == byPwr) return true; - bResult = RFbRawSetPower(priv, byPwr, rate); - if (bResult) + ret = RFbRawSetPower(priv, byPwr, rate); + if (ret) priv->byCurPwr = byPwr; - return bResult; + return ret; } /* @@ -845,7 +843,7 @@ bool RFbRawSetPower( unsigned int rate ) { - bool bResult = true; + bool ret = true; unsigned long dwMax7230Pwr = 0; if (byPwr >= priv->byMaxPwrLevel) @@ -853,22 +851,22 @@ bool RFbRawSetPower( switch (priv->byRFType) { case RF_AIROHA: - bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]); + ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]); if (rate <= RATE_11M) - bResult &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); else - bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); break; case RF_AL2230S: - bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]); + ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]); if (rate <= RATE_11M) { - bResult &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); - bResult &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } else { - bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); - bResult &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); + ret &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW); } break; @@ -879,13 +877,13 @@ bool RFbRawSetPower( dwMax7230Pwr = 0x080C0B00 | ((byPwr) << 12) | (BY_AL7230_REG_LEN << 3) | IFREGCTL_REGW; - bResult &= IFRFbWriteEmbedded(priv, dwMax7230Pwr); + ret &= IFRFbWriteEmbedded(priv, dwMax7230Pwr); break; default: break; } - return bResult; + return ret; } /*+ @@ -934,32 +932,32 @@ bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv, u16 byOldChannel, u16 byNewChannel) { - bool bResult; + bool ret; - bResult = true; + ret = true; /* if change between 11 b/g and 11a need to update the following * register * Channel Index 1~14 */ if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) { /* Change from 2.4G to 5G [Reg] */ - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]); - bResult &= 
IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]); } else if ((byOldChannel > CB_MAX_CHANNEL_24G) && (byNewChannel <= CB_MAX_CHANNEL_24G)) { /* Change from 5G to 2.4G [Reg] */ - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]); - bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]); + ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]); } - return bResult; + return ret; } diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h index 76b5f4127f95..4832666cc580 100644 --- a/drivers/staging/vt6656/device.h +++ b/drivers/staging/vt6656/device.h @@ -259,8 +259,8 @@ enum { }; /* flags for options */ -#define DEVICE_FLAGS_UNPLUG BIT(0) -#define DEVICE_FLAGS_DISCONNECTED BIT(1) +#define DEVICE_FLAGS_UNPLUG 0 +#define DEVICE_FLAGS_DISCONNECTED 1 struct vnt_private { /* mac80211 */ diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index ee8d1e1a24c2..f9afab77b79f 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -74,10 +74,10 @@ MODULE_PARM_DESC(tx_buffers, "Number of receive usb tx buffers"); #define LONG_RETRY_DEF 4 /* BasebandType[] baseband type selected - 0: indicate 802.11a type - 1: indicate 802.11b type - 2: indicate 802.11g type -*/ + * 0: indicate 802.11a type + * 1: indicate 802.11b type + * 2: indicate 802.11g type + */ #define BBP_TYPE_DEF 2 @@ -284,7 +284,8 @@ static int vnt_init_registers(struct vnt_private *priv) calib_rx_iq = priv->eeprom[EEP_OFS_CALIB_RX_IQ]; if (calib_tx_iq || calib_tx_dc || calib_rx_iq) { /* CR255, enable TX/RX IQ and - DC compensation mode */ + * DC compensation mode + */ vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xff, @@ -306,7 +307,8 @@ static int vnt_init_registers(struct vnt_private *priv) calib_rx_iq); } else { /* CR255, turn off - BB Calibration compensation */ + * BB Calibration compensation + */ vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xff, @@ -429,7 +431,7 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) for (ii = 0; ii < priv->num_tx_context; ii++) { tx_context = kmalloc(sizeof(struct vnt_usb_send_context), GFP_KERNEL); - if 
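The vt6656 device.h hunk above changes DEVICE_FLAGS_UNPLUG and DEVICE_FLAGS_DISCONNECTED from BIT() masks to plain bit numbers. That matches how the flags are consumed: test_bit(), set_bit() and clear_bit() take a bit number plus an unsigned long bitmap, so passing BIT(n) would address the wrong bit for anything past bit 0. A small usage sketch follows; the surrounding struct and functions are illustrative, only the two flag values come from the patch.

#include <linux/bitops.h>
#include <linux/types.h>

#define DEVICE_FLAGS_UNPLUG             0       /* bit numbers, not masks */
#define DEVICE_FLAGS_DISCONNECTED       1

struct example_priv {
        unsigned long flags;
};

static void example_mark_disconnected(struct example_priv *priv)
{
        set_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags);
}

static bool example_is_disconnected(struct example_priv *priv)
{
        return test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags);
}
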
(tx_context == NULL) + if (!tx_context) goto free_tx; priv->tx_context[ii] = tx_context; @@ -437,7 +439,7 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) tx_context->pkt_no = ii; /* allocate URBs */ - tx_context->urb = usb_alloc_urb(0, GFP_ATOMIC); + tx_context->urb = usb_alloc_urb(0, GFP_KERNEL); if (!tx_context->urb) { dev_err(&priv->usb->dev, "alloc tx urb failed\n"); goto free_tx; @@ -459,14 +461,14 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) rcb->priv = priv; /* allocate URBs */ - rcb->urb = usb_alloc_urb(0, GFP_ATOMIC); - if (rcb->urb == NULL) { + rcb->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!rcb->urb) { dev_err(&priv->usb->dev, "Failed to alloc rx urb\n"); goto free_rx_tx; } rcb->skb = dev_alloc_skb(priv->rx_buf_sz); - if (rcb->skb == NULL) + if (!rcb->skb) goto free_rx_tx; rcb->in_use = false; @@ -476,14 +478,14 @@ static bool vnt_alloc_bufs(struct vnt_private *priv) goto free_rx_tx; } - priv->interrupt_urb = usb_alloc_urb(0, GFP_ATOMIC); - if (priv->interrupt_urb == NULL) { + priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!priv->interrupt_urb) { dev_err(&priv->usb->dev, "Failed to alloc int urb\n"); goto free_rx_tx; } priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL); - if (priv->int_buf.data_buf == NULL) { + if (!priv->int_buf.data_buf) { usb_free_urb(priv->interrupt_urb); goto free_rx_tx; } diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c index c025dab0f62c..e322b7d8c617 100644 --- a/drivers/staging/vt6656/power.c +++ b/drivers/staging/vt6656/power.c @@ -103,7 +103,7 @@ void vnt_disable_power_saving(struct vnt_private *priv) /* disable power saving hw function */ vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0, - 0, 0, NULL); + 0, 0, NULL); /* clear AutoSleep */ vnt_mac_reg_bits_off(priv, MAC_REG_PSCFG, PSCFG_AUTOSLEEP); diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index 816206c92f57..79a3108719a6 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c @@ -917,8 +917,8 @@ void vnt_rf_table_download(struct vnt_private *priv) if (priv->rf_type == RF_AIROHA7230) { length1 = CB_AL7230_INIT_SEQ * 3; length2 = CB_MAX_CHANNEL * 3; - addr1 = &(al7230_init_table_amode[0][0]); - addr2 = &(al7230_channel_table2[0][0]); + addr1 = &al7230_init_table_amode[0][0]; + addr2 = &al7230_channel_table2[0][0]; memcpy(array, addr1, length1); diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index 351a99f3d684..f546553de66f 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -101,9 +101,9 @@ void vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data) static void vnt_start_interrupt_urb_complete(struct urb *urb) { struct vnt_private *priv = urb->context; - int status; + int status = urb->status; - switch (urb->status) { + switch (status) { case 0: case -ETIMEDOUT: break; @@ -116,9 +116,7 @@ static void vnt_start_interrupt_urb_complete(struct urb *urb) break; } - status = urb->status; - - if (status != STATUS_SUCCESS) { + if (status) { priv->int_buf.in_use = false; dev_dbg(&priv->usb->dev, "%s status = %d\n", __func__, status); @@ -207,10 +205,9 @@ static void vnt_submit_rx_urb_complete(struct urb *urb) int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb) { int status = 0; - struct urb *urb; + struct urb *urb = rcb->urb; - urb = rcb->urb; - if (rcb->skb == NULL) { + if (!rcb->skb) { dev_dbg(&priv->usb->dev, "rcb->skb is null\n"); return status; } @@ -224,7 +221,7 @@ int 
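vnt_alloc_bufs() above switches its URB allocations from GFP_ATOMIC to GFP_KERNEL and tidies the NULL checks. Since this path runs from probe in process context with no locks held, the allocator may sleep and GFP_KERNEL is the appropriate flag; GFP_ATOMIC remains necessary only where sleeping is forbidden, such as the submit calls that still use it in the URB paths. A minimal sketch of the pattern, with an invented helper name and error path:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

/* Allocate one URB plus its buffer in process context; both may sleep. */
static int example_alloc_one(struct urb **urb, void **buf, size_t len)
{
        *urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!*urb)
                return -ENOMEM;

        *buf = kmalloc(len, GFP_KERNEL);
        if (!*buf) {
                usb_free_urb(*urb);
                *urb = NULL;
                return -ENOMEM;
        }

        return 0;
}
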
vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb) rcb); status = usb_submit_urb(urb, GFP_ATOMIC); - if (status != 0) { + if (status) { dev_dbg(&priv->usb->dev, "Submit Rx URB failed %d\n", status); return STATUS_FAILURE; } @@ -269,15 +266,13 @@ int vnt_tx_context(struct vnt_private *priv, struct vnt_usb_send_context *context) { int status; - struct urb *urb; + struct urb *urb = context->urb; if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) { context->in_use = false; return STATUS_RESOURCES; } - urb = context->urb; - usb_fill_bulk_urb(urb, priv->usb, usb_sndbulkpipe(priv->usb, 3), @@ -287,7 +282,7 @@ int vnt_tx_context(struct vnt_private *priv, context); status = usb_submit_urb(urb, GFP_ATOMIC); - if (status != 0) { + if (status) { dev_dbg(&priv->usb->dev, "Submit Tx URB failed %d\n", status); context->in_use = false; diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile index 20a5cb9d4f4c..acc3f3e8481b 100644 --- a/drivers/staging/wilc1000/Makefile +++ b/drivers/staging/wilc1000/Makefile @@ -1,11 +1,9 @@ obj-$(CONFIG_WILC1000) += wilc1000.o -ccflags-y += -DSTA_FIRMWARE=\"atmel/wilc1000_fw.bin\" \ - -DAP_FIRMWARE=\"atmel/wilc1000_ap_fw.bin\" \ - -DP2P_CONCURRENCY_FIRMWARE=\"atmel/wilc1000_p2p_fw.bin\" +ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \ + -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\" ccflags-y += -I$(src)/ -DWILC_ASIC_A0 -DWILC_DEBUGFS -#ccflags-y += -DTCP_ACK_FILTER wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \ wilc_msgqueue.o \ diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c index 2d4d3f190c01..2c4ae1fc8435 100644 --- a/drivers/staging/wilc1000/coreconfigurator.c +++ b/drivers/staging/wilc1000/coreconfigurator.c @@ -1,22 +1,11 @@ - -/*! - * @file coreconfigurator.c - * @brief - * @author - * @sa coreconfigurator.h - * @date 1 Mar 2012 - * @version 1.0 - */ - #include "coreconfigurator.h" #include "wilc_wlan_if.h" #include "wilc_wlan.h" #include <linux/errno.h> #include <linux/slab.h> #define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ - BEACON_INTERVAL_LEN + CAP_INFO_LEN) + BEACON_INTERVAL_LEN + CAP_INFO_LEN) -/* Basic Frame Type Codes (2-bit) */ enum basic_frame_type { FRAME_TYPE_CONTROL = 0x04, FRAME_TYPE_DATA = 0x08, @@ -25,7 +14,6 @@ enum basic_frame_type { FRAME_TYPE_FORCE_32BIT = 0xFFFFFFFF }; -/* Frame Type and Subtype Codes (6-bit) */ enum sub_frame_type { ASSOC_REQ = 0x00, ASSOC_RSP = 0x10, @@ -65,7 +53,6 @@ enum sub_frame_type { FRAME_SUBTYPE_FORCE_32BIT = 0xFFFFFFFF }; -/* Element ID of various Information Elements */ enum info_element_id { ISSID = 0, /* Service Set Identifier */ ISUPRATES = 1, /* Supported Rates */ @@ -109,8 +96,6 @@ enum info_element_id { INFOELEM_ID_FORCE_32BIT = 0xFFFFFFFF }; -/* This function extracts the beacon period field from the beacon or probe */ -/* response frame. */ static inline u16 get_beacon_period(u8 *data) { u16 bcn_per; @@ -147,54 +132,36 @@ static inline u32 get_beacon_timestamp_hi(u8 *data) return time_stamp; } -/* This function extracts the 'frame type and sub type' bits from the MAC */ -/* header of the input frame. */ -/* Returns the value in the LSB of the returned value. */ static inline enum sub_frame_type get_sub_type(u8 *header) { return ((enum sub_frame_type)(header[0] & 0xFC)); } -/* This function extracts the 'to ds' bit from the MAC header of the input */ -/* frame. */ -/* Returns the value in the LSB of the returned value. 
*/ static inline u8 get_to_ds(u8 *header) { return (header[1] & 0x01); } -/* This function extracts the 'from ds' bit from the MAC header of the input */ -/* frame. */ -/* Returns the value in the LSB of the returned value. */ static inline u8 get_from_ds(u8 *header) { return ((header[1] & 0x02) >> 1); } -/* This function extracts the MAC Address in 'address1' field of the MAC */ -/* header and updates the MAC Address in the allocated 'addr' variable. */ static inline void get_address1(u8 *pu8msa, u8 *addr) { memcpy(addr, pu8msa + 4, 6); } -/* This function extracts the MAC Address in 'address2' field of the MAC */ -/* header and updates the MAC Address in the allocated 'addr' variable. */ static inline void get_address2(u8 *pu8msa, u8 *addr) { memcpy(addr, pu8msa + 10, 6); } -/* This function extracts the MAC Address in 'address3' field of the MAC */ -/* header and updates the MAC Address in the allocated 'addr' variable. */ static inline void get_address3(u8 *pu8msa, u8 *addr) { memcpy(addr, pu8msa + 16, 6); } -/* This function extracts the BSSID from the incoming WLAN packet based on */ -/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr' */ -/* variable. */ static inline void get_BSSID(u8 *data, u8 *bssid) { if (get_from_ds(data) == 1) @@ -205,20 +172,15 @@ static inline void get_BSSID(u8 *data, u8 *bssid) get_address3(data, bssid); } -/* This function extracts the SSID from a beacon/probe response frame */ static inline void get_ssid(u8 *data, u8 *ssid, u8 *p_ssid_len) { u8 len = 0; u8 i = 0; u8 j = 0; - len = data[MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + - CAP_INFO_LEN + 1]; - j = MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + - CAP_INFO_LEN + 2; + len = data[TAG_PARAM_OFFSET + 1]; + j = TAG_PARAM_OFFSET + 2; - /* If the SSID length field is set wrongly to a value greater than the */ - /* allowed maximum SSID length limit, reset the length to 0 */ if (len >= MAX_SSID_LEN) len = 0; @@ -230,8 +192,6 @@ static inline void get_ssid(u8 *data, u8 *ssid, u8 *p_ssid_len) *p_ssid_len = len; } -/* This function extracts the capability info field from the beacon or probe */ -/* response frame. */ static inline u16 get_cap_info(u8 *data) { u16 cap_info = 0; @@ -240,8 +200,6 @@ static inline u16 get_cap_info(u8 *data) st = get_sub_type(data); - /* Location of the Capability field is different for Beacon and */ - /* Association frames. */ if ((st == BEACON) || (st == PROBE_RSP)) index += TIME_STAMP_LEN + BEACON_INTERVAL_LEN; @@ -251,8 +209,6 @@ static inline u16 get_cap_info(u8 *data) return cap_info; } -/* This function extracts the capability info field from the Association */ -/* response frame. 
*/ static inline u16 get_assoc_resp_cap_info(u8 *data) { u16 cap_info; @@ -263,8 +219,6 @@ static inline u16 get_assoc_resp_cap_info(u8 *data) return cap_info; } -/* This function extracts the association status code from the incoming */ -/* association response frame and returns association status code */ static inline u16 get_asoc_status(u8 *data) { u16 asoc_status; @@ -275,8 +229,6 @@ static inline u16 get_asoc_status(u8 *data) return asoc_status; } -/* This function extracts association ID from the incoming association */ -/* response frame */ static inline u16 get_asoc_id(u8 *data) { u16 asoc_id; @@ -287,347 +239,147 @@ static inline u16 get_asoc_id(u8 *data) return asoc_id; } -static u8 *get_tim_elm(u8 *pu8msa, u16 u16RxLen, u16 u16TagParamOffset) +static u8 *get_tim_elm(u8 *pu8msa, u16 rx_len, u16 tag_param_offset) { - u16 u16index; - - /*************************************************************************/ - /* Beacon Frame - Frame Body */ - /* --------------------------------------------------------------------- */ - /* |Timestamp |BeaconInt |CapInfo |SSID |SupRates |DSParSet |TIM elm | */ - /* --------------------------------------------------------------------- */ - /* |8 |2 |2 |2-34 |3-10 |3 |4-256 | */ - /* --------------------------------------------------------------------- */ - /* */ - /*************************************************************************/ - - u16index = u16TagParamOffset; - - /* Search for the TIM Element Field and return if the element is found */ - while (u16index < (u16RxLen - FCS_LEN)) { - if (pu8msa[u16index] == ITIM) - return &pu8msa[u16index]; - u16index += (IE_HDR_LEN + pu8msa[u16index + 1]); + u16 index; + + index = tag_param_offset; + + while (index < (rx_len - FCS_LEN)) { + if (pu8msa[index] == ITIM) + return &pu8msa[index]; + index += (IE_HDR_LEN + pu8msa[index + 1]); } return NULL; } -/* This function gets the current channel information from - * the 802.11n beacon/probe response frame */ -static u8 get_current_channel_802_11n(u8 *pu8msa, u16 u16RxLen) +static u8 get_current_channel_802_11n(u8 *pu8msa, u16 rx_len) { u16 index; index = TAG_PARAM_OFFSET; - while (index < (u16RxLen - FCS_LEN)) { + while (index < (rx_len - FCS_LEN)) { if (pu8msa[index] == IDSPARMS) return pu8msa[index + 2]; - /* Increment index by length information and header */ index += pu8msa[index + 1] + IE_HDR_LEN; } - /* Return current channel information from the MIB, if beacon/probe */ - /* response frame does not contain the DS parameter set IE */ - /* return (mget_CurrentChannel() + 1); */ - return 0; /* no MIB here */ + return 0; } -/** - * @brief parses the received 'N' message - * @details - * @param[in] pu8MsgBuffer The message to be parsed - * @param[out] ppstrNetworkInfo pointer to pointer to the structure containing the parsed Network Info - * @return Error code indicating success/failure - * @note - * @author mabubakr - * @date 1 Mar 2012 - * @version 1.0 - */ -s32 wilc_parse_network_info(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo) +s32 wilc_parse_network_info(u8 *msg_buffer, + struct network_info **ret_network_info) { - tstrNetworkInfo *pstrNetworkInfo = NULL; - u8 u8MsgType = 0; - u8 u8MsgID = 0; - u16 u16MsgLen = 0; + struct network_info *network_info = NULL; + u8 msg_type = 0; + u8 msg_id = 0; + u16 msg_len = 0; - u16 u16WidID = (u16)WID_NIL; - u16 u16WidLen = 0; - u8 *pu8WidVal = NULL; + u16 wid_id = (u16)WID_NIL; + u16 wid_len = 0; + u8 *wid_val = NULL; - u8MsgType = pu8MsgBuffer[0]; + msg_type = msg_buffer[0]; - /* Check whether the 
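get_tim_elm() and get_current_channel_802_11n() above both walk the tagged-parameter area of a beacon as ID/length/value triplets, stepping by IE_HDR_LEN plus the element length. A generic sketch of that walk is below; it adds a bounds check on the two-byte header, and note that mainline already provides cfg80211_find_ie() for the same job. Only IE_HDR_LEN comes from the driver, the rest is illustrative.

#include <linux/types.h>

#define IE_HDR_LEN      2       /* one byte element ID + one byte length */

/*
 * Return the first information element with the requested ID, or NULL.
 * buf/len describe the IE area only (fixed fields and FCS already
 * stripped by the caller).
 */
static const u8 *example_find_ie(const u8 *buf, u16 len, u8 eid)
{
        u16 index = 0;

        while (index + IE_HDR_LEN <= len) {
                if (buf[index] == eid)
                        return &buf[index];
                index += IE_HDR_LEN + buf[index + 1];
        }

        return NULL;
}
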
received message type is 'N' */ - if ('N' != u8MsgType) { - PRINT_ER("Received Message format incorrect.\n"); + if ('N' != msg_type) return -EFAULT; - } - - /* Extract message ID */ - u8MsgID = pu8MsgBuffer[1]; - - /* Extract message Length */ - u16MsgLen = MAKE_WORD16(pu8MsgBuffer[2], pu8MsgBuffer[3]); - - /* Extract WID ID */ - u16WidID = MAKE_WORD16(pu8MsgBuffer[4], pu8MsgBuffer[5]); - - /* Extract WID Length */ - u16WidLen = MAKE_WORD16(pu8MsgBuffer[6], pu8MsgBuffer[7]); - /* Assign a pointer to the WID value */ - pu8WidVal = &pu8MsgBuffer[8]; + msg_id = msg_buffer[1]; + msg_len = MAKE_WORD16(msg_buffer[2], msg_buffer[3]); + wid_id = MAKE_WORD16(msg_buffer[4], msg_buffer[5]); + wid_len = MAKE_WORD16(msg_buffer[6], msg_buffer[7]); + wid_val = &msg_buffer[8]; - /* parse the WID value of the WID "WID_NEWORK_INFO" */ { - u8 *pu8msa = NULL; - u16 u16RxLen = 0; - u8 *pu8TimElm = NULL; - u8 *pu8IEs = NULL; - u16 u16IEsLen = 0; - u8 u8index = 0; - u32 u32Tsf_Lo; - u32 u32Tsf_Hi; - - pstrNetworkInfo = kzalloc(sizeof(tstrNetworkInfo), GFP_KERNEL); - if (!pstrNetworkInfo) + u8 *msa = NULL; + u16 rx_len = 0; + u8 *tim_elm = NULL; + u8 *ies = NULL; + u16 ies_len = 0; + u8 index = 0; + u32 tsf_lo; + u32 tsf_hi; + + network_info = kzalloc(sizeof(*network_info), GFP_KERNEL); + if (!network_info) return -ENOMEM; - pstrNetworkInfo->s8rssi = pu8WidVal[0]; + network_info->rssi = wid_val[0]; - /* Assign a pointer to msa "Mac Header Start Address" */ - pu8msa = &pu8WidVal[1]; + msa = &wid_val[1]; - u16RxLen = u16WidLen - 1; + rx_len = wid_len - 1; + network_info->cap_info = get_cap_info(msa); + network_info->tsf_lo = get_beacon_timestamp_lo(msa); - /* parse msa*/ + tsf_lo = get_beacon_timestamp_lo(msa); + tsf_hi = get_beacon_timestamp_hi(msa); - /* Get the cap_info */ - pstrNetworkInfo->u16CapInfo = get_cap_info(pu8msa); - /* Get time-stamp [Low only 32 bit] */ - pstrNetworkInfo->u32Tsf = get_beacon_timestamp_lo(pu8msa); - PRINT_D(CORECONFIG_DBG, "TSF :%x\n", pstrNetworkInfo->u32Tsf); + network_info->tsf_hi = tsf_lo | ((u64)tsf_hi << 32); - /* Get full time-stamp [Low and High 64 bit] */ - u32Tsf_Lo = get_beacon_timestamp_lo(pu8msa); - u32Tsf_Hi = get_beacon_timestamp_hi(pu8msa); + get_ssid(msa, network_info->ssid, &network_info->ssid_len); + get_BSSID(msa, network_info->bssid); - pstrNetworkInfo->u64Tsf = u32Tsf_Lo | ((u64)u32Tsf_Hi << 32); + network_info->ch = get_current_channel_802_11n(msa, + rx_len + FCS_LEN); - /* Get SSID */ - get_ssid(pu8msa, pstrNetworkInfo->au8ssid, &pstrNetworkInfo->u8SsidLen); + index = MAC_HDR_LEN + TIME_STAMP_LEN; - /* Get BSSID */ - get_BSSID(pu8msa, pstrNetworkInfo->au8bssid); + network_info->beacon_period = get_beacon_period(msa + index); - /* - * Extract current channel information from - * the beacon/probe response frame - */ - pstrNetworkInfo->u8channel = get_current_channel_802_11n(pu8msa, - u16RxLen + FCS_LEN); + index += BEACON_INTERVAL_LEN + CAP_INFO_LEN; - /* Get beacon period */ - u8index = MAC_HDR_LEN + TIME_STAMP_LEN; + tim_elm = get_tim_elm(msa, rx_len + FCS_LEN, index); + if (tim_elm) + network_info->dtim_period = tim_elm[3]; + ies = &msa[TAG_PARAM_OFFSET]; + ies_len = rx_len - TAG_PARAM_OFFSET; - pstrNetworkInfo->u16BeaconPeriod = get_beacon_period(pu8msa + u8index); - - u8index += BEACON_INTERVAL_LEN + CAP_INFO_LEN; - - /* Get DTIM Period */ - pu8TimElm = get_tim_elm(pu8msa, u16RxLen + FCS_LEN, u8index); - if (pu8TimElm) - pstrNetworkInfo->u8DtimPeriod = pu8TimElm[3]; - pu8IEs = &pu8msa[MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN]; - 
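wilc_parse_network_info() above rebuilds the 64-bit TSF from the two 32-bit beacon timestamp words; the (u64) cast before the shift is what keeps the high word from being shifted inside a 32-bit intermediate. A worked one-liner of the idiom with placeholder values:

#include <linux/types.h>

static u64 example_combine_tsf(u32 tsf_lo, u32 tsf_hi)
{
        /*
         * Without the (u64) cast, tsf_hi << 32 would shift a 32-bit value
         * by its own width, which is undefined behaviour in C.
         */
        return (u64)tsf_lo | ((u64)tsf_hi << 32);
}

/* Example: lo = 0x9abcdef0, hi = 0x12345678 gives 0x123456789abcdef0. */
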
u16IEsLen = u16RxLen - (MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN); - - if (u16IEsLen > 0) { - pstrNetworkInfo->pu8IEs = kmemdup(pu8IEs, u16IEsLen, - GFP_KERNEL); - if (!pstrNetworkInfo->pu8IEs) + if (ies_len > 0) { + network_info->ies = kmemdup(ies, ies_len, GFP_KERNEL); + if (!network_info->ies) return -ENOMEM; } - pstrNetworkInfo->u16IEsLen = u16IEsLen; - + network_info->ies_len = ies_len; } - *ppstrNetworkInfo = pstrNetworkInfo; + *ret_network_info = network_info; return 0; } -/** - * @brief Deallocates the parsed Network Info - * @details - * @param[in] pstrNetworkInfo Network Info to be deallocated - * @return Error code indicating success/failure - * @note - * @author mabubakr - * @date 1 Mar 2012 - * @version 1.0 - */ -s32 wilc_dealloc_network_info(tstrNetworkInfo *pstrNetworkInfo) +s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len, + struct connect_resp_info **ret_connect_resp_info) { - s32 s32Error = 0; - - if (pstrNetworkInfo) { - if (pstrNetworkInfo->pu8IEs) { - kfree(pstrNetworkInfo->pu8IEs); - pstrNetworkInfo->pu8IEs = NULL; - } else { - s32Error = -EFAULT; - } - - kfree(pstrNetworkInfo); - pstrNetworkInfo = NULL; - - } else { - s32Error = -EFAULT; - } - - return s32Error; -} + struct connect_resp_info *connect_resp_info = NULL; + u16 assoc_resp_len = 0; + u8 *ies = NULL; + u16 ies_len = 0; -/** - * @brief parses the received Association Response frame - * @details - * @param[in] pu8Buffer The Association Response frame to be parsed - * @param[out] ppstrConnectRespInfo pointer to pointer to the structure containing the parsed Association Response Info - * @return Error code indicating success/failure - * @note - * @author mabubakr - * @date 2 Apr 2012 - * @version 1.0 - */ -s32 wilc_parse_assoc_resp_info(u8 *pu8Buffer, u32 u32BufferLen, - tstrConnectRespInfo **ppstrConnectRespInfo) -{ - s32 s32Error = 0; - tstrConnectRespInfo *pstrConnectRespInfo = NULL; - u16 u16AssocRespLen = 0; - u8 *pu8IEs = NULL; - u16 u16IEsLen = 0; - - pstrConnectRespInfo = kzalloc(sizeof(tstrConnectRespInfo), GFP_KERNEL); - if (!pstrConnectRespInfo) + connect_resp_info = kzalloc(sizeof(*connect_resp_info), GFP_KERNEL); + if (!connect_resp_info) return -ENOMEM; - /* u16AssocRespLen = pu8Buffer[0]; */ - u16AssocRespLen = (u16)u32BufferLen; - - /* get the status code */ - pstrConnectRespInfo->u16ConnectStatus = get_asoc_status(pu8Buffer); - if (pstrConnectRespInfo->u16ConnectStatus == SUCCESSFUL_STATUSCODE) { - - /* get the capability */ - pstrConnectRespInfo->u16capability = get_assoc_resp_cap_info(pu8Buffer); + assoc_resp_len = (u16)buffer_len; - /* get the Association ID */ - pstrConnectRespInfo->u16AssocID = get_asoc_id(pu8Buffer); + connect_resp_info->status = get_asoc_status(buffer); + if (connect_resp_info->status == SUCCESSFUL_STATUSCODE) { + connect_resp_info->capability = get_assoc_resp_cap_info(buffer); + connect_resp_info->assoc_id = get_asoc_id(buffer); - /* get the Information Elements */ - pu8IEs = &pu8Buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN]; - u16IEsLen = u16AssocRespLen - (CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN); + ies = &buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN]; + ies_len = assoc_resp_len - (CAP_INFO_LEN + STATUS_CODE_LEN + + AID_LEN); - pstrConnectRespInfo->pu8RespIEs = kmemdup(pu8IEs, u16IEsLen, GFP_KERNEL); - if (!pstrConnectRespInfo->pu8RespIEs) + connect_resp_info->ies = kmemdup(ies, ies_len, GFP_KERNEL); + if (!connect_resp_info->ies) return -ENOMEM; - pstrConnectRespInfo->u16RespIEsLen = u16IEsLen; - } - - *ppstrConnectRespInfo = 
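Both parsers copy the information elements out of the frame with kmemdup() rather than a separate allocation plus memcpy(). A sketch of that copy-and-own pattern, including the matching release presumably handled by the callers now that the old dealloc helpers are removed; struct and function names here are illustrative.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_parsed_ies {
        u8 *ies;
        u16 ies_len;
};

static int example_copy_ies(struct example_parsed_ies *dst,
                            const u8 *src, u16 len)
{
        if (!len)
                return 0;               /* nothing to copy is not an error */

        dst->ies = kmemdup(src, len, GFP_KERNEL);  /* alloc + memcpy in one */
        if (!dst->ies)
                return -ENOMEM;

        dst->ies_len = len;
        return 0;
}

static void example_free_ies(struct example_parsed_ies *dst)
{
        kfree(dst->ies);                /* kfree(NULL) is a no-op */
        dst->ies = NULL;
        dst->ies_len = 0;
}
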
-
-	return s32Error;
-}
-
-/**
- * @brief Deallocates the parsed Association Response Info
- * @details
- * @param[in] pstrNetworkInfo Network Info to be deallocated
- * @return Error code indicating success/failure
- * @note
- * @author mabubakr
- * @date 2 Apr 2012
- * @version 1.0
- */
-s32 wilc_dealloc_assoc_resp_info(tstrConnectRespInfo *pstrConnectRespInfo)
-{
-	s32 s32Error = 0;
-
-	if (pstrConnectRespInfo) {
-		if (pstrConnectRespInfo->pu8RespIEs) {
-			kfree(pstrConnectRespInfo->pu8RespIEs);
-			pstrConnectRespInfo->pu8RespIEs = NULL;
-		} else {
-			s32Error = -EFAULT;
-		}
-
-		kfree(pstrConnectRespInfo);
-		pstrConnectRespInfo = NULL;
-
-	} else {
-		s32Error = -EFAULT;
+		connect_resp_info->ies_len = ies_len;
 	}

-	return s32Error;
-}
-
-/**
- * @brief sends certain Configuration Packet based on the input WIDs pstrWIDs
- * using driver config layer
- *
- * @details
- * @param[in] pstrWIDs WIDs to be sent in the configuration packet
- * @param[in] u32WIDsCount number of WIDs to be sent in the configuration packet
- * @param[out] pu8RxResp The received Packet Response
- * @param[out] ps32RxRespLen Length of the received Packet Response
- * @return Error code indicating success/failure
- * @note
- * @author mabubakr
- * @date 1 Mar 2012
- * @version 1.0
- */
-s32 wilc_send_config_pkt(struct wilc *wilc, u8 mode, struct wid *wids,
-			 u32 count, u32 drv)
-{
-	s32 counter = 0, ret = 0;
-
-	if (mode == GET_CFG) {
-		for (counter = 0; counter < count; counter++) {
-			PRINT_INFO(CORECONFIG_DBG, "Sending CFG packet [%d][%d]\n", !counter,
-				   (counter == count - 1));
-			if (!wilc_wlan_cfg_get(wilc, !counter,
-					       wids[counter].id,
-					       (counter == count - 1),
-					       drv)) {
-				ret = -ETIMEDOUT;
-				printk("[Sendconfigpkt]Get Timed out\n");
-				break;
-			}
-		}
-		counter = 0;
-		for (counter = 0; counter < count; counter++) {
-			wids[counter].size = wilc_wlan_cfg_get_val(
-					wids[counter].id,
-					wids[counter].val,
-					wids[counter].size);
-		}
-	} else if (mode == SET_CFG) {
-		for (counter = 0; counter < count; counter++) {
-			PRINT_D(CORECONFIG_DBG, "Sending config SET PACKET WID:%x\n", wids[counter].id);
-			if (!wilc_wlan_cfg_set(wilc, !counter,
-					       wids[counter].id,
-					       wids[counter].val,
-					       wids[counter].size,
-					       (counter == count - 1),
-					       drv)) {
-				ret = -ETIMEDOUT;
-				printk("[Sendconfigpkt]Set Timed out\n");
-				break;
-			}
-		}
-	}
+	*ret_connect_resp_info = connect_resp_info;

-	return ret;
+	return 0;
 }
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index fc43d04ca1da..076e06ac0d66 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -50,7 +50,7 @@
 #define MAKE_WORD16(lsb, msb) ((((u16)(msb) << 8) & 0xFF00) | (lsb))
 #define MAKE_WORD32(lsw, msw) ((((u32)(msw) << 16) & 0xFFFF0000) | (lsw))

-typedef enum {
+enum connect_status {
 	SUCCESSFUL_STATUSCODE = 0,
 	UNSPEC_FAIL = 1,
 	UNSUP_CAP = 10,
@@ -68,13 +68,6 @@
 	SHORT_SLOT_UNSUP = 25,
 	OFDM_DSSS_UNSUP = 26,
 	CONNECT_STS_FORCE_16_BIT = 0xFFFF
-} tenuConnectSts;
-
-struct wid {
-	u16 id;
-	enum wid_type type;
-	s32 size;
-	s8 *val;
 };

 typedef struct {
@@ -83,58 +76,54 @@ typedef struct {
 	u8 u8Full;
 	u8 u8Index;
 	s8 as8RSSI[NUM_RSSI];
 } tstrRSSI;

-typedef struct {
-	s8 s8rssi;
-	u16 u16CapInfo;
-	u8 au8ssid[MAX_SSID_LEN];
-	u8 u8SsidLen;
-	u8 au8bssid[6];
-	u16 u16BeaconPeriod;
-	u8 u8DtimPeriod;
-	u8 u8channel;
-	unsigned long u32TimeRcvdInScanCached;
-	unsigned long u32TimeRcvdInScan;
-	bool bNewNetwork;
-	u8 u8Found;
-	u32 u32Tsf;
-	u8 *pu8IEs;
-	u16 u16IEsLen;
-	void *pJoinParams;
-	tstrRSSI strRssi;
-	u64 u64Tsf;
-} tstrNetworkInfo;
+struct network_info {
+	s8 rssi;
+	u16 cap_info;
+	u8 ssid[MAX_SSID_LEN];
+	u8 ssid_len;
+	u8 bssid[6];
+	u16 beacon_period;
+	u8 dtim_period;
+	u8 ch;
+	unsigned long time_scan_cached;
+	unsigned long time_scan;
+	bool new_network;
+	u8 found;
+	u32 tsf_lo;
+	u8 *ies;
+	u16 ies_len;
+	void *join_params;
+	tstrRSSI str_rssi;
+	u64 tsf_hi;
+};

-typedef struct {
-	u16 u16capability;
-	u16 u16ConnectStatus;
-	u16 u16AssocID;
-	u8 *pu8RespIEs;
-	u16 u16RespIEsLen;
-} tstrConnectRespInfo;
+struct connect_resp_info {
+	u16 capability;
+	u16 status;
+	u16 assoc_id;
+	u8 *ies;
+	u16 ies_len;
+};

-typedef struct {
-	u8 au8bssid[6];
-	u8 *pu8ReqIEs;
-	size_t ReqIEsLen;
-	u8 *pu8RespIEs;
-	u16 u16RespIEsLen;
-	u16 u16ConnectStatus;
-} tstrConnectInfo;
+struct connect_info {
+	u8 bssid[6];
+	u8 *req_ies;
+	size_t req_ies_len;
+	u8 *resp_ies;
+	u16 resp_ies_len;
+	u16 status;
+};

-typedef struct {
-	u16 u16reason;
+struct disconnect_info {
+	u16 reason;
 	u8 *ie;
 	size_t ie_len;
-} tstrDisconnectNotifInfo;
-
-s32 wilc_send_config_pkt(struct wilc *wilc, u8 mode, struct wid *wids,
-			 u32 count, u32 drv);
-s32 wilc_parse_network_info(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo);
-s32 wilc_dealloc_network_info(tstrNetworkInfo *pstrNetworkInfo);
+};

-s32 wilc_parse_assoc_resp_info(u8 *pu8Buffer, u32 u32BufferLen,
-			       tstrConnectRespInfo **ppstrConnectRespInfo);
-s32 wilc_dealloc_assoc_resp_info(tstrConnectRespInfo *pstrConnectRespInfo);
+s32 wilc_parse_network_info(u8 *msg_buffer,
+			    struct network_info **ret_network_info);
+s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
+			       struct connect_resp_info **ret_connect_resp_info);

 void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
 				 u32 u32Length);
 void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 8c7752034032..0a922c7c7cbf 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -20,7 +20,6 @@
 #define HOST_IF_MSG_SET_CHANNEL 7
 #define HOST_IF_MSG_DISCONNECT 8
 #define HOST_IF_MSG_GET_RSSI 9
-#define HOST_IF_MSG_GET_CHNL 10
 #define HOST_IF_MSG_ADD_BEACON 11
 #define HOST_IF_MSG_DEL_BEACON 12
 #define HOST_IF_MSG_ADD_STATION 13
@@ -33,20 +32,17 @@
 #define HOST_IF_MSG_REMAIN_ON_CHAN 20
 #define HOST_IF_MSG_REGISTER_FRAME 21
 #define HOST_IF_MSG_LISTEN_TIMER_FIRED 22
-#define HOST_IF_MSG_GET_LINKSPEED 23
 #define HOST_IF_MSG_SET_WFIDRV_HANDLER 24
-#define HOST_IF_MSG_SET_MAC_ADDRESS 25
 #define HOST_IF_MSG_GET_MAC_ADDRESS 26
 #define HOST_IF_MSG_SET_OPERATION_MODE 27
 #define HOST_IF_MSG_SET_IPADDRESS 28
 #define HOST_IF_MSG_GET_IPADDRESS 29
-#define HOST_IF_MSG_FLUSH_CONNECT 30
 #define HOST_IF_MSG_GET_STATISTICS 31
 #define HOST_IF_MSG_SET_MULTICAST_FILTER 32
 #define HOST_IF_MSG_DEL_BA_SESSION 34
-#define HOST_IF_MSG_Q_IDLE 35
 #define HOST_IF_MSG_DEL_ALL_STA 36
-#define HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS 34
+#define HOST_IF_MSG_SET_TX_POWER 38
+#define HOST_IF_MSG_GET_TX_POWER 39
 #define HOST_IF_MSG_EXIT 100

 #define HOST_IF_SCAN_TIMEOUT 4000
@@ -57,9 +53,8 @@
 #define BLOCK_ACK_REQ_SIZE 0x14
 #define FALSE_FRMWR_CHANNEL 100

-struct cfg_param_attr {
-	struct cfg_param_val cfg_attr_info;
-};
+#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
+#define DEFAULT_LINK_SPEED 72

 struct host_if_wpa_attr {
 	u8 *key;
@@ -163,6 +158,10 @@
 	u8 mac[6];
 };

+struct tx_power {
+	u8 tx_pwr;
+};
+
 union message_body {
struct scan_attr scan_info; struct connect_attr con_info; @@ -188,6 +187,7 @@ union message_body { struct reg_frame reg_frame; char *data; struct del_all_sta del_all_sta_info; + struct tx_power tx_power; }; struct host_if_msg { @@ -201,7 +201,7 @@ struct join_bss_param { u8 dtim_period; u16 beacon_period; u16 cap_info; - u8 au8bssid[6]; + u8 bssid[6]; char ssid[MAX_SSID_LEN]; u8 ssid_len; u8 supp_rates[MAX_RATES_SUPPORTED + 1]; @@ -225,11 +225,11 @@ struct join_bss_param { u8 start_time[4]; }; -struct host_if_drv *terminated_handle; +static struct host_if_drv *terminated_handle; bool wilc_optaining_ip; static u8 P2P_LISTEN_STATE; static struct task_struct *hif_thread_handler; -static WILC_MsgQueueHandle hif_msg_q; +static struct message_queue hif_msg_q; static struct semaphore hif_sema_thread; static struct semaphore hif_sema_driver; static struct semaphore hif_sema_wait_response; @@ -243,8 +243,6 @@ static u8 rcv_assoc_resp[MAX_ASSOC_RESP_FRAME_SIZE]; static bool scan_while_connected; static s8 rssi; -static s8 link_speed; -static u8 ch_no; static u8 set_ip[2][4]; static u8 get_ip[2][4]; static u32 inactive_time; @@ -262,7 +260,8 @@ static struct wilc_vif *join_req_vif; #define FLUSHED_JOIN_REQ 1 #define FLUSHED_BYTE_POS 79 -static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo); +static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo); +static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx); /* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as * special purpose in wilc device, so we add 1 to the index to starts from 1. @@ -270,7 +269,7 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo); */ int wilc_get_vif_idx(struct wilc_vif *vif) { - return vif->u8IfIdx + 1; + return vif->idx + 1; } /* We need to minus 1 from idx which is from wilc device to get real index @@ -288,10 +287,10 @@ static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx) return wilc->vif[index]; } -static s32 handle_set_channel(struct wilc_vif *vif, - struct channel_attr *hif_set_ch) +static void handle_set_channel(struct wilc_vif *vif, + struct channel_attr *hif_set_ch) { - s32 result = 0; + int ret = 0; struct wid wid; wid.id = (u16)WID_CURRENT_CHANNEL; @@ -299,17 +298,11 @@ static s32 handle_set_channel(struct wilc_vif *vif, wid.val = (char *)&hif_set_ch->set_ch; wid.size = sizeof(char); - PRINT_D(HOSTINF_DBG, "Setting channel\n"); + ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); - - if (result) { - PRINT_ER("Failed to set channel\n"); - return -EINVAL; - } - - return result; + if (ret) + netdev_err(vif->ndev, "Failed to set channel\n"); } static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif, @@ -319,18 +312,18 @@ static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif, struct wid wid; wid.id = (u16)WID_SET_DRV_HANDLER; - wid.type = WID_INT; - wid.val = (s8 *)&hif_drv_handler->handler; - wid.size = sizeof(u32); + wid.type = WID_STR; + wid.val = (s8 *)hif_drv_handler; + wid.size = sizeof(*hif_drv_handler); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, hif_drv_handler->handler); if (!hif_drv_handler->handler) up(&hif_sema_driver); if (result) { - PRINT_ER("Failed to set driver handler\n"); + netdev_err(vif->ndev, "Failed to set driver handler\n"); return -EINVAL; } @@ -348,37 +341,29 @@ static s32 
handle_set_operation_mode(struct wilc_vif *vif, wid.val = (s8 *)&hif_op_mode->mode; wid.size = sizeof(u32); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if ((hif_op_mode->mode) == IDLE_MODE) up(&hif_sema_driver); if (result) { - PRINT_ER("Failed to set driver handler\n"); + netdev_err(vif->ndev, "Failed to set driver handler\n"); return -EINVAL; } return result; } -static s32 host_int_get_ipaddress(struct wilc_vif *vif, - struct host_if_drv *hif_drv, - u8 *u16ipadd, u8 idx); - static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx) { s32 result = 0; struct wid wid; char firmware_ip_addr[4] = {0}; - struct host_if_drv *hif_drv = vif->hif_drv; if (ip_addr[0] < 192) ip_addr[0] = 0; - PRINT_INFO(HOSTINF_DBG, "Indx = %d, Handling set IP = %pI4\n", - idx, ip_addr); - memcpy(set_ip[idx], ip_addr, IP_ALEN); wid.id = (u16)WID_IP_ADDRESS; @@ -386,18 +371,16 @@ static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx) wid.val = (u8 *)ip_addr; wid.size = IP_ALEN; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); - host_int_get_ipaddress(vif, hif_drv, firmware_ip_addr, idx); + host_int_get_ipaddress(vif, firmware_ip_addr, idx); if (result) { - PRINT_ER("Failed to set IP address\n"); + netdev_err(vif->ndev, "Failed to set IP address\n"); return -EINVAL; } - PRINT_INFO(HOSTINF_DBG, "IP address set\n"); - return result; } @@ -411,10 +394,8 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx) wid.val = kmalloc(IP_ALEN, GFP_KERNEL); wid.size = IP_ALEN; - result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); - - PRINT_INFO(HOSTINF_DBG, "%pI4\n", wid.val); + result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); memcpy(get_ip[idx], wid.val, IP_ALEN); @@ -424,44 +405,10 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx) wilc_setup_ipaddress(vif, set_ip[idx], idx); if (result != 0) { - PRINT_ER("Failed to get IP address\n"); + netdev_err(vif->ndev, "Failed to get IP address\n"); return -EINVAL; } - PRINT_INFO(HOSTINF_DBG, "IP address retrieved:: u8IfIdx = %d\n", idx); - PRINT_INFO(HOSTINF_DBG, "%pI4\n", get_ip[idx]); - PRINT_INFO(HOSTINF_DBG, "\n"); - - return result; -} - -static s32 handle_set_mac_address(struct wilc_vif *vif, - struct set_mac_addr *set_mac_addr) -{ - s32 result = 0; - struct wid wid; - u8 *mac_buf = kmalloc(ETH_ALEN, GFP_KERNEL); - - if (!mac_buf) { - PRINT_ER("No buffer to send mac address\n"); - return -EFAULT; - } - memcpy(mac_buf, set_mac_addr->mac_addr, ETH_ALEN); - - wid.id = (u16)WID_MAC_ADDR; - wid.type = WID_STR; - wid.val = mac_buf; - wid.size = ETH_ALEN; - PRINT_D(GENERIC_DBG, "mac addr = :%pM\n", wid.val); - - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); - if (result) { - PRINT_ER("Failed to set mac address\n"); - result = -EFAULT; - } - - kfree(mac_buf); return result; } @@ -476,11 +423,11 @@ static s32 handle_get_mac_address(struct wilc_vif *vif, wid.val = get_mac_addr->mac_addr; wid.size = ETH_ALEN; - result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) { - PRINT_ER("Failed to get mac address\n"); + netdev_err(vif->ndev, "Failed to get mac address\n"); result 
= -EFAULT; } up(&hif_sema_wait_response); @@ -494,301 +441,294 @@ static s32 handle_cfg_param(struct wilc_vif *vif, s32 result = 0; struct wid wid_list[32]; struct host_if_drv *hif_drv = vif->hif_drv; - u8 wid_cnt = 0; - - down(&hif_drv->sem_cfg_values); + int i = 0; - PRINT_D(HOSTINF_DBG, "Setting CFG params\n"); + mutex_lock(&hif_drv->cfg_values_lock); - if (cfg_param_attr->cfg_attr_info.flag & BSS_TYPE) { - if (cfg_param_attr->cfg_attr_info.bss_type < 6) { - wid_list[wid_cnt].id = WID_BSS_TYPE; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.bss_type; - wid_list[wid_cnt].type = WID_CHAR; - wid_list[wid_cnt].size = sizeof(char); - hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->cfg_attr_info.bss_type; + if (cfg_param_attr->flag & BSS_TYPE) { + if (cfg_param_attr->bss_type < 6) { + wid_list[i].id = WID_BSS_TYPE; + wid_list[i].val = (s8 *)&cfg_param_attr->bss_type; + wid_list[i].type = WID_CHAR; + wid_list[i].size = sizeof(char); + hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->bss_type; } else { - PRINT_ER("check value 6 over\n"); + netdev_err(vif->ndev, "check value 6 over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & AUTH_TYPE) { - if (cfg_param_attr->cfg_attr_info.auth_type == 1 || - cfg_param_attr->cfg_attr_info.auth_type == 2 || - cfg_param_attr->cfg_attr_info.auth_type == 5) { - wid_list[wid_cnt].id = WID_AUTH_TYPE; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.auth_type; - wid_list[wid_cnt].type = WID_CHAR; - wid_list[wid_cnt].size = sizeof(char); - hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->cfg_attr_info.auth_type; + i++; + } + if (cfg_param_attr->flag & AUTH_TYPE) { + if (cfg_param_attr->auth_type == 1 || + cfg_param_attr->auth_type == 2 || + cfg_param_attr->auth_type == 5) { + wid_list[i].id = WID_AUTH_TYPE; + wid_list[i].val = (s8 *)&cfg_param_attr->auth_type; + wid_list[i].type = WID_CHAR; + wid_list[i].size = sizeof(char); + hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->auth_type; } else { - PRINT_ER("Impossible value \n"); + netdev_err(vif->ndev, "Impossible value\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & AUTHEN_TIMEOUT) { - if (cfg_param_attr->cfg_attr_info.auth_timeout > 0 && - cfg_param_attr->cfg_attr_info.auth_timeout < 65536) { - wid_list[wid_cnt].id = WID_AUTH_TIMEOUT; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.auth_timeout; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.auth_timeout = cfg_param_attr->cfg_attr_info.auth_timeout; + i++; + } + if (cfg_param_attr->flag & AUTHEN_TIMEOUT) { + if (cfg_param_attr->auth_timeout > 0 && + cfg_param_attr->auth_timeout < 65536) { + wid_list[i].id = WID_AUTH_TIMEOUT; + wid_list[i].val = (s8 *)&cfg_param_attr->auth_timeout; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.auth_timeout = cfg_param_attr->auth_timeout; } else { - PRINT_ER("Range(1 ~ 65535) over\n"); + netdev_err(vif->ndev, "Range(1 ~ 65535) over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & POWER_MANAGEMENT) { - if (cfg_param_attr->cfg_attr_info.power_mgmt_mode < 5) { - wid_list[wid_cnt].id = WID_POWER_MANAGEMENT; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.power_mgmt_mode; - wid_list[wid_cnt].type = WID_CHAR; - wid_list[wid_cnt].size = sizeof(char); - hif_drv->cfg_values.power_mgmt_mode = 
(u8)cfg_param_attr->cfg_attr_info.power_mgmt_mode; + i++; + } + if (cfg_param_attr->flag & POWER_MANAGEMENT) { + if (cfg_param_attr->power_mgmt_mode < 5) { + wid_list[i].id = WID_POWER_MANAGEMENT; + wid_list[i].val = (s8 *)&cfg_param_attr->power_mgmt_mode; + wid_list[i].type = WID_CHAR; + wid_list[i].size = sizeof(char); + hif_drv->cfg_values.power_mgmt_mode = (u8)cfg_param_attr->power_mgmt_mode; } else { - PRINT_ER("Invalide power mode\n"); + netdev_err(vif->ndev, "Invalid power mode\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & RETRY_SHORT) { - if (cfg_param_attr->cfg_attr_info.short_retry_limit > 0 && - cfg_param_attr->cfg_attr_info.short_retry_limit < 256) { - wid_list[wid_cnt].id = WID_SHORT_RETRY_LIMIT; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.short_retry_limit; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.short_retry_limit = cfg_param_attr->cfg_attr_info.short_retry_limit; + i++; + } + if (cfg_param_attr->flag & RETRY_SHORT) { + if (cfg_param_attr->short_retry_limit > 0 && + cfg_param_attr->short_retry_limit < 256) { + wid_list[i].id = WID_SHORT_RETRY_LIMIT; + wid_list[i].val = (s8 *)&cfg_param_attr->short_retry_limit; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.short_retry_limit = cfg_param_attr->short_retry_limit; } else { - PRINT_ER("Range(1~256) over\n"); + netdev_err(vif->ndev, "Range(1~256) over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & RETRY_LONG) { - if (cfg_param_attr->cfg_attr_info.long_retry_limit > 0 && - cfg_param_attr->cfg_attr_info.long_retry_limit < 256) { - wid_list[wid_cnt].id = WID_LONG_RETRY_LIMIT; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.long_retry_limit; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.long_retry_limit = cfg_param_attr->cfg_attr_info.long_retry_limit; + i++; + } + if (cfg_param_attr->flag & RETRY_LONG) { + if (cfg_param_attr->long_retry_limit > 0 && + cfg_param_attr->long_retry_limit < 256) { + wid_list[i].id = WID_LONG_RETRY_LIMIT; + wid_list[i].val = (s8 *)&cfg_param_attr->long_retry_limit; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.long_retry_limit = cfg_param_attr->long_retry_limit; } else { - PRINT_ER("Range(1~256) over\n"); + netdev_err(vif->ndev, "Range(1~256) over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & FRAG_THRESHOLD) { - if (cfg_param_attr->cfg_attr_info.frag_threshold > 255 && - cfg_param_attr->cfg_attr_info.frag_threshold < 7937) { - wid_list[wid_cnt].id = WID_FRAG_THRESHOLD; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.frag_threshold; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.frag_threshold = cfg_param_attr->cfg_attr_info.frag_threshold; + i++; + } + if (cfg_param_attr->flag & FRAG_THRESHOLD) { + if (cfg_param_attr->frag_threshold > 255 && + cfg_param_attr->frag_threshold < 7937) { + wid_list[i].id = WID_FRAG_THRESHOLD; + wid_list[i].val = (s8 *)&cfg_param_attr->frag_threshold; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.frag_threshold = cfg_param_attr->frag_threshold; } else { - PRINT_ER("Threshold Range fail\n"); + netdev_err(vif->ndev, "Threshold Range fail\n"); result = -EINVAL; goto 
ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & RTS_THRESHOLD) { - if (cfg_param_attr->cfg_attr_info.rts_threshold > 255 && - cfg_param_attr->cfg_attr_info.rts_threshold < 65536) { - wid_list[wid_cnt].id = WID_RTS_THRESHOLD; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.rts_threshold; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.rts_threshold = cfg_param_attr->cfg_attr_info.rts_threshold; + i++; + } + if (cfg_param_attr->flag & RTS_THRESHOLD) { + if (cfg_param_attr->rts_threshold > 255 && + cfg_param_attr->rts_threshold < 65536) { + wid_list[i].id = WID_RTS_THRESHOLD; + wid_list[i].val = (s8 *)&cfg_param_attr->rts_threshold; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.rts_threshold = cfg_param_attr->rts_threshold; } else { - PRINT_ER("Threshold Range fail\n"); + netdev_err(vif->ndev, "Threshold Range fail\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & PREAMBLE) { - if (cfg_param_attr->cfg_attr_info.preamble_type < 3) { - wid_list[wid_cnt].id = WID_PREAMBLE; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.preamble_type; - wid_list[wid_cnt].type = WID_CHAR; - wid_list[wid_cnt].size = sizeof(char); - hif_drv->cfg_values.preamble_type = cfg_param_attr->cfg_attr_info.preamble_type; + i++; + } + if (cfg_param_attr->flag & PREAMBLE) { + if (cfg_param_attr->preamble_type < 3) { + wid_list[i].id = WID_PREAMBLE; + wid_list[i].val = (s8 *)&cfg_param_attr->preamble_type; + wid_list[i].type = WID_CHAR; + wid_list[i].size = sizeof(char); + hif_drv->cfg_values.preamble_type = cfg_param_attr->preamble_type; } else { - PRINT_ER("Preamle Range(0~2) over\n"); + netdev_err(vif->ndev, "Preamle Range(0~2) over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & SHORT_SLOT_ALLOWED) { - if (cfg_param_attr->cfg_attr_info.short_slot_allowed < 2) { - wid_list[wid_cnt].id = WID_SHORT_SLOT_ALLOWED; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.short_slot_allowed; - wid_list[wid_cnt].type = WID_CHAR; - wid_list[wid_cnt].size = sizeof(char); - hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->cfg_attr_info.short_slot_allowed; + i++; + } + if (cfg_param_attr->flag & SHORT_SLOT_ALLOWED) { + if (cfg_param_attr->short_slot_allowed < 2) { + wid_list[i].id = WID_SHORT_SLOT_ALLOWED; + wid_list[i].val = (s8 *)&cfg_param_attr->short_slot_allowed; + wid_list[i].type = WID_CHAR; + wid_list[i].size = sizeof(char); + hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->short_slot_allowed; } else { - PRINT_ER("Short slot(2) over\n"); + netdev_err(vif->ndev, "Short slot(2) over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & TXOP_PROT_DISABLE) { - if (cfg_param_attr->cfg_attr_info.txop_prot_disabled < 2) { - wid_list[wid_cnt].id = WID_11N_TXOP_PROT_DISABLE; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.txop_prot_disabled; - wid_list[wid_cnt].type = WID_CHAR; - wid_list[wid_cnt].size = sizeof(char); - hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->cfg_attr_info.txop_prot_disabled; + i++; + } + if (cfg_param_attr->flag & TXOP_PROT_DISABLE) { + if (cfg_param_attr->txop_prot_disabled < 2) { + wid_list[i].id = WID_11N_TXOP_PROT_DISABLE; + wid_list[i].val = (s8 *)&cfg_param_attr->txop_prot_disabled; + wid_list[i].type = WID_CHAR; + wid_list[i].size = 
sizeof(char); + hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->txop_prot_disabled; } else { - PRINT_ER("TXOP prot disable\n"); + netdev_err(vif->ndev, "TXOP prot disable\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & BEACON_INTERVAL) { - if (cfg_param_attr->cfg_attr_info.beacon_interval > 0 && - cfg_param_attr->cfg_attr_info.beacon_interval < 65536) { - wid_list[wid_cnt].id = WID_BEACON_INTERVAL; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.beacon_interval; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.beacon_interval = cfg_param_attr->cfg_attr_info.beacon_interval; + i++; + } + if (cfg_param_attr->flag & BEACON_INTERVAL) { + if (cfg_param_attr->beacon_interval > 0 && + cfg_param_attr->beacon_interval < 65536) { + wid_list[i].id = WID_BEACON_INTERVAL; + wid_list[i].val = (s8 *)&cfg_param_attr->beacon_interval; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.beacon_interval = cfg_param_attr->beacon_interval; } else { - PRINT_ER("Beacon interval(1~65535) fail\n"); + netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & DTIM_PERIOD) { - if (cfg_param_attr->cfg_attr_info.dtim_period > 0 && - cfg_param_attr->cfg_attr_info.dtim_period < 256) { - wid_list[wid_cnt].id = WID_DTIM_PERIOD; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.dtim_period; - wid_list[wid_cnt].type = WID_CHAR; - wid_list[wid_cnt].size = sizeof(char); - hif_drv->cfg_values.dtim_period = cfg_param_attr->cfg_attr_info.dtim_period; + i++; + } + if (cfg_param_attr->flag & DTIM_PERIOD) { + if (cfg_param_attr->dtim_period > 0 && + cfg_param_attr->dtim_period < 256) { + wid_list[i].id = WID_DTIM_PERIOD; + wid_list[i].val = (s8 *)&cfg_param_attr->dtim_period; + wid_list[i].type = WID_CHAR; + wid_list[i].size = sizeof(char); + hif_drv->cfg_values.dtim_period = cfg_param_attr->dtim_period; } else { - PRINT_ER("DTIM range(1~255) fail\n"); + netdev_err(vif->ndev, "DTIM range(1~255) fail\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & SITE_SURVEY) { - if (cfg_param_attr->cfg_attr_info.site_survey_enabled < 3) { - wid_list[wid_cnt].id = WID_SITE_SURVEY; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.site_survey_enabled; - wid_list[wid_cnt].type = WID_CHAR; - wid_list[wid_cnt].size = sizeof(char); - hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->cfg_attr_info.site_survey_enabled; + i++; + } + if (cfg_param_attr->flag & SITE_SURVEY) { + if (cfg_param_attr->site_survey_enabled < 3) { + wid_list[i].id = WID_SITE_SURVEY; + wid_list[i].val = (s8 *)&cfg_param_attr->site_survey_enabled; + wid_list[i].type = WID_CHAR; + wid_list[i].size = sizeof(char); + hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->site_survey_enabled; } else { - PRINT_ER("Site survey disable\n"); + netdev_err(vif->ndev, "Site survey disable\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & SITE_SURVEY_SCAN_TIME) { - if (cfg_param_attr->cfg_attr_info.site_survey_scan_time > 0 && - cfg_param_attr->cfg_attr_info.site_survey_scan_time < 65536) { - wid_list[wid_cnt].id = WID_SITE_SURVEY_SCAN_TIME; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.site_survey_scan_time; - wid_list[wid_cnt].type = WID_SHORT; - 
wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->cfg_attr_info.site_survey_scan_time; + i++; + } + if (cfg_param_attr->flag & SITE_SURVEY_SCAN_TIME) { + if (cfg_param_attr->site_survey_scan_time > 0 && + cfg_param_attr->site_survey_scan_time < 65536) { + wid_list[i].id = WID_SITE_SURVEY_SCAN_TIME; + wid_list[i].val = (s8 *)&cfg_param_attr->site_survey_scan_time; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->site_survey_scan_time; } else { - PRINT_ER("Site survey scan time(1~65535) over\n"); + netdev_err(vif->ndev, "Site scan time(1~65535) over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & ACTIVE_SCANTIME) { - if (cfg_param_attr->cfg_attr_info.active_scan_time > 0 && - cfg_param_attr->cfg_attr_info.active_scan_time < 65536) { - wid_list[wid_cnt].id = WID_ACTIVE_SCAN_TIME; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.active_scan_time; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.active_scan_time = cfg_param_attr->cfg_attr_info.active_scan_time; + i++; + } + if (cfg_param_attr->flag & ACTIVE_SCANTIME) { + if (cfg_param_attr->active_scan_time > 0 && + cfg_param_attr->active_scan_time < 65536) { + wid_list[i].id = WID_ACTIVE_SCAN_TIME; + wid_list[i].val = (s8 *)&cfg_param_attr->active_scan_time; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.active_scan_time = cfg_param_attr->active_scan_time; } else { - PRINT_ER("Active scan time(1~65535) over\n"); + netdev_err(vif->ndev, "Active time(1~65535) over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & PASSIVE_SCANTIME) { - if (cfg_param_attr->cfg_attr_info.passive_scan_time > 0 && - cfg_param_attr->cfg_attr_info.passive_scan_time < 65536) { - wid_list[wid_cnt].id = WID_PASSIVE_SCAN_TIME; - wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.passive_scan_time; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); - hif_drv->cfg_values.passive_scan_time = cfg_param_attr->cfg_attr_info.passive_scan_time; + i++; + } + if (cfg_param_attr->flag & PASSIVE_SCANTIME) { + if (cfg_param_attr->passive_scan_time > 0 && + cfg_param_attr->passive_scan_time < 65536) { + wid_list[i].id = WID_PASSIVE_SCAN_TIME; + wid_list[i].val = (s8 *)&cfg_param_attr->passive_scan_time; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); + hif_drv->cfg_values.passive_scan_time = cfg_param_attr->passive_scan_time; } else { - PRINT_ER("Passive scan time(1~65535) over\n"); + netdev_err(vif->ndev, "Passive time(1~65535) over\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; - } - if (cfg_param_attr->cfg_attr_info.flag & CURRENT_TX_RATE) { - enum CURRENT_TXRATE curr_tx_rate = cfg_param_attr->cfg_attr_info.curr_tx_rate; - - if (curr_tx_rate == AUTORATE || curr_tx_rate == MBPS_1 - || curr_tx_rate == MBPS_2 || curr_tx_rate == MBPS_5_5 - || curr_tx_rate == MBPS_11 || curr_tx_rate == MBPS_6 - || curr_tx_rate == MBPS_9 || curr_tx_rate == MBPS_12 - || curr_tx_rate == MBPS_18 || curr_tx_rate == MBPS_24 - || curr_tx_rate == MBPS_36 || curr_tx_rate == MBPS_48 || curr_tx_rate == MBPS_54) { - wid_list[wid_cnt].id = WID_CURRENT_TX_RATE; - wid_list[wid_cnt].val = (s8 *)&curr_tx_rate; - wid_list[wid_cnt].type = WID_SHORT; - wid_list[wid_cnt].size = sizeof(u16); + i++; + } + if 
(cfg_param_attr->flag & CURRENT_TX_RATE) { + enum CURRENT_TXRATE curr_tx_rate = cfg_param_attr->curr_tx_rate; + + if (curr_tx_rate == AUTORATE || curr_tx_rate == MBPS_1 || + curr_tx_rate == MBPS_2 || curr_tx_rate == MBPS_5_5 || + curr_tx_rate == MBPS_11 || curr_tx_rate == MBPS_6 || + curr_tx_rate == MBPS_9 || curr_tx_rate == MBPS_12 || + curr_tx_rate == MBPS_18 || curr_tx_rate == MBPS_24 || + curr_tx_rate == MBPS_36 || curr_tx_rate == MBPS_48 || + curr_tx_rate == MBPS_54) { + wid_list[i].id = WID_CURRENT_TX_RATE; + wid_list[i].val = (s8 *)&curr_tx_rate; + wid_list[i].type = WID_SHORT; + wid_list[i].size = sizeof(u16); hif_drv->cfg_values.curr_tx_rate = (u8)curr_tx_rate; } else { - PRINT_ER("out of TX rate\n"); + netdev_err(vif->ndev, "out of TX rate\n"); result = -EINVAL; goto ERRORHANDLER; } - wid_cnt++; + i++; } - result = wilc_send_config_pkt(vif->wilc, SET_CFG, wid_list, - wid_cnt, wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, wid_list, + i, wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Error in setting CFG params\n"); + netdev_err(vif->ndev, "Error in setting CFG params\n"); ERRORHANDLER: - up(&hif_drv->sem_cfg_values); + mutex_unlock(&hif_drv->cfg_values_lock); return result; } -static void Handle_wait_msg_q_empty(void) -{ - wilc_initialized = 0; - up(&hif_sema_wait_response); -} - static s32 Handle_ScanDone(struct wilc_vif *vif, enum scan_event enuEvent); @@ -804,50 +744,40 @@ static s32 Handle_Scan(struct wilc_vif *vif, u8 *pu8HdnNtwrksWidVal = NULL; struct host_if_drv *hif_drv = vif->hif_drv; - PRINT_D(HOSTINF_DBG, "Setting SCAN params\n"); - PRINT_D(HOSTINF_DBG, "Scanning: In [%d] state\n", hif_drv->hif_state); - hif_drv->usr_scan_req.scan_result = pstrHostIFscanAttr->result; hif_drv->usr_scan_req.arg = pstrHostIFscanAttr->arg; if ((hif_drv->hif_state >= HOST_IF_SCANNING) && (hif_drv->hif_state < HOST_IF_CONNECTED)) { - PRINT_D(GENERIC_DBG, "Don't scan already in [%d] state\n", - hif_drv->hif_state); - PRINT_ER("Already scan\n"); + netdev_err(vif->ndev, "Already scan\n"); result = -EBUSY; goto ERRORHANDLER; } if (wilc_optaining_ip || wilc_connecting) { - PRINT_D(GENERIC_DBG, "[handle_scan]: Don't do obss scan until IP adresss is obtained\n"); - PRINT_ER("Don't do obss scan\n"); + netdev_err(vif->ndev, "Don't do obss scan\n"); result = -EBUSY; goto ERRORHANDLER; } - PRINT_D(HOSTINF_DBG, "Setting SCAN params\n"); - hif_drv->usr_scan_req.rcvd_ch_cnt = 0; strWIDList[u32WidsCount].id = (u16)WID_SSID_PROBE_REQ; strWIDList[u32WidsCount].type = WID_STR; - for (i = 0; i < pstrHostIFscanAttr->hidden_network.u8ssidnum; i++) - valuesize += ((pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen) + 1); + for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) + valuesize += ((pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len) + 1); pu8HdnNtwrksWidVal = kmalloc(valuesize + 1, GFP_KERNEL); strWIDList[u32WidsCount].val = pu8HdnNtwrksWidVal; if (strWIDList[u32WidsCount].val) { pu8Buffer = strWIDList[u32WidsCount].val; - *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.u8ssidnum; + *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.n_ssids; - PRINT_D(HOSTINF_DBG, "In Handle_ProbeRequest number of ssid %d\n", pstrHostIFscanAttr->hidden_network.u8ssidnum); - - for (i = 0; i < pstrHostIFscanAttr->hidden_network.u8ssidnum; i++) { - *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen; - memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].pu8ssid, 
pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen); - pu8Buffer += pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen; + for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) { + *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len; + memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.net_info[i].ssid, pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len); + pu8Buffer += pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len; } strWIDList[u32WidsCount].size = (s32)(valuesize + 1); @@ -896,14 +826,12 @@ static s32 Handle_Scan(struct wilc_vif *vif, else if (hif_drv->hif_state == HOST_IF_IDLE) scan_while_connected = false; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList, + result = wilc_send_config_pkt(vif, SET_CFG, strWIDList, u32WidsCount, wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send scan paramters config packet\n"); - else - PRINT_D(HOSTINF_DBG, "Successfully sent SCAN params config packet\n"); + netdev_err(vif->ndev, "Failed to send scan parameters\n"); ERRORHANDLER: if (result) { @@ -916,8 +844,8 @@ ERRORHANDLER: kfree(pstrHostIFscanAttr->ies); pstrHostIFscanAttr->ies = NULL; - kfree(pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo); - pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo = NULL; + kfree(pstrHostIFscanAttr->hidden_network.net_info); + pstrHostIFscanAttr->hidden_network.net_info = NULL; kfree(pu8HdnNtwrksWidVal); @@ -932,27 +860,24 @@ static s32 Handle_ScanDone(struct wilc_vif *vif, struct wid wid; struct host_if_drv *hif_drv = vif->hif_drv; - PRINT_D(HOSTINF_DBG, "in Handle_ScanDone()\n"); - if (enuEvent == SCAN_EVENT_ABORTED) { - PRINT_D(GENERIC_DBG, "Abort running scan\n"); u8abort_running_scan = 1; wid.id = (u16)WID_ABORT_RUNNING_SCAN; wid.type = WID_CHAR; wid.val = (s8 *)&u8abort_running_scan; wid.size = sizeof(char); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) { - PRINT_ER("Failed to set abort running scan\n"); + netdev_err(vif->ndev, "Failed to set abort running\n"); result = -EFAULT; } } if (!hif_drv) { - PRINT_ER("Driver handler is NULL\n"); + netdev_err(vif->ndev, "Driver handler is NULL\n"); return result; } @@ -976,35 +901,31 @@ static s32 Handle_Connect(struct wilc_vif *vif, struct join_bss_param *ptstrJoinBssParam; struct host_if_drv *hif_drv = vif->hif_drv; - PRINT_D(GENERIC_DBG, "Handling connect request\n"); - if (memcmp(pstrHostIFconnectAttr->bssid, wilc_connected_ssid, ETH_ALEN) == 0) { result = 0; - PRINT_ER("Trying to connect to an already connected AP, Discard connect request\n"); + netdev_err(vif->ndev, "Discard connect request\n"); return result; } - PRINT_INFO(HOSTINF_DBG, "Saving connection parameters in global structure\n"); - - ptstrJoinBssParam = (struct join_bss_param *)pstrHostIFconnectAttr->params; + ptstrJoinBssParam = pstrHostIFconnectAttr->params; if (!ptstrJoinBssParam) { - PRINT_ER("Required BSSID not found\n"); + netdev_err(vif->ndev, "Required BSSID not found\n"); result = -ENOENT; goto ERRORHANDLER; } if (pstrHostIFconnectAttr->bssid) { - hif_drv->usr_conn_req.pu8bssid = kmalloc(6, GFP_KERNEL); - memcpy(hif_drv->usr_conn_req.pu8bssid, pstrHostIFconnectAttr->bssid, 6); + hif_drv->usr_conn_req.bssid = kmalloc(6, GFP_KERNEL); + memcpy(hif_drv->usr_conn_req.bssid, pstrHostIFconnectAttr->bssid, 6); } hif_drv->usr_conn_req.ssid_len = pstrHostIFconnectAttr->ssid_len; if 
(pstrHostIFconnectAttr->ssid) { - hif_drv->usr_conn_req.pu8ssid = kmalloc(pstrHostIFconnectAttr->ssid_len + 1, GFP_KERNEL); - memcpy(hif_drv->usr_conn_req.pu8ssid, + hif_drv->usr_conn_req.ssid = kmalloc(pstrHostIFconnectAttr->ssid_len + 1, GFP_KERNEL); + memcpy(hif_drv->usr_conn_req.ssid, pstrHostIFconnectAttr->ssid, pstrHostIFconnectAttr->ssid_len); - hif_drv->usr_conn_req.pu8ssid[pstrHostIFconnectAttr->ssid_len] = '\0'; + hif_drv->usr_conn_req.ssid[pstrHostIFconnectAttr->ssid_len] = '\0'; } hif_drv->usr_conn_req.ies_len = pstrHostIFconnectAttr->ies_len; @@ -1015,7 +936,7 @@ static s32 Handle_Connect(struct wilc_vif *vif, pstrHostIFconnectAttr->ies_len); } - hif_drv->usr_conn_req.u8security = pstrHostIFconnectAttr->security; + hif_drv->usr_conn_req.security = pstrHostIFconnectAttr->security; hif_drv->usr_conn_req.auth_type = pstrHostIFconnectAttr->auth_type; hif_drv->usr_conn_req.conn_result = pstrHostIFconnectAttr->result; hif_drv->usr_conn_req.arg = pstrHostIFconnectAttr->arg; @@ -1055,13 +976,11 @@ static s32 Handle_Connect(struct wilc_vif *vif, strWIDList[u32WidsCount].id = (u16)WID_11I_MODE; strWIDList[u32WidsCount].type = WID_CHAR; strWIDList[u32WidsCount].size = sizeof(char); - strWIDList[u32WidsCount].val = (s8 *)&hif_drv->usr_conn_req.u8security; + strWIDList[u32WidsCount].val = (s8 *)&hif_drv->usr_conn_req.security; u32WidsCount++; if (memcmp("DIRECT-", pstrHostIFconnectAttr->ssid, 7)) - mode_11i = hif_drv->usr_conn_req.u8security; - - PRINT_INFO(HOSTINF_DBG, "Encrypt Mode = %x\n", hif_drv->usr_conn_req.u8security); + mode_11i = hif_drv->usr_conn_req.security; strWIDList[u32WidsCount].id = (u16)WID_AUTH_TYPE; strWIDList[u32WidsCount].type = WID_CHAR; @@ -1072,11 +991,6 @@ static s32 Handle_Connect(struct wilc_vif *vif, if (memcmp("DIRECT-", pstrHostIFconnectAttr->ssid, 7)) auth_type = (u8)hif_drv->usr_conn_req.auth_type; - PRINT_INFO(HOSTINF_DBG, "Authentication Type = %x\n", - hif_drv->usr_conn_req.auth_type); - PRINT_D(HOSTINF_DBG, "Connecting to network of SSID %s on channel %d\n", - hif_drv->usr_conn_req.pu8ssid, pstrHostIFconnectAttr->ch); - strWIDList[u32WidsCount].id = (u16)WID_JOIN_REQ_EXTENDED; strWIDList[u32WidsCount].type = WID_STR; strWIDList[u32WidsCount].size = 112; @@ -1103,12 +1017,11 @@ static s32 Handle_Connect(struct wilc_vif *vif, if ((pstrHostIFconnectAttr->ch >= 1) && (pstrHostIFconnectAttr->ch <= 14)) { *(pu8CurrByte++) = pstrHostIFconnectAttr->ch; } else { - PRINT_ER("Channel out of range\n"); + netdev_err(vif->ndev, "Channel out of range\n"); *(pu8CurrByte++) = 0xFF; } *(pu8CurrByte++) = (ptstrJoinBssParam->cap_info) & 0xFF; *(pu8CurrByte++) = ((ptstrJoinBssParam->cap_info) >> 8) & 0xFF; - PRINT_D(HOSTINF_DBG, "* Cap Info %0x*\n", (*(pu8CurrByte - 2) | ((*(pu8CurrByte - 1)) << 8))); if (pstrHostIFconnectAttr->bssid) memcpy(pu8CurrByte, pstrHostIFconnectAttr->bssid, 6); @@ -1120,26 +1033,20 @@ static s32 Handle_Connect(struct wilc_vif *vif, *(pu8CurrByte++) = (ptstrJoinBssParam->beacon_period) & 0xFF; *(pu8CurrByte++) = ((ptstrJoinBssParam->beacon_period) >> 8) & 0xFF; - PRINT_D(HOSTINF_DBG, "* Beacon Period %d*\n", (*(pu8CurrByte - 2) | ((*(pu8CurrByte - 1)) << 8))); *(pu8CurrByte++) = ptstrJoinBssParam->dtim_period; - PRINT_D(HOSTINF_DBG, "* DTIM Period %d*\n", (*(pu8CurrByte - 1))); memcpy(pu8CurrByte, ptstrJoinBssParam->supp_rates, MAX_RATES_SUPPORTED + 1); pu8CurrByte += (MAX_RATES_SUPPORTED + 1); *(pu8CurrByte++) = ptstrJoinBssParam->wmm_cap; - PRINT_D(HOSTINF_DBG, "* wmm cap%d*\n", (*(pu8CurrByte - 1))); *(pu8CurrByte++) = 
ptstrJoinBssParam->uapsd_cap; *(pu8CurrByte++) = ptstrJoinBssParam->ht_capable; hif_drv->usr_conn_req.ht_capable = ptstrJoinBssParam->ht_capable; *(pu8CurrByte++) = ptstrJoinBssParam->rsn_found; - PRINT_D(HOSTINF_DBG, "* rsn found %d*\n", *(pu8CurrByte - 1)); *(pu8CurrByte++) = ptstrJoinBssParam->rsn_grp_policy; - PRINT_D(HOSTINF_DBG, "* rsn group policy %0x*\n", (*(pu8CurrByte - 1))); *(pu8CurrByte++) = ptstrJoinBssParam->mode_802_11i; - PRINT_D(HOSTINF_DBG, "* mode_802_11i %d*\n", (*(pu8CurrByte - 1))); memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_pcip_policy, sizeof(ptstrJoinBssParam->rsn_pcip_policy)); pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_pcip_policy); @@ -1154,8 +1061,6 @@ static s32 Handle_Connect(struct wilc_vif *vif, *(pu8CurrByte++) = ptstrJoinBssParam->noa_enabled; if (ptstrJoinBssParam->noa_enabled) { - PRINT_D(HOSTINF_DBG, "NOA present\n"); - *(pu8CurrByte++) = (ptstrJoinBssParam->tsf) & 0xFF; *(pu8CurrByte++) = ((ptstrJoinBssParam->tsf) >> 8) & 0xFF; *(pu8CurrByte++) = ((ptstrJoinBssParam->tsf) >> 16) & 0xFF; @@ -1177,8 +1082,7 @@ static s32 Handle_Connect(struct wilc_vif *vif, memcpy(pu8CurrByte, ptstrJoinBssParam->start_time, sizeof(ptstrJoinBssParam->start_time)); pu8CurrByte += sizeof(ptstrJoinBssParam->start_time); - } else - PRINT_D(HOSTINF_DBG, "NOA not present\n"); + } pu8CurrByte = strWIDList[u32WidsCount].val; u32WidsCount++; @@ -1188,46 +1092,37 @@ static s32 Handle_Connect(struct wilc_vif *vif, join_req_vif = vif; } - PRINT_D(GENERIC_DBG, "send HOST_IF_WAITING_CONN_RESP\n"); - - if (pstrHostIFconnectAttr->bssid) { + if (pstrHostIFconnectAttr->bssid) memcpy(wilc_connected_ssid, pstrHostIFconnectAttr->bssid, ETH_ALEN); - PRINT_D(GENERIC_DBG, "save Bssid = %pM\n", - pstrHostIFconnectAttr->bssid); - PRINT_D(GENERIC_DBG, "save bssid = %pM\n", wilc_connected_ssid); - } - result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList, + result = wilc_send_config_pkt(vif, SET_CFG, strWIDList, u32WidsCount, wilc_get_vif_idx(vif)); if (result) { - PRINT_ER("failed to send config packet\n"); + netdev_err(vif->ndev, "failed to send config packet\n"); result = -EFAULT; goto ERRORHANDLER; } else { - PRINT_D(GENERIC_DBG, "set HOST_IF_WAITING_CONN_RESP\n"); hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP; } ERRORHANDLER: if (result) { - tstrConnectInfo strConnectInfo; + struct connect_info strConnectInfo; del_timer(&hif_drv->connect_timer); - PRINT_D(HOSTINF_DBG, "could not start wilc_connecting to the required network\n"); - - memset(&strConnectInfo, 0, sizeof(tstrConnectInfo)); + memset(&strConnectInfo, 0, sizeof(struct connect_info)); if (pstrHostIFconnectAttr->result) { if (pstrHostIFconnectAttr->bssid) - memcpy(strConnectInfo.au8bssid, pstrHostIFconnectAttr->bssid, 6); + memcpy(strConnectInfo.bssid, pstrHostIFconnectAttr->bssid, 6); if (pstrHostIFconnectAttr->ies) { - strConnectInfo.ReqIEsLen = pstrHostIFconnectAttr->ies_len; - strConnectInfo.pu8ReqIEs = kmalloc(pstrHostIFconnectAttr->ies_len, GFP_KERNEL); - memcpy(strConnectInfo.pu8ReqIEs, + strConnectInfo.req_ies_len = pstrHostIFconnectAttr->ies_len; + strConnectInfo.req_ies = kmalloc(pstrHostIFconnectAttr->ies_len, GFP_KERNEL); + memcpy(strConnectInfo.req_ies, pstrHostIFconnectAttr->ies, pstrHostIFconnectAttr->ies_len); } @@ -1238,15 +1133,14 @@ ERRORHANDLER: NULL, pstrHostIFconnectAttr->arg); hif_drv->hif_state = HOST_IF_IDLE; - kfree(strConnectInfo.pu8ReqIEs); - strConnectInfo.pu8ReqIEs = NULL; + kfree(strConnectInfo.req_ies); + strConnectInfo.req_ies = NULL; } else { - PRINT_ER("Connect callback function pointer is 
NULL\n"); + netdev_err(vif->ndev, "Connect callback is NULL\n"); } } - PRINT_D(HOSTINF_DBG, "Deallocating connection parameters\n"); kfree(pstrHostIFconnectAttr->bssid); pstrHostIFconnectAttr->bssid = NULL; @@ -1260,63 +1154,16 @@ ERRORHANDLER: return result; } -static s32 Handle_FlushConnect(struct wilc_vif *vif) -{ - s32 result = 0; - struct wid strWIDList[5]; - u32 u32WidsCount = 0; - u8 *pu8CurrByte = NULL; - - strWIDList[u32WidsCount].id = WID_INFO_ELEMENT_ASSOCIATE; - strWIDList[u32WidsCount].type = WID_BIN_DATA; - strWIDList[u32WidsCount].val = info_element; - strWIDList[u32WidsCount].size = info_element_size; - u32WidsCount++; - - strWIDList[u32WidsCount].id = (u16)WID_11I_MODE; - strWIDList[u32WidsCount].type = WID_CHAR; - strWIDList[u32WidsCount].size = sizeof(char); - strWIDList[u32WidsCount].val = (s8 *)(&(mode_11i)); - u32WidsCount++; - - strWIDList[u32WidsCount].id = (u16)WID_AUTH_TYPE; - strWIDList[u32WidsCount].type = WID_CHAR; - strWIDList[u32WidsCount].size = sizeof(char); - strWIDList[u32WidsCount].val = (s8 *)(&auth_type); - u32WidsCount++; - - strWIDList[u32WidsCount].id = (u16)WID_JOIN_REQ_EXTENDED; - strWIDList[u32WidsCount].type = WID_STR; - strWIDList[u32WidsCount].size = join_req_size; - strWIDList[u32WidsCount].val = (s8 *)join_req; - pu8CurrByte = strWIDList[u32WidsCount].val; - - pu8CurrByte += FLUSHED_BYTE_POS; - *(pu8CurrByte) = FLUSHED_JOIN_REQ; - - u32WidsCount++; - - result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList, - u32WidsCount, - wilc_get_vif_idx(join_req_vif)); - if (result) { - PRINT_ER("failed to send config packet\n"); - result = -EINVAL; - } - - return result; -} - static s32 Handle_ConnectTimeout(struct wilc_vif *vif) { s32 result = 0; - tstrConnectInfo strConnectInfo; + struct connect_info strConnectInfo; struct wid wid; u16 u16DummyReasonCode = 0; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("Driver handler is NULL\n"); + netdev_err(vif->ndev, "Driver handler is NULL\n"); return result; } @@ -1324,18 +1171,18 @@ static s32 Handle_ConnectTimeout(struct wilc_vif *vif) scan_while_connected = false; - memset(&strConnectInfo, 0, sizeof(tstrConnectInfo)); + memset(&strConnectInfo, 0, sizeof(struct connect_info)); if (hif_drv->usr_conn_req.conn_result) { - if (hif_drv->usr_conn_req.pu8bssid) { - memcpy(strConnectInfo.au8bssid, - hif_drv->usr_conn_req.pu8bssid, 6); + if (hif_drv->usr_conn_req.bssid) { + memcpy(strConnectInfo.bssid, + hif_drv->usr_conn_req.bssid, 6); } if (hif_drv->usr_conn_req.ies) { - strConnectInfo.ReqIEsLen = hif_drv->usr_conn_req.ies_len; - strConnectInfo.pu8ReqIEs = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL); - memcpy(strConnectInfo.pu8ReqIEs, + strConnectInfo.req_ies_len = hif_drv->usr_conn_req.ies_len; + strConnectInfo.req_ies = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL); + memcpy(strConnectInfo.req_ies, hif_drv->usr_conn_req.ies, hif_drv->usr_conn_req.ies_len); } @@ -1346,10 +1193,10 @@ static s32 Handle_ConnectTimeout(struct wilc_vif *vif) NULL, hif_drv->usr_conn_req.arg); - kfree(strConnectInfo.pu8ReqIEs); - strConnectInfo.pu8ReqIEs = NULL; + kfree(strConnectInfo.req_ies); + strConnectInfo.req_ies = NULL; } else { - PRINT_ER("Connect callback function pointer is NULL\n"); + netdev_err(vif->ndev, "Connect callback is NULL\n"); } wid.id = (u16)WID_DISCONNECT; @@ -1357,18 +1204,16 @@ static s32 Handle_ConnectTimeout(struct wilc_vif *vif) wid.val = (s8 *)&u16DummyReasonCode; wid.size = sizeof(char); - PRINT_D(HOSTINF_DBG, "Sending disconnect request\n"); - - result = 
wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send dissconect config packet\n"); + netdev_err(vif->ndev, "Failed to send dissconect\n"); hif_drv->usr_conn_req.ssid_len = 0; - kfree(hif_drv->usr_conn_req.pu8ssid); - hif_drv->usr_conn_req.pu8ssid = NULL; - kfree(hif_drv->usr_conn_req.pu8bssid); - hif_drv->usr_conn_req.pu8bssid = NULL; + kfree(hif_drv->usr_conn_req.ssid); + hif_drv->usr_conn_req.ssid = NULL; + kfree(hif_drv->usr_conn_req.bssid); + hif_drv->usr_conn_req.bssid = NULL; hif_drv->usr_conn_req.ies_len = 0; kfree(hif_drv->usr_conn_req.ies); hif_drv->usr_conn_req.ies = NULL; @@ -1394,33 +1239,30 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif, u32 i; bool bNewNtwrkFound; s32 result = 0; - tstrNetworkInfo *pstrNetworkInfo = NULL; + struct network_info *pstrNetworkInfo = NULL; void *pJoinParams = NULL; struct host_if_drv *hif_drv = vif->hif_drv; bNewNtwrkFound = true; - PRINT_INFO(HOSTINF_DBG, "Handling received network info\n"); if (hif_drv->usr_scan_req.scan_result) { - PRINT_D(HOSTINF_DBG, "State: Scanning, parsing network information received\n"); wilc_parse_network_info(pstrRcvdNetworkInfo->buffer, &pstrNetworkInfo); if ((!pstrNetworkInfo) || (!hif_drv->usr_scan_req.scan_result)) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); result = -EINVAL; goto done; } for (i = 0; i < hif_drv->usr_scan_req.rcvd_ch_cnt; i++) { - if ((hif_drv->usr_scan_req.net_info[i].au8bssid) && - (pstrNetworkInfo->au8bssid)) { - if (memcmp(hif_drv->usr_scan_req.net_info[i].au8bssid, - pstrNetworkInfo->au8bssid, 6) == 0) { - if (pstrNetworkInfo->s8rssi <= hif_drv->usr_scan_req.net_info[i].s8rssi) { - PRINT_D(HOSTINF_DBG, "Network previously discovered\n"); + if ((hif_drv->usr_scan_req.net_info[i].bssid) && + (pstrNetworkInfo->bssid)) { + if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid, + pstrNetworkInfo->bssid, 6) == 0) { + if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) { goto done; } else { - hif_drv->usr_scan_req.net_info[i].s8rssi = pstrNetworkInfo->s8rssi; + hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi; bNewNtwrkFound = false; break; } @@ -1429,30 +1271,26 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif, } if (bNewNtwrkFound) { - PRINT_D(HOSTINF_DBG, "New network found\n"); - if (hif_drv->usr_scan_req.rcvd_ch_cnt < MAX_NUM_SCANNED_NETWORKS) { - hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].s8rssi = pstrNetworkInfo->s8rssi; + hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].rssi = pstrNetworkInfo->rssi; - if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].au8bssid && - pstrNetworkInfo->au8bssid) { - memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].au8bssid, - pstrNetworkInfo->au8bssid, 6); + if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid && + pstrNetworkInfo->bssid) { + memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid, + pstrNetworkInfo->bssid, 6); hif_drv->usr_scan_req.rcvd_ch_cnt++; - pstrNetworkInfo->bNewNetwork = true; + pstrNetworkInfo->new_network = true; pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo); hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo, hif_drv->usr_scan_req.arg, pJoinParams); } - } else { - PRINT_WRN(HOSTINF_DBG, "Discovered networks exceeded max. 
limit\n"); } } else { - pstrNetworkInfo->bNewNetwork = false; + pstrNetworkInfo->new_network = false; hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo, hif_drv->usr_scan_req.arg, NULL); } @@ -1463,8 +1301,8 @@ done: pstrRcvdNetworkInfo->buffer = NULL; if (pstrNetworkInfo) { - wilc_dealloc_network_info(pstrNetworkInfo); - pstrNetworkInfo = NULL; + kfree(pstrNetworkInfo->ies); + kfree(pstrNetworkInfo); } return result; @@ -1487,31 +1325,29 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif, u8 u8MacStatus; u8 u8MacStatusReasonCode; u8 u8MacStatusAdditionalInfo; - tstrConnectInfo strConnectInfo; - tstrDisconnectNotifInfo strDisconnectNotifInfo; + struct connect_info strConnectInfo; + struct disconnect_info strDisconnectNotifInfo; s32 s32Err = 0; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("Driver handler is NULL\n"); + netdev_err(vif->ndev, "Driver handler is NULL\n"); return -ENODEV; } - PRINT_D(GENERIC_DBG, "Current State = %d,Received state = %d\n", - hif_drv->hif_state, pstrRcvdGnrlAsyncInfo->buffer[7]); if ((hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) || (hif_drv->hif_state == HOST_IF_CONNECTED) || hif_drv->usr_scan_req.scan_result) { if (!pstrRcvdGnrlAsyncInfo->buffer || !hif_drv->usr_conn_req.conn_result) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EINVAL; } u8MsgType = pstrRcvdGnrlAsyncInfo->buffer[0]; if ('I' != u8MsgType) { - PRINT_ER("Received Message format incorrect.\n"); + netdev_err(vif->ndev, "Received Message incorrect.\n"); return -EFAULT; } @@ -1522,14 +1358,11 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif, u8MacStatus = pstrRcvdGnrlAsyncInfo->buffer[7]; u8MacStatusReasonCode = pstrRcvdGnrlAsyncInfo->buffer[8]; u8MacStatusAdditionalInfo = pstrRcvdGnrlAsyncInfo->buffer[9]; - PRINT_INFO(HOSTINF_DBG, "Recieved MAC status = %d with Reason = %d , Info = %d\n", u8MacStatus, u8MacStatusReasonCode, u8MacStatusAdditionalInfo); if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) { u32 u32RcvdAssocRespInfoLen = 0; - tstrConnectRespInfo *pstrConnectRespInfo = NULL; - - PRINT_D(HOSTINF_DBG, "Recieved MAC status = %d with Reason = %d , Code = %d\n", u8MacStatus, u8MacStatusReasonCode, u8MacStatusAdditionalInfo); + struct connect_resp_info *pstrConnectRespInfo = NULL; - memset(&strConnectInfo, 0, sizeof(tstrConnectInfo)); + memset(&strConnectInfo, 0, sizeof(struct connect_info)); if (u8MacStatus == MAC_CONNECTED) { memset(rcv_assoc_resp, 0, MAX_ASSOC_RESP_FRAME_SIZE); @@ -1539,59 +1372,54 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif, MAX_ASSOC_RESP_FRAME_SIZE, &u32RcvdAssocRespInfoLen); - PRINT_INFO(HOSTINF_DBG, "Received association response with length = %d\n", u32RcvdAssocRespInfoLen); - if (u32RcvdAssocRespInfoLen != 0) { - PRINT_D(HOSTINF_DBG, "Parsing association response\n"); s32Err = wilc_parse_assoc_resp_info(rcv_assoc_resp, u32RcvdAssocRespInfoLen, &pstrConnectRespInfo); if (s32Err) { - PRINT_ER("wilc_parse_assoc_resp_info() returned error %d\n", s32Err); + netdev_err(vif->ndev, "wilc_parse_assoc_resp_info() returned error %d\n", s32Err); } else { - strConnectInfo.u16ConnectStatus = pstrConnectRespInfo->u16ConnectStatus; - - if (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE) { - PRINT_INFO(HOSTINF_DBG, "Association response received : Successful connection status\n"); - if (pstrConnectRespInfo->pu8RespIEs) { - strConnectInfo.u16RespIEsLen = pstrConnectRespInfo->u16RespIEsLen; - strConnectInfo.pu8RespIEs = 
kmalloc(pstrConnectRespInfo->u16RespIEsLen, GFP_KERNEL); - memcpy(strConnectInfo.pu8RespIEs, pstrConnectRespInfo->pu8RespIEs, - pstrConnectRespInfo->u16RespIEsLen); + strConnectInfo.status = pstrConnectRespInfo->status; + + if (strConnectInfo.status == SUCCESSFUL_STATUSCODE) { + if (pstrConnectRespInfo->ies) { + strConnectInfo.resp_ies_len = pstrConnectRespInfo->ies_len; + strConnectInfo.resp_ies = kmalloc(pstrConnectRespInfo->ies_len, GFP_KERNEL); + memcpy(strConnectInfo.resp_ies, pstrConnectRespInfo->ies, + pstrConnectRespInfo->ies_len); } } if (pstrConnectRespInfo) { - wilc_dealloc_assoc_resp_info(pstrConnectRespInfo); - pstrConnectRespInfo = NULL; + kfree(pstrConnectRespInfo->ies); + kfree(pstrConnectRespInfo); } } } } if ((u8MacStatus == MAC_CONNECTED) && - (strConnectInfo.u16ConnectStatus != SUCCESSFUL_STATUSCODE)) { - PRINT_ER("Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n"); + (strConnectInfo.status != SUCCESSFUL_STATUSCODE)) { + netdev_err(vif->ndev, "Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n"); eth_zero_addr(wilc_connected_ssid); } else if (u8MacStatus == MAC_DISCONNECTED) { - PRINT_ER("Received MAC status is MAC_DISCONNECTED\n"); + netdev_err(vif->ndev, "Received MAC status is MAC_DISCONNECTED\n"); eth_zero_addr(wilc_connected_ssid); } - if (hif_drv->usr_conn_req.pu8bssid) { - PRINT_D(HOSTINF_DBG, "Retrieving actual BSSID from AP\n"); - memcpy(strConnectInfo.au8bssid, hif_drv->usr_conn_req.pu8bssid, 6); + if (hif_drv->usr_conn_req.bssid) { + memcpy(strConnectInfo.bssid, hif_drv->usr_conn_req.bssid, 6); if ((u8MacStatus == MAC_CONNECTED) && - (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE)) { + (strConnectInfo.status == SUCCESSFUL_STATUSCODE)) { memcpy(hif_drv->assoc_bssid, - hif_drv->usr_conn_req.pu8bssid, ETH_ALEN); + hif_drv->usr_conn_req.bssid, ETH_ALEN); } } if (hif_drv->usr_conn_req.ies) { - strConnectInfo.ReqIEsLen = hif_drv->usr_conn_req.ies_len; - strConnectInfo.pu8ReqIEs = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL); - memcpy(strConnectInfo.pu8ReqIEs, + strConnectInfo.req_ies_len = hif_drv->usr_conn_req.ies_len; + strConnectInfo.req_ies = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL); + memcpy(strConnectInfo.req_ies, hif_drv->usr_conn_req.ies, hif_drv->usr_conn_req.ies_len); } @@ -1604,48 +1432,42 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif, hif_drv->usr_conn_req.arg); if ((u8MacStatus == MAC_CONNECTED) && - (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE)) { + (strConnectInfo.status == SUCCESSFUL_STATUSCODE)) { wilc_set_power_mgmt(vif, 0, 0); - PRINT_D(HOSTINF_DBG, "MAC status : CONNECTED and Connect Status : Successful\n"); hif_drv->hif_state = HOST_IF_CONNECTED; - PRINT_D(GENERIC_DBG, "Obtaining an IP, Disable Scan\n"); wilc_optaining_ip = true; mod_timer(&wilc_during_ip_timer, jiffies + msecs_to_jiffies(10000)); } else { - PRINT_D(HOSTINF_DBG, "MAC status : %d and Connect Status : %d\n", u8MacStatus, strConnectInfo.u16ConnectStatus); hif_drv->hif_state = HOST_IF_IDLE; scan_while_connected = false; } - kfree(strConnectInfo.pu8RespIEs); - strConnectInfo.pu8RespIEs = NULL; + kfree(strConnectInfo.resp_ies); + strConnectInfo.resp_ies = NULL; - kfree(strConnectInfo.pu8ReqIEs); - strConnectInfo.pu8ReqIEs = NULL; + kfree(strConnectInfo.req_ies); + strConnectInfo.req_ies = NULL; hif_drv->usr_conn_req.ssid_len = 0; - kfree(hif_drv->usr_conn_req.pu8ssid); - hif_drv->usr_conn_req.pu8ssid = NULL; 
- kfree(hif_drv->usr_conn_req.pu8bssid); - hif_drv->usr_conn_req.pu8bssid = NULL; + kfree(hif_drv->usr_conn_req.ssid); + hif_drv->usr_conn_req.ssid = NULL; + kfree(hif_drv->usr_conn_req.bssid); + hif_drv->usr_conn_req.bssid = NULL; hif_drv->usr_conn_req.ies_len = 0; kfree(hif_drv->usr_conn_req.ies); hif_drv->usr_conn_req.ies = NULL; } else if ((u8MacStatus == MAC_DISCONNECTED) && (hif_drv->hif_state == HOST_IF_CONNECTED)) { - PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW\n"); - - memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo)); + memset(&strDisconnectNotifInfo, 0, sizeof(struct disconnect_info)); if (hif_drv->usr_scan_req.scan_result) { - PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running OBSS Scan >>\n\n"); del_timer(&hif_drv->scan_timer); Handle_ScanDone(vif, SCAN_EVENT_ABORTED); } - strDisconnectNotifInfo.u16reason = 0; + strDisconnectNotifInfo.reason = 0; strDisconnectNotifInfo.ie = NULL; strDisconnectNotifInfo.ie_len = 0; @@ -1659,16 +1481,16 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif, &strDisconnectNotifInfo, hif_drv->usr_conn_req.arg); } else { - PRINT_ER("Connect result callback function is NULL\n"); + netdev_err(vif->ndev, "Connect result NULL\n"); } eth_zero_addr(hif_drv->assoc_bssid); hif_drv->usr_conn_req.ssid_len = 0; - kfree(hif_drv->usr_conn_req.pu8ssid); - hif_drv->usr_conn_req.pu8ssid = NULL; - kfree(hif_drv->usr_conn_req.pu8bssid); - hif_drv->usr_conn_req.pu8bssid = NULL; + kfree(hif_drv->usr_conn_req.ssid); + hif_drv->usr_conn_req.ssid = NULL; + kfree(hif_drv->usr_conn_req.bssid); + hif_drv->usr_conn_req.bssid = NULL; hif_drv->usr_conn_req.ies_len = 0; kfree(hif_drv->usr_conn_req.ies); hif_drv->usr_conn_req.ies = NULL; @@ -1688,9 +1510,6 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif, } else if ((u8MacStatus == MAC_DISCONNECTED) && (hif_drv->usr_scan_req.scan_result)) { - PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW while scanning\n"); - PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running Scan >>\n\n"); - del_timer(&hif_drv->scan_timer); if (hif_drv->usr_scan_req.scan_result) Handle_ScanDone(vif, SCAN_EVENT_ABORTED); @@ -1719,8 +1538,6 @@ static int Handle_Key(struct wilc_vif *vif, case WEP: if (pstrHostIFkeyAttr->action & ADDKEY_AP) { - PRINT_D(HOSTINF_DBG, "Handling WEP key\n"); - PRINT_D(GENERIC_DBG, "ID Hostint is %d\n", pstrHostIFkeyAttr->attr.wep.index); strWIDList[0].id = (u16)WID_11I_MODE; strWIDList[0].type = WID_CHAR; strWIDList[0].size = sizeof(char); @@ -1731,39 +1548,32 @@ static int Handle_Key(struct wilc_vif *vif, strWIDList[1].size = sizeof(char); strWIDList[1].val = (s8 *)&pstrHostIFkeyAttr->attr.wep.auth_type; - strWIDList[2].id = (u16)WID_KEY_ID; - strWIDList[2].type = WID_CHAR; - - strWIDList[2].val = (s8 *)&pstrHostIFkeyAttr->attr.wep.index; - strWIDList[2].size = sizeof(char); - - pu8keybuf = kmemdup(pstrHostIFkeyAttr->attr.wep.key, - pstrHostIFkeyAttr->attr.wep.key_len, + pu8keybuf = kmalloc(pstrHostIFkeyAttr->attr.wep.key_len + 2, GFP_KERNEL); - - if (pu8keybuf == NULL) { - PRINT_ER("No buffer to send Key\n"); + if (!pu8keybuf) return -ENOMEM; - } + + pu8keybuf[0] = pstrHostIFkeyAttr->attr.wep.index; + pu8keybuf[1] = pstrHostIFkeyAttr->attr.wep.key_len; + + memcpy(&pu8keybuf[2], pstrHostIFkeyAttr->attr.wep.key, + pstrHostIFkeyAttr->attr.wep.key_len); kfree(pstrHostIFkeyAttr->attr.wep.key); - strWIDList[3].id = (u16)WID_WEP_KEY_VALUE; - strWIDList[3].type = WID_STR; - strWIDList[3].size = pstrHostIFkeyAttr->attr.wep.key_len; - strWIDList[3].val = (s8 *)pu8keybuf; + 
strWIDList[2].id = (u16)WID_WEP_KEY_VALUE; + strWIDList[2].type = WID_STR; + strWIDList[2].size = pstrHostIFkeyAttr->attr.wep.key_len + 2; + strWIDList[2].val = (s8 *)pu8keybuf; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, - strWIDList, 4, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, + strWIDList, 3, + wilc_get_vif_idx(vif)); kfree(pu8keybuf); } else if (pstrHostIFkeyAttr->action & ADDKEY) { - PRINT_D(HOSTINF_DBG, "Handling WEP key\n"); pu8keybuf = kmalloc(pstrHostIFkeyAttr->attr.wep.key_len + 2, GFP_KERNEL); - if (!pu8keybuf) { - PRINT_ER("No buffer to send Key\n"); + if (!pu8keybuf) return -ENOMEM; - } pu8keybuf[0] = pstrHostIFkeyAttr->attr.wep.index; memcpy(pu8keybuf + 1, &pstrHostIFkeyAttr->attr.wep.key_len, 1); memcpy(pu8keybuf + 2, pstrHostIFkeyAttr->attr.wep.key, @@ -1775,12 +1585,11 @@ static int Handle_Key(struct wilc_vif *vif, wid.val = (s8 *)pu8keybuf; wid.size = pstrHostIFkeyAttr->attr.wep.key_len + 2; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, - &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, + &wid, 1, + wilc_get_vif_idx(vif)); kfree(pu8keybuf); } else if (pstrHostIFkeyAttr->action & REMOVEKEY) { - PRINT_D(HOSTINF_DBG, "Removing key\n"); wid.id = (u16)WID_REMOVE_WEP_KEY; wid.type = WID_STR; @@ -1788,20 +1597,18 @@ static int Handle_Key(struct wilc_vif *vif, wid.val = s8idxarray; wid.size = 1; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, - &wid, 1, - wilc_get_vif_idx(vif)); - } else { + result = wilc_send_config_pkt(vif, SET_CFG, + &wid, 1, + wilc_get_vif_idx(vif)); + } else if (pstrHostIFkeyAttr->action & DEFAULTKEY) { wid.id = (u16)WID_KEY_ID; wid.type = WID_CHAR; wid.val = (s8 *)&pstrHostIFkeyAttr->attr.wep.index; wid.size = sizeof(char); - PRINT_D(HOSTINF_DBG, "Setting default key index\n"); - - result = wilc_send_config_pkt(vif->wilc, SET_CFG, - &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, + &wid, 1, + wilc_get_vif_idx(vif)); } up(&hif_drv->sem_test_key_block); break; @@ -1810,7 +1617,6 @@ static int Handle_Key(struct wilc_vif *vif, if (pstrHostIFkeyAttr->action & ADDKEY_AP) { pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL); if (!pu8keybuf) { - PRINT_ER("No buffer to send RxGTK Key\n"); ret = -ENOMEM; goto _WPARxGtk_end_case_; } @@ -1833,18 +1639,15 @@ static int Handle_Key(struct wilc_vif *vif, strWIDList[1].val = (s8 *)pu8keybuf; strWIDList[1].size = RX_MIC_KEY_MSG_LEN; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, - strWIDList, 2, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, + strWIDList, 2, + wilc_get_vif_idx(vif)); kfree(pu8keybuf); up(&hif_drv->sem_test_key_block); } else if (pstrHostIFkeyAttr->action & ADDKEY) { - PRINT_D(HOSTINF_DBG, "Handling group key(Rx) function\n"); - pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL); if (pu8keybuf == NULL) { - PRINT_ER("No buffer to send RxGTK Key\n"); ret = -ENOMEM; goto _WPARxGtk_end_case_; } @@ -1852,7 +1655,7 @@ static int Handle_Key(struct wilc_vif *vif, if (hif_drv->hif_state == HOST_IF_CONNECTED) memcpy(pu8keybuf, hif_drv->assoc_bssid, ETH_ALEN); else - PRINT_ER("Couldn't handle WPARxGtk while state is not HOST_IF_CONNECTED\n"); + netdev_err(vif->ndev, "Couldn't handle\n"); memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->attr.wpa.seq, 8); memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->attr.wpa.index, 1); @@ -1865,9 +1668,9 @@ static int Handle_Key(struct wilc_vif *vif, wid.val = (s8 *)pu8keybuf; wid.size = RX_MIC_KEY_MSG_LEN; - result = wilc_send_config_pkt(vif->wilc, 
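For the AP-mode WEP path above, the rewritten ADDKEY_AP branch folds the key index and key length into the head of the WID_WEP_KEY_VALUE payload and drops the separate WID_KEY_ID entry, so only three WIDs are sent. A hedged sketch of that buffer layout follows; the helper name is illustrative and only the packing order is taken from the hunk above.

/* Sketch: pack a WEP key as [index][length][key bytes ...], matching the
 * layout the ADDKEY_AP branch now sends in WID_WEP_KEY_VALUE.
 */
static u8 *sketch_pack_wep_key(u8 index, const u8 *key, u8 key_len,
			       u16 *out_size)
{
	u8 *buf = kmalloc(key_len + 2, GFP_KERNEL);

	if (!buf)
		return NULL;

	buf[0] = index;			/* key slot to program          */
	buf[1] = key_len;		/* length of the raw key bytes  */
	memcpy(&buf[2], key, key_len);

	*out_size = key_len + 2;	/* becomes wid.size for the WID */
	return buf;
}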
SET_CFG, - &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, + &wid, 1, + wilc_get_vif_idx(vif)); kfree(pu8keybuf); up(&hif_drv->sem_test_key_block); @@ -1884,7 +1687,6 @@ _WPARxGtk_end_case_: if (pstrHostIFkeyAttr->action & ADDKEY_AP) { pu8keybuf = kmalloc(PTK_KEY_MSG_LEN + 1, GFP_KERNEL); if (!pu8keybuf) { - PRINT_ER("No buffer to send PTK Key\n"); ret = -ENOMEM; goto _WPAPtk_end_case_; } @@ -1905,15 +1707,15 @@ _WPARxGtk_end_case_: strWIDList[1].val = (s8 *)pu8keybuf; strWIDList[1].size = PTK_KEY_MSG_LEN + 1; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, - strWIDList, 2, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, + strWIDList, 2, + wilc_get_vif_idx(vif)); kfree(pu8keybuf); up(&hif_drv->sem_test_key_block); } else if (pstrHostIFkeyAttr->action & ADDKEY) { pu8keybuf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL); if (!pu8keybuf) { - PRINT_ER("No buffer to send PTK Key\n"); + netdev_err(vif->ndev, "No buffer send PTK\n"); ret = -ENOMEM; goto _WPAPtk_end_case_; } @@ -1928,9 +1730,9 @@ _WPARxGtk_end_case_: wid.val = (s8 *)pu8keybuf; wid.size = PTK_KEY_MSG_LEN; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, - &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, + &wid, 1, + wilc_get_vif_idx(vif)); kfree(pu8keybuf); up(&hif_drv->sem_test_key_block); } @@ -1943,12 +1745,9 @@ _WPAPtk_end_case_: break; case PMKSA: - - PRINT_D(HOSTINF_DBG, "Handling PMKSA key\n"); - pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL); if (!pu8keybuf) { - PRINT_ER("No buffer to send PMKSA Key\n"); + netdev_err(vif->ndev, "No buffer to send PMKSA Key\n"); return -ENOMEM; } @@ -1964,15 +1763,15 @@ _WPAPtk_end_case_: wid.val = (s8 *)pu8keybuf; wid.size = (pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); kfree(pu8keybuf); break; } if (result) - PRINT_ER("Failed to send key config packet\n"); + netdev_err(vif->ndev, "Failed to send key config packet\n"); return result; } @@ -1990,24 +1789,22 @@ static void Handle_Disconnect(struct wilc_vif *vif) wid.val = (s8 *)&u16DummyReasonCode; wid.size = sizeof(char); - PRINT_D(HOSTINF_DBG, "Sending disconnect request\n"); - wilc_optaining_ip = false; wilc_set_power_mgmt(vif, 0, 0); eth_zero_addr(wilc_connected_ssid); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) { - PRINT_ER("Failed to send dissconect config packet\n"); + netdev_err(vif->ndev, "Failed to send dissconect\n"); } else { - tstrDisconnectNotifInfo strDisconnectNotifInfo; + struct disconnect_info strDisconnectNotifInfo; - memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo)); + memset(&strDisconnectNotifInfo, 0, sizeof(struct disconnect_info)); - strDisconnectNotifInfo.u16reason = 0; + strDisconnectNotifInfo.reason = 0; strDisconnectNotifInfo.ie = NULL; strDisconnectNotifInfo.ie_len = 0; @@ -2021,10 +1818,8 @@ static void Handle_Disconnect(struct wilc_vif *vif) } if (hif_drv->usr_conn_req.conn_result) { - if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) { - PRINT_D(HOSTINF_DBG, "Upper layer requested termination of connection\n"); + if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) del_timer(&hif_drv->connect_timer); - } 
hif_drv->usr_conn_req.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, NULL, @@ -2032,7 +1827,7 @@ static void Handle_Disconnect(struct wilc_vif *vif) &strDisconnectNotifInfo, hif_drv->usr_conn_req.arg); } else { - PRINT_ER("usr_conn_req.conn_result = NULL\n"); + netdev_err(vif->ndev, "conn_result = NULL\n"); } scan_while_connected = false; @@ -2042,10 +1837,10 @@ static void Handle_Disconnect(struct wilc_vif *vif) eth_zero_addr(hif_drv->assoc_bssid); hif_drv->usr_conn_req.ssid_len = 0; - kfree(hif_drv->usr_conn_req.pu8ssid); - hif_drv->usr_conn_req.pu8ssid = NULL; - kfree(hif_drv->usr_conn_req.pu8bssid); - hif_drv->usr_conn_req.pu8bssid = NULL; + kfree(hif_drv->usr_conn_req.ssid); + hif_drv->usr_conn_req.ssid = NULL; + kfree(hif_drv->usr_conn_req.bssid); + hif_drv->usr_conn_req.bssid = NULL; hif_drv->usr_conn_req.ies_len = 0; kfree(hif_drv->usr_conn_req.ies); hif_drv->usr_conn_req.ies = NULL; @@ -2069,36 +1864,8 @@ void wilc_resolve_disconnect_aberration(struct wilc_vif *vif) if (!vif->hif_drv) return; if ((vif->hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) || - (vif->hif_drv->hif_state == HOST_IF_CONNECTING)) { - PRINT_D(HOSTINF_DBG, "\n\n<< correcting Supplicant state machine >>\n\n"); + (vif->hif_drv->hif_state == HOST_IF_CONNECTING)) wilc_disconnect(vif, 1); - } -} - -static s32 Handle_GetChnl(struct wilc_vif *vif) -{ - s32 result = 0; - struct wid wid; - struct host_if_drv *hif_drv = vif->hif_drv; - - wid.id = (u16)WID_CURRENT_CHANNEL; - wid.type = WID_CHAR; - wid.val = (s8 *)&ch_no; - wid.size = sizeof(char); - - PRINT_D(HOSTINF_DBG, "Getting channel value\n"); - - result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); - - if (result) { - PRINT_ER("Failed to get channel number\n"); - result = -EFAULT; - } - - up(&hif_drv->sem_get_chnl); - - return result; } static void Handle_GetRssi(struct wilc_vif *vif) @@ -2111,43 +1878,16 @@ static void Handle_GetRssi(struct wilc_vif *vif) wid.val = &rssi; wid.size = sizeof(char); - PRINT_D(HOSTINF_DBG, "Getting RSSI value\n"); - - result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) { - PRINT_ER("Failed to get RSSI value\n"); + netdev_err(vif->ndev, "Failed to get RSSI value\n"); result = -EFAULT; } up(&vif->hif_drv->sem_get_rssi); } -static void Handle_GetLinkspeed(struct wilc_vif *vif) -{ - s32 result = 0; - struct wid wid; - struct host_if_drv *hif_drv = vif->hif_drv; - - link_speed = 0; - - wid.id = (u16)WID_LINKSPEED; - wid.type = WID_CHAR; - wid.val = &link_speed; - wid.size = sizeof(char); - - PRINT_D(HOSTINF_DBG, "Getting LINKSPEED value\n"); - - result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); - if (result) { - PRINT_ER("Failed to get LINKSPEED value\n"); - result = -EFAULT; - } - - up(&hif_drv->sem_get_link_speed); -} - static s32 Handle_GetStatistics(struct wilc_vif *vif, struct rf_info *pstrStatistics) { @@ -2184,14 +1924,21 @@ static s32 Handle_GetStatistics(struct wilc_vif *vif, strWIDList[u32WidsCount].val = (s8 *)&pstrStatistics->tx_fail_cnt; u32WidsCount++; - result = wilc_send_config_pkt(vif->wilc, GET_CFG, strWIDList, - u32WidsCount, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, GET_CFG, strWIDList, + u32WidsCount, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send scan paramters config packet\n"); + netdev_err(vif->ndev, "Failed to send scan parameters\n"); - up(&hif_sema_wait_response); + if 
(pstrStatistics->link_speed > TCP_ACK_FILTER_LINK_SPEED_THRESH && + pstrStatistics->link_speed != DEFAULT_LINK_SPEED) + wilc_enable_tcp_ack_filter(true); + else if (pstrStatistics->link_speed != DEFAULT_LINK_SPEED) + wilc_enable_tcp_ack_filter(false); + + if (pstrStatistics != &vif->wilc->dummy_statistics) + up(&hif_sema_wait_response); return 0; } @@ -2211,13 +1958,11 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif, stamac = wid.val; memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN); - PRINT_D(CFG80211_DBG, "SETING STA inactive time\n"); - - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) { - PRINT_ER("Failed to SET incative time\n"); + netdev_err(vif->ndev, "Failed to SET incative time\n"); return -EFAULT; } @@ -2226,16 +1971,14 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif, wid.val = (s8 *)&inactive_time; wid.size = sizeof(u32); - result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) { - PRINT_ER("Failed to get incative time\n"); + netdev_err(vif->ndev, "Failed to get incative time\n"); return -EFAULT; } - PRINT_D(CFG80211_DBG, "Getting inactive time : %d\n", inactive_time); - up(&hif_drv->sem_inactive_time); return result; @@ -2248,8 +1991,6 @@ static void Handle_AddBeacon(struct wilc_vif *vif, struct wid wid; u8 *pu8CurrByte; - PRINT_D(HOSTINF_DBG, "Adding BEACON\n"); - wid.id = (u16)WID_ADD_BEACON; wid.type = WID_BIN; wid.size = pstrSetBeaconParam->head_len + pstrSetBeaconParam->tail_len + 16; @@ -2285,10 +2026,10 @@ static void Handle_AddBeacon(struct wilc_vif *vif, memcpy(pu8CurrByte, pstrSetBeaconParam->tail, pstrSetBeaconParam->tail_len); pu8CurrByte += pstrSetBeaconParam->tail_len; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send add beacon config packet\n"); + netdev_err(vif->ndev, "Failed to send add beacon\n"); ERRORHANDLER: kfree(wid.val); @@ -2312,12 +2053,10 @@ static void Handle_DelBeacon(struct wilc_vif *vif) pu8CurrByte = wid.val; - PRINT_D(HOSTINF_DBG, "Deleting BEACON\n"); - - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send delete beacon config packet\n"); + netdev_err(vif->ndev, "Failed to send delete beacon\n"); } static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer, @@ -2327,7 +2066,6 @@ static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer, pu8CurrByte = pu8Buffer; - PRINT_D(HOSTINF_DBG, "Packing STA params\n"); memcpy(pu8CurrByte, pstrStationParam->bssid, ETH_ALEN); pu8CurrByte += ETH_ALEN; @@ -2375,7 +2113,6 @@ static void Handle_AddStation(struct wilc_vif *vif, struct wid wid; u8 *pu8CurrByte; - PRINT_D(HOSTINF_DBG, "Handling add station\n"); wid.id = (u16)WID_ADD_STA; wid.type = WID_BIN; wid.size = WILC_ADD_STA_LENGTH + pstrStationParam->rates_len; @@ -2387,10 +2124,10 @@ static void Handle_AddStation(struct wilc_vif *vif, pu8CurrByte = wid.val; pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); 
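The Handle_GetStatistics hunk earlier in this span gates the TCP ACK filter on the reported link speed, with DEFAULT_LINK_SPEED apparently acting as a "no measurement yet" sentinel that leaves the filter untouched. A small sketch of that decision is below; the numeric values stand in for TCP_ACK_FILTER_LINK_SPEED_THRESH and DEFAULT_LINK_SPEED and are assumptions, only the shape of the check is taken from the patch.

/* Sketch of the link-speed gate on the TCP ACK filter. */
#define SKETCH_ACK_FILTER_THRESH	54	/* assumed threshold  */
#define SKETCH_DEFAULT_LINK_SPEED	0	/* assumed sentinel   */

static void sketch_update_ack_filter(u8 link_speed)
{
	if (link_speed == SKETCH_DEFAULT_LINK_SPEED)
		return;		/* no valid report yet, leave filter alone */

	/* fast links benefit from dropping duplicate TCP ACKs */
	wilc_enable_tcp_ack_filter(link_speed > SKETCH_ACK_FILTER_THRESH);
}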
if (result != 0) - PRINT_ER("Failed to send add station config packet\n"); + netdev_err(vif->ndev, "Failed to send add station\n"); ERRORHANDLER: kfree(pstrStationParam->rates); @@ -2410,8 +2147,6 @@ static void Handle_DelAllSta(struct wilc_vif *vif, wid.type = WID_STR; wid.size = (pstrDelAllStaParam->assoc_sta * ETH_ALEN) + 1; - PRINT_D(HOSTINF_DBG, "Handling delete station\n"); - wid.val = kmalloc((pstrDelAllStaParam->assoc_sta * ETH_ALEN) + 1, GFP_KERNEL); if (!wid.val) goto ERRORHANDLER; @@ -2429,10 +2164,10 @@ static void Handle_DelAllSta(struct wilc_vif *vif, pu8CurrByte += ETH_ALEN; } - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send add station config packet\n"); + netdev_err(vif->ndev, "Failed to send add station\n"); ERRORHANDLER: kfree(wid.val); @@ -2451,8 +2186,6 @@ static void Handle_DelStation(struct wilc_vif *vif, wid.type = WID_BIN; wid.size = ETH_ALEN; - PRINT_D(HOSTINF_DBG, "Handling delete station\n"); - wid.val = kmalloc(wid.size, GFP_KERNEL); if (!wid.val) goto ERRORHANDLER; @@ -2461,10 +2194,10 @@ static void Handle_DelStation(struct wilc_vif *vif, memcpy(pu8CurrByte, pstrDelStaParam->mac_addr, ETH_ALEN); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send add station config packet\n"); + netdev_err(vif->ndev, "Failed to send add station\n"); ERRORHANDLER: kfree(wid.val); @@ -2481,7 +2214,6 @@ static void Handle_EditStation(struct wilc_vif *vif, wid.type = WID_BIN; wid.size = WILC_ADD_STA_LENGTH + pstrStationParam->rates_len; - PRINT_D(HOSTINF_DBG, "Handling edit station\n"); wid.val = kmalloc(wid.size, GFP_KERNEL); if (!wid.val) goto ERRORHANDLER; @@ -2489,10 +2221,10 @@ static void Handle_EditStation(struct wilc_vif *vif, pu8CurrByte = wid.val; pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send edit station config packet\n"); + netdev_err(vif->ndev, "Failed to send edit station\n"); ERRORHANDLER: kfree(pstrStationParam->rates); @@ -2518,26 +2250,20 @@ static int Handle_RemainOnChan(struct wilc_vif *vif, } if (hif_drv->usr_scan_req.scan_result) { - PRINT_INFO(GENERIC_DBG, "Required to remain on chan while scanning return\n"); hif_drv->remain_on_ch_pending = 1; result = -EBUSY; goto ERRORHANDLER; } if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) { - PRINT_INFO(GENERIC_DBG, "Required to remain on chan while connecting return\n"); result = -EBUSY; goto ERRORHANDLER; } if (wilc_optaining_ip || wilc_connecting) { - PRINT_D(GENERIC_DBG, "[handle_scan]: Don't do obss scan until IP adresss is obtained\n"); result = -EBUSY; goto ERRORHANDLER; } - PRINT_D(HOSTINF_DBG, "Setting channel :%d\n", - pstrHostIfRemainOnChan->ch); - u8remain_on_chan_flag = true; wid.id = (u16)WID_REMAIN_ON_CHAN; wid.type = WID_STR; @@ -2551,10 +2277,10 @@ static int Handle_RemainOnChan(struct wilc_vif *vif, wid.val[0] = u8remain_on_chan_flag; wid.val[1] = (s8)pstrHostIfRemainOnChan->ch; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result != 
0) - PRINT_ER("Failed to set remain on channel\n"); + netdev_err(vif->ndev, "Failed to set remain on channel\n"); ERRORHANDLER: { @@ -2562,7 +2288,7 @@ ERRORHANDLER: hif_drv->remain_on_ch_timer.data = (unsigned long)vif; mod_timer(&hif_drv->remain_on_ch_timer, jiffies + - msecs_to_jiffies(pstrHostIfRemainOnChan->u32duration)); + msecs_to_jiffies(pstrHostIfRemainOnChan->duration)); if (hif_drv->remain_on_ch.ready) hif_drv->remain_on_ch.ready(hif_drv->remain_on_ch.arg); @@ -2581,10 +2307,6 @@ static int Handle_RegisterFrame(struct wilc_vif *vif, struct wid wid; u8 *pu8CurrByte; - PRINT_D(HOSTINF_DBG, "Handling frame register : %d FrameType: %d\n", - pstrHostIfRegisterFrame->reg, - pstrHostIfRegisterFrame->frame_type); - wid.id = (u16)WID_REGISTER_FRAME; wid.type = WID_STR; wid.val = kmalloc(sizeof(u16) + 2, GFP_KERNEL); @@ -2599,10 +2321,10 @@ static int Handle_RegisterFrame(struct wilc_vif *vif, wid.size = sizeof(u16) + 2; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) { - PRINT_ER("Failed to frame register config packet\n"); + netdev_err(vif->ndev, "Failed to frame register\n"); result = -EINVAL; } @@ -2617,8 +2339,6 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif, s32 result = 0; struct host_if_drv *hif_drv = vif->hif_drv; - PRINT_D(HOSTINF_DBG, "CANCEL REMAIN ON CHAN\n"); - if (P2P_LISTEN_STATE) { u8remain_on_chan_flag = false; wid.id = (u16)WID_REMAIN_ON_CHAN; @@ -2627,17 +2347,17 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif, wid.val = kmalloc(wid.size, GFP_KERNEL); if (!wid.val) { - PRINT_ER("Failed to allocate memory\n"); + netdev_err(vif->ndev, "Failed to allocate memory\n"); return -ENOMEM; } wid.val[0] = u8remain_on_chan_flag; wid.val[1] = FALSE_FRMWR_CHANNEL; - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result != 0) { - PRINT_ER("Failed to set remain on channel\n"); + netdev_err(vif->ndev, "Failed to set remain channel\n"); goto _done_; } @@ -2647,7 +2367,7 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif, } P2P_LISTEN_STATE = 0; } else { - PRINT_D(GENERIC_DBG, "Not in listen state\n"); + netdev_dbg(vif->ndev, "Not in listen state\n"); result = -EFAULT; } @@ -2670,7 +2390,7 @@ static void ListenTimerCB(unsigned long arg) result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); } static void Handle_PowerManagement(struct wilc_vif *vif, @@ -2686,16 +2406,14 @@ static void Handle_PowerManagement(struct wilc_vif *vif, s8PowerMode = MIN_FAST_PS; else s8PowerMode = NO_POWERSAVE; - PRINT_D(HOSTINF_DBG, "Handling power mgmt to %d\n", s8PowerMode); + wid.val = &s8PowerMode; wid.size = sizeof(char); - PRINT_D(HOSTINF_DBG, "Handling Power Management\n"); - - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send power management config packet\n"); + netdev_err(vif->ndev, "Failed to send power management\n"); } static void Handle_SetMulticastFilter(struct wilc_vif *vif, @@ -2705,8 +2423,6 @@ static void Handle_SetMulticastFilter(struct wilc_vif *vif, struct wid wid; u8 *pu8CurrByte; - PRINT_D(HOSTINF_DBG, "Setup Multicast Filter\n"); - wid.id = 
(u16)WID_SETUP_MULTICAST_FILTER; wid.type = WID_BIN; wid.size = sizeof(struct set_multicast) + ((strHostIfSetMulti->cnt) * ETH_ALEN); @@ -2729,59 +2445,54 @@ static void Handle_SetMulticastFilter(struct wilc_vif *vif, memcpy(pu8CurrByte, wilc_multicast_mac_addr_list, ((strHostIfSetMulti->cnt) * ETH_ALEN)); - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) - PRINT_ER("Failed to send setup multicast config packet\n"); + netdev_err(vif->ndev, "Failed to send setup multicast\n"); ERRORHANDLER: kfree(wid.val); } -static s32 Handle_DelAllRxBASessions(struct wilc_vif *vif, - struct ba_session_info *strHostIfBASessionInfo) +static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr) { - s32 result = 0; + int ret; struct wid wid; - char *ptr = NULL; - PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\n", - strHostIfBASessionInfo->bssid[0], - strHostIfBASessionInfo->bssid[1], - strHostIfBASessionInfo->bssid[2], - strHostIfBASessionInfo->tid); + wid.id = (u16)WID_TX_POWER; + wid.type = WID_CHAR; + wid.val = &tx_pwr; + wid.size = sizeof(char); - wid.id = (u16)WID_DEL_ALL_RX_BA; - wid.type = WID_STR; - wid.val = kmalloc(BLOCK_ACK_REQ_SIZE, GFP_KERNEL); - wid.size = BLOCK_ACK_REQ_SIZE; - ptr = wid.val; - *ptr++ = 0x14; - *ptr++ = 0x3; - *ptr++ = 0x2; - memcpy(ptr, strHostIfBASessionInfo->bssid, ETH_ALEN); - ptr += ETH_ALEN; - *ptr++ = strHostIfBASessionInfo->tid; - *ptr++ = 0; - *ptr++ = 32; - - result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); - if (result) - PRINT_D(HOSTINF_DBG, "Couldn't delete BA Session\n"); + ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); + if (ret) + netdev_err(vif->ndev, "Failed to set TX PWR\n"); +} - kfree(wid.val); +static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr) +{ + s32 ret = 0; + struct wid wid; - up(&hif_sema_wait_response); + wid.id = (u16)WID_TX_POWER; + wid.type = WID_CHAR; + wid.val = (s8 *)tx_pwr; + wid.size = sizeof(char); - return result; + ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); + if (ret) + netdev_err(vif->ndev, "Failed to get TX PWR\n"); + + up(&hif_sema_wait_response); } static int hostIFthread(void *pvArg) { u32 u32Ret; struct host_if_msg msg; - struct wilc *wilc = (struct wilc*)pvArg; + struct wilc *wilc = pvArg; struct wilc_vif *vif; memset(&msg, 0, sizeof(struct host_if_msg)); @@ -2789,13 +2500,10 @@ static int hostIFthread(void *pvArg) while (1) { wilc_mq_recv(&hif_msg_q, &msg, sizeof(struct host_if_msg), &u32Ret); vif = msg.vif; - if (msg.id == HOST_IF_MSG_EXIT) { - PRINT_D(GENERIC_DBG, "THREAD: Exiting HostIfThread\n"); + if (msg.id == HOST_IF_MSG_EXIT) break; - } if ((!wilc_initialized)) { - PRINT_D(GENERIC_DBG, "--WAIT--"); usleep_range(200 * 1000, 200 * 1000); wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); continue; @@ -2803,17 +2511,12 @@ static int hostIFthread(void *pvArg) if (msg.id == HOST_IF_MSG_CONNECT && vif->hif_drv->usr_scan_req.scan_result) { - PRINT_D(HOSTINF_DBG, "Requeue connect request till scan done received\n"); wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); usleep_range(2 * 1000, 2 * 1000); continue; } switch (msg.id) { - case HOST_IF_MSG_Q_IDLE: - Handle_wait_msg_q_empty(); - break; - case HOST_IF_MSG_SCAN: Handle_Scan(msg.vif, &msg.body.scan_info); break; @@ -2822,10 +2525,6 @@ static int hostIFthread(void *pvArg) 
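The new handle_set_tx_pwr() and handle_get_tx_pwr() above both follow the same single-byte WID exchange pattern. The sketch below factors that pattern out purely for illustration; the wrapper name is hypothetical, while the wid layout, wilc_send_config_pkt() call, and the vif-based signature mirror the hunks above.

/* Sketch: send or fetch one WID_CHAR parameter through the config path. */
static int sketch_char_wid(struct wilc_vif *vif, u16 wid_id, u8 *val, bool set)
{
	struct wid wid;
	int ret;

	wid.id = wid_id;
	wid.type = WID_CHAR;
	wid.val = (s8 *)val;		/* in for SET_CFG, out for GET_CFG */
	wid.size = sizeof(char);

	ret = wilc_send_config_pkt(vif, set ? SET_CFG : GET_CFG, &wid, 1,
				   wilc_get_vif_idx(vif));
	if (ret)
		netdev_err(vif->ndev, "char WID %#x transfer failed\n", wid_id);

	return ret;
}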
Handle_Connect(msg.vif, &msg.body.con_info); break; - case HOST_IF_MSG_FLUSH_CONNECT: - Handle_FlushConnect(msg.vif); - break; - case HOST_IF_MSG_RCVD_NTWRK_INFO: Handle_RcvdNtwrkInfo(msg.vif, &msg.body.net_info); break; @@ -2853,7 +2552,6 @@ static int hostIFthread(void *pvArg) case HOST_IF_MSG_RCVD_SCAN_COMPLETE: del_timer(&vif->hif_drv->scan_timer); - PRINT_D(HOSTINF_DBG, "scan completed successfully\n"); if (!wilc_wlan_get_num_conn_ifcs(wilc)) wilc_chip_sleep_manually(wilc); @@ -2870,19 +2568,11 @@ static int hostIFthread(void *pvArg) Handle_GetRssi(msg.vif); break; - case HOST_IF_MSG_GET_LINKSPEED: - Handle_GetLinkspeed(msg.vif); - break; - case HOST_IF_MSG_GET_STATISTICS: Handle_GetStatistics(msg.vif, (struct rf_info *)msg.body.data); break; - case HOST_IF_MSG_GET_CHNL: - Handle_GetChnl(msg.vif); - break; - case HOST_IF_MSG_ADD_BEACON: Handle_AddBeacon(msg.vif, &msg.body.beacon_info); break; @@ -2908,13 +2598,11 @@ static int hostIFthread(void *pvArg) break; case HOST_IF_MSG_SCAN_TIMER_FIRED: - PRINT_D(HOSTINF_DBG, "Scan Timeout\n"); Handle_ScanDone(msg.vif, SCAN_EVENT_ABORTED); break; case HOST_IF_MSG_CONNECT_TIMER_FIRED: - PRINT_D(HOSTINF_DBG, "Connect Timeout\n"); Handle_ConnectTimeout(msg.vif); break; @@ -2932,34 +2620,25 @@ static int hostIFthread(void *pvArg) break; case HOST_IF_MSG_SET_IPADDRESS: - PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_IPADDRESS\n"); handle_set_ip_address(vif, msg.body.ip_info.ip_addr, msg.body.ip_info.idx); break; case HOST_IF_MSG_GET_IPADDRESS: - PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_IPADDRESS\n"); handle_get_ip_address(vif, msg.body.ip_info.idx); break; - case HOST_IF_MSG_SET_MAC_ADDRESS: - handle_set_mac_address(msg.vif, - &msg.body.set_mac_info); - break; - case HOST_IF_MSG_GET_MAC_ADDRESS: handle_get_mac_address(msg.vif, &msg.body.get_mac_info); break; case HOST_IF_MSG_REMAIN_ON_CHAN: - PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_REMAIN_ON_CHAN\n"); Handle_RemainOnChan(msg.vif, &msg.body.remain_on_ch); break; case HOST_IF_MSG_REGISTER_FRAME: - PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_REGISTER_FRAME\n"); Handle_RegisterFrame(msg.vif, &msg.body.reg_frame); break; @@ -2968,25 +2647,26 @@ static int hostIFthread(void *pvArg) break; case HOST_IF_MSG_SET_MULTICAST_FILTER: - PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_MULTICAST_FILTER\n"); Handle_SetMulticastFilter(msg.vif, &msg.body.multicast_info); break; - case HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS: - Handle_DelAllRxBASessions(msg.vif, &msg.body.session_info); - break; - case HOST_IF_MSG_DEL_ALL_STA: Handle_DelAllSta(msg.vif, &msg.body.del_all_sta_info); break; + case HOST_IF_MSG_SET_TX_POWER: + handle_set_tx_pwr(msg.vif, msg.body.tx_power.tx_pwr); + break; + + case HOST_IF_MSG_GET_TX_POWER: + handle_get_tx_pwr(msg.vif, &msg.body.tx_power.tx_pwr); + break; default: - PRINT_ER("[Host Interface] undefined Received Msg ID\n"); + netdev_err(vif->ndev, "[Host Interface] undefined\n"); break; } } - PRINT_D(HOSTINF_DBG, "Releasing thread exit semaphore\n"); up(&hif_sema_thread); return 0; } @@ -3035,7 +2715,7 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index) if (!hif_drv) { result = -EFAULT; - PRINT_ER("Failed to send setup multicast config packet\n"); + netdev_err(vif->ndev, "Failed to send setup multicast\n"); return result; } @@ -3049,7 +2729,7 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index) result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending message queue : Request to remove WEP key\n"); + netdev_err(vif->ndev, "Request to remove WEP key\n"); 
down(&hif_drv->sem_test_key_block); return result; @@ -3063,7 +2743,7 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index) if (!hif_drv) { result = -EFAULT; - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return result; } @@ -3077,7 +2757,7 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index) result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending message queue : Default key index\n"); + netdev_err(vif->ndev, "Default key index\n"); down(&hif_drv->sem_test_key_block); return result; @@ -3091,7 +2771,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len, struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } @@ -3110,7 +2790,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len, result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending message queue :WEP Key\n"); + netdev_err(vif->ndev, "STA - WEP Key\n"); down(&hif_drv->sem_test_key_block); return result; @@ -3122,19 +2802,14 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len, int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; - int i; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); - if (INFO) { - for (i = 0; i < len; i++) - PRINT_INFO(HOSTAPD_DBG, "KEY is %x\n", key[i]); - } msg.id = HOST_IF_MSG_KEY; msg.body.key_info.type = WEP; msg.body.key_info.action = ADDKEY_AP; @@ -3151,7 +2826,7 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len, result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending message queue :WEP Key\n"); + netdev_err(vif->ndev, "AP - WEP Key\n"); down(&hif_drv->sem_test_key_block); return result; @@ -3165,10 +2840,9 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len, struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; u8 key_len = ptk_key_len; - int i; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } @@ -3193,20 +2867,11 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len, if (!msg.body.key_info.attr.wpa.key) return -ENOMEM; - if (rx_mic) { + if (rx_mic) memcpy(msg.body.key_info.attr.wpa.key + 16, rx_mic, RX_MIC_KEY_LEN); - if (INFO) { - for (i = 0; i < RX_MIC_KEY_LEN; i++) - PRINT_INFO(CFG80211_DBG, "PairwiseRx[%d] = %x\n", i, rx_mic[i]); - } - } - if (tx_mic) { + + if (tx_mic) memcpy(msg.body.key_info.attr.wpa.key + 24, tx_mic, TX_MIC_KEY_LEN); - if (INFO) { - for (i = 0; i < TX_MIC_KEY_LEN; i++) - PRINT_INFO(CFG80211_DBG, "PairwiseTx[%d] = %x\n", i, tx_mic[i]); - } - } msg.body.key_info.attr.wpa.key_len = key_len; msg.body.key_info.attr.wpa.mac_addr = mac_addr; @@ -3216,7 +2881,7 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len, result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending message queue: PTK Key\n"); + netdev_err(vif->ndev, "PTK Key\n"); down(&hif_drv->sem_test_key_block); @@ -3234,7 +2899,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, u8 key_len = gtk_key_len; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is 
null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); @@ -3284,23 +2949,23 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending message queue: RX GTK\n"); + netdev_err(vif->ndev, "RX GTK\n"); down(&hif_drv->sem_test_key_block); return result; } -s32 wilc_set_pmkid_info(struct wilc_vif *vif, - struct host_if_pmkid_attr *pu8PmkidInfoArray) +int wilc_set_pmkid_info(struct wilc_vif *vif, + struct host_if_pmkid_attr *pmkid) { - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; - u32 i; + int i; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } @@ -3311,34 +2976,34 @@ s32 wilc_set_pmkid_info(struct wilc_vif *vif, msg.body.key_info.action = ADDKEY; msg.vif = vif; - for (i = 0; i < pu8PmkidInfoArray->numpmkid; i++) { + for (i = 0; i < pmkid->numpmkid; i++) { memcpy(msg.body.key_info.attr.pmkid.pmkidlist[i].bssid, - &pu8PmkidInfoArray->pmkidlist[i].bssid, ETH_ALEN); + &pmkid->pmkidlist[i].bssid, ETH_ALEN); memcpy(msg.body.key_info.attr.pmkid.pmkidlist[i].pmkid, - &pu8PmkidInfoArray->pmkidlist[i].pmkid, PMKID_LEN); + &pmkid->pmkidlist[i].pmkid, PMKID_LEN); } result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER(" Error in sending messagequeue: PMKID Info\n"); + netdev_err(vif->ndev, "PMKID Info\n"); return result; } -s32 wilc_get_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress) +int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr) { - s32 result = 0; + int result = 0; struct host_if_msg msg; memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_GET_MAC_ADDRESS; - msg.body.get_mac_info.mac_addr = pu8MacAddress; + msg.body.get_mac_info.mac_addr = mac_addr; msg.vif = vif; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) { - PRINT_ER("Failed to send get mac address\n"); + netdev_err(vif->ndev, "Failed to send get mac address\n"); return -EFAULT; } @@ -3346,42 +3011,23 @@ s32 wilc_get_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress) return result; } -s32 wilc_set_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress) +int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid, + size_t ssid_len, const u8 *ies, size_t ies_len, + wilc_connect_result connect_result, void *user_arg, + u8 security, enum AUTHTYPE auth_type, + u8 channel, void *join_params) { - s32 result = 0; - struct host_if_msg msg; - - PRINT_D(GENERIC_DBG, "mac addr = %x:%x:%x\n", pu8MacAddress[0], pu8MacAddress[1], pu8MacAddress[2]); - - memset(&msg, 0, sizeof(struct host_if_msg)); - msg.id = HOST_IF_MSG_SET_MAC_ADDRESS; - memcpy(msg.body.set_mac_info.mac_addr, pu8MacAddress, ETH_ALEN); - msg.vif = vif; - - result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); - if (result) - PRINT_ER("Failed to send message queue: Set mac address\n"); - - return result; -} - -s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid, - size_t ssidLen, const u8 *pu8IEs, size_t IEsLen, - wilc_connect_result pfConnectResult, void *pvUserArg, - u8 u8security, enum AUTHTYPE tenuAuth_type, - u8 u8channel, void *pJoinParams) -{ - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; - if (!hif_drv || !pfConnectResult) { - PRINT_ER("Driver is null\n"); + if (!hif_drv || !connect_result) { + netdev_err(vif->ndev, 
"Driver is null\n"); return -EFAULT; } - if (!pJoinParams) { - PRINT_ER("Unable to Join - JoinParams is NULL\n"); + if (!join_params) { + netdev_err(vif->ndev, "Unable to Join - JoinParams is NULL\n"); return -EFAULT; } @@ -3389,39 +3035,39 @@ s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid, msg.id = HOST_IF_MSG_CONNECT; - msg.body.con_info.security = u8security; - msg.body.con_info.auth_type = tenuAuth_type; - msg.body.con_info.ch = u8channel; - msg.body.con_info.result = pfConnectResult; - msg.body.con_info.arg = pvUserArg; - msg.body.con_info.params = pJoinParams; + msg.body.con_info.security = security; + msg.body.con_info.auth_type = auth_type; + msg.body.con_info.ch = channel; + msg.body.con_info.result = connect_result; + msg.body.con_info.arg = user_arg; + msg.body.con_info.params = join_params; msg.vif = vif; - if (pu8bssid) { - msg.body.con_info.bssid = kmalloc(6, GFP_KERNEL); - memcpy(msg.body.con_info.bssid, pu8bssid, 6); + if (bssid) { + msg.body.con_info.bssid = kmemdup(bssid, 6, GFP_KERNEL); + if (!msg.body.con_info.bssid) + return -ENOMEM; } - if (pu8ssid) { - msg.body.con_info.ssid_len = ssidLen; - msg.body.con_info.ssid = kmalloc(ssidLen, GFP_KERNEL); - memcpy(msg.body.con_info.ssid, pu8ssid, ssidLen); + if (ssid) { + msg.body.con_info.ssid_len = ssid_len; + msg.body.con_info.ssid = kmemdup(ssid, ssid_len, GFP_KERNEL); + if (!msg.body.con_info.ssid) + return -ENOMEM; } - if (pu8IEs) { - msg.body.con_info.ies_len = IEsLen; - msg.body.con_info.ies = kmalloc(IEsLen, GFP_KERNEL); - memcpy(msg.body.con_info.ies, pu8IEs, IEsLen); + if (ies) { + msg.body.con_info.ies_len = ies_len; + msg.body.con_info.ies = kmemdup(ies, ies_len, GFP_KERNEL); + if (!msg.body.con_info.ies) + return -ENOMEM; } if (hif_drv->hif_state < HOST_IF_CONNECTING) hif_drv->hif_state = HOST_IF_CONNECTING; - else - PRINT_D(GENERIC_DBG, "Don't set state to 'connecting' : %d\n", - hif_drv->hif_state); result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) { - PRINT_ER("Failed to send message queue: Set join request\n"); + netdev_err(vif->ndev, "send message: Set join request\n"); return -EFAULT; } @@ -3432,40 +3078,14 @@ s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid, return result; } -s32 wilc_flush_join_req(struct wilc_vif *vif) +int wilc_disconnect(struct wilc_vif *vif, u16 reason_code) { - s32 result = 0; - struct host_if_msg msg; - struct host_if_drv *hif_drv = vif->hif_drv; - - if (!join_req) - return -EFAULT; - - if (!hif_drv) { - PRINT_ER("Driver is null\n"); - return -EFAULT; - } - - msg.id = HOST_IF_MSG_FLUSH_CONNECT; - msg.vif = vif; - - result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); - if (result) { - PRINT_ER("Failed to send message queue: Flush join request\n"); - return -EFAULT; - } - - return result; -} - -s32 wilc_disconnect(struct wilc_vif *vif, u16 u16ReasonCode) -{ - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("Driver is null\n"); + netdev_err(vif->ndev, "Driver is null\n"); return -EFAULT; } @@ -3476,7 +3096,7 @@ s32 wilc_disconnect(struct wilc_vif *vif, u16 u16ReasonCode) result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Failed to send message queue: disconnect\n"); + netdev_err(vif->ndev, "Failed to send message: disconnect\n"); down(&hif_drv->sem_test_disconn_block); @@ -3493,7 +3113,7 @@ static s32 host_int_get_assoc_res_info(struct wilc_vif *vif, struct 
host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("Driver is null\n"); + netdev_err(vif->ndev, "Driver is null\n"); return -EFAULT; } @@ -3502,16 +3122,15 @@ static s32 host_int_get_assoc_res_info(struct wilc_vif *vif, wid.val = pu8AssocRespInfo; wid.size = u32MaxAssocRespInfoLen; - result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1, - wilc_get_vif_idx(vif)); + result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1, + wilc_get_vif_idx(vif)); if (result) { *pu32RcvdAssocRespInfoLen = 0; - PRINT_ER("Failed to send association response config packet\n"); + netdev_err(vif->ndev, "Failed to send association response\n"); return -EINVAL; - } else { - *pu32RcvdAssocRespInfoLen = wid.size; } + *pu32RcvdAssocRespInfoLen = wid.size; return result; } @@ -3522,7 +3141,7 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel) struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } @@ -3533,32 +3152,14 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel) result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) { - PRINT_ER("wilc mq send fail\n"); + netdev_err(vif->ndev, "wilc mq send fail\n"); return -EINVAL; } return 0; } -int wilc_wait_msg_queue_idle(void) -{ - int result = 0; - struct host_if_msg msg; - - memset(&msg, 0, sizeof(struct host_if_msg)); - msg.id = HOST_IF_MSG_Q_IDLE; - result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); - if (result) { - PRINT_ER("wilc mq send fail\n"); - result = -EINVAL; - } - - down(&hif_sema_wait_response); - - return result; -} - -int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index) +int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx) { int result = 0; struct host_if_msg msg; @@ -3566,11 +3167,12 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index) memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_SET_WFIDRV_HANDLER; msg.body.drv.handler = index; + msg.body.drv.mac_idx = mac_idx; msg.vif = vif; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) { - PRINT_ER("wilc mq send fail\n"); + netdev_err(vif->ndev, "wilc mq send fail\n"); result = -EINVAL; } @@ -3589,7 +3191,7 @@ int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode) result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) { - PRINT_ER("wilc mq send fail\n"); + netdev_err(vif->ndev, "wilc mq send fail\n"); result = -EINVAL; } @@ -3604,7 +3206,7 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac, struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } @@ -3616,7 +3218,7 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac, result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Failed to send get host channel param's message queue "); + netdev_err(vif->ndev, "Failed to send get host ch param\n"); down(&hif_drv->sem_inactive_time); @@ -3625,9 +3227,9 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac, return result; } -s32 wilc_get_rssi(struct wilc_vif *vif, s8 *ps8Rssi) +int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level) { - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; @@ -3637,53 +3239,55 @@ s32 wilc_get_rssi(struct wilc_vif *vif, s8 *ps8Rssi) result = wilc_mq_send(&hif_msg_q, &msg, 
sizeof(struct host_if_msg)); if (result) { - PRINT_ER("Failed to send get host channel param's message queue "); + netdev_err(vif->ndev, "Failed to send get host ch param\n"); return -EFAULT; } down(&hif_drv->sem_get_rssi); - if (!ps8Rssi) { - PRINT_ER("RSS pointer value is null"); + if (!rssi_level) { + netdev_err(vif->ndev, "RSS pointer value is null\n"); return -EFAULT; } - *ps8Rssi = rssi; + *rssi_level = rssi; return result; } -s32 wilc_get_statistics(struct wilc_vif *vif, struct rf_info *pstrStatistics) +int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats) { - s32 result = 0; + int result = 0; struct host_if_msg msg; memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_GET_STATISTICS; - msg.body.data = (char *)pstrStatistics; + msg.body.data = (char *)stats; msg.vif = vif; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) { - PRINT_ER("Failed to send get host channel param's message queue "); + netdev_err(vif->ndev, "Failed to send get host channel\n"); return -EFAULT; } - down(&hif_sema_wait_response); + if (stats != &vif->wilc->dummy_statistics) + down(&hif_sema_wait_response); return result; } -s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType, - u8 *pu8ChnlFreqList, u8 u8ChnlListLen, const u8 *pu8IEs, - size_t IEsLen, wilc_scan_result ScanResult, void *pvUserArg, - struct hidden_network *pstrHiddenNetwork) +int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type, + u8 *ch_freq_list, u8 ch_list_len, const u8 *ies, + size_t ies_len, wilc_scan_result scan_result, void *user_arg, + struct hidden_network *hidden_network) { - s32 result = 0; + int result = 0; struct host_if_msg msg; + struct scan_attr *scan_info = &msg.body.scan_info; struct host_if_drv *hif_drv = vif->hif_drv; - if (!hif_drv || !ScanResult) { - PRINT_ER("hif_drv or ScanResult = NULL\n"); + if (!hif_drv || !scan_result) { + netdev_err(vif->ndev, "hif_drv or scan_result = NULL\n"); return -EFAULT; } @@ -3691,34 +3295,35 @@ s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType, msg.id = HOST_IF_MSG_SCAN; - if (pstrHiddenNetwork) { - msg.body.scan_info.hidden_network.pstrHiddenNetworkInfo = pstrHiddenNetwork->pstrHiddenNetworkInfo; - msg.body.scan_info.hidden_network.u8ssidnum = pstrHiddenNetwork->u8ssidnum; - - } else - PRINT_D(HOSTINF_DBG, "pstrHiddenNetwork IS EQUAL TO NULL\n"); + if (hidden_network) { + scan_info->hidden_network.net_info = hidden_network->net_info; + scan_info->hidden_network.n_ssids = hidden_network->n_ssids; + } msg.vif = vif; - msg.body.scan_info.src = u8ScanSource; - msg.body.scan_info.type = u8ScanType; - msg.body.scan_info.result = ScanResult; - msg.body.scan_info.arg = pvUserArg; - - msg.body.scan_info.ch_list_len = u8ChnlListLen; - msg.body.scan_info.ch_freq_list = kmalloc(u8ChnlListLen, GFP_KERNEL); - memcpy(msg.body.scan_info.ch_freq_list, pu8ChnlFreqList, u8ChnlListLen); + scan_info->src = scan_source; + scan_info->type = scan_type; + scan_info->result = scan_result; + scan_info->arg = user_arg; + + scan_info->ch_list_len = ch_list_len; + scan_info->ch_freq_list = kmemdup(ch_freq_list, + ch_list_len, + GFP_KERNEL); + if (!scan_info->ch_freq_list) + return -ENOMEM; - msg.body.scan_info.ies_len = IEsLen; - msg.body.scan_info.ies = kmalloc(IEsLen, GFP_KERNEL); - memcpy(msg.body.scan_info.ies, pu8IEs, IEsLen); + scan_info->ies_len = ies_len; + scan_info->ies = kmemdup(ies, ies_len, GFP_KERNEL); + if (!scan_info->ies) + return -ENOMEM; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct 
host_if_msg)); if (result) { - PRINT_ER("Error in sending message queue\n"); + netdev_err(vif->ndev, "Error in sending message queue\n"); return -EINVAL; } - PRINT_D(HOSTINF_DBG, ">> Starting the SCAN timer\n"); hif_drv->scan_timer.data = (unsigned long)vif; mod_timer(&hif_drv->scan_timer, jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT)); @@ -3726,21 +3331,21 @@ s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType, return result; } -s32 wilc_hif_set_cfg(struct wilc_vif *vif, - struct cfg_param_val *pstrCfgParamVal) +int wilc_hif_set_cfg(struct wilc_vif *vif, + struct cfg_param_attr *cfg_param) { - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("hif_drv NULL\n"); + netdev_err(vif->ndev, "hif_drv NULL\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_CFG_PARAMS; - msg.body.cfg_info.cfg_attr_info = *pstrCfgParamVal; + msg.body.cfg_info = *cfg_param; msg.vif = vif; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); @@ -3753,32 +3358,20 @@ static void GetPeriodicRSSI(unsigned long arg) struct wilc_vif *vif = (struct wilc_vif *)arg; if (!vif->hif_drv) { - PRINT_ER("Driver handler is NULL\n"); + netdev_err(vif->ndev, "Driver handler is NULL\n"); return; } - if (vif->hif_drv->hif_state == HOST_IF_CONNECTED) { - s32 result = 0; - struct host_if_msg msg; + if (vif->hif_drv->hif_state == HOST_IF_CONNECTED) + wilc_get_statistics(vif, &vif->wilc->dummy_statistics); - memset(&msg, 0, sizeof(struct host_if_msg)); - - msg.id = HOST_IF_MSG_GET_RSSI; - msg.vif = vif; - - result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); - if (result) { - PRINT_ER("Failed to send get host channel param's message queue "); - return; - } - } periodic_rssi.data = (unsigned long)vif; mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000)); } -s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) +int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) { - s32 result = 0; + int result = 0; struct host_if_drv *hif_drv; struct wilc_vif *vif; struct wilc *wilc; @@ -3787,8 +3380,6 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) vif = netdev_priv(dev); wilc = vif->wilc; - PRINT_D(HOSTINF_DBG, "Initializing host interface for client %d\n", clients_count + 1); - scan_while_connected = false; sema_init(&hif_sema_wait_response, 0); @@ -3807,7 +3398,6 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) wilc_optaining_ip = false; - PRINT_D(HOSTINF_DBG, "Global handle pointer value=%p\n", hif_drv); if (clients_count == 0) { sema_init(&hif_sema_thread, 0); sema_init(&hif_sema_driver, 0); @@ -3817,17 +3407,13 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) sema_init(&hif_drv->sem_test_key_block, 0); sema_init(&hif_drv->sem_test_disconn_block, 0); sema_init(&hif_drv->sem_get_rssi, 0); - sema_init(&hif_drv->sem_get_link_speed, 0); - sema_init(&hif_drv->sem_get_chnl, 0); sema_init(&hif_drv->sem_inactive_time, 0); - PRINT_D(HOSTINF_DBG, "INIT: CLIENT COUNT %d\n", clients_count); - if (clients_count == 0) { result = wilc_mq_create(&hif_msg_q); if (result < 0) { - PRINT_ER("Failed to creat MQ\n"); + netdev_err(vif->ndev, "Failed to creat MQ\n"); goto _fail_; } @@ -3835,7 +3421,7 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) "WILC_kthread"); if (IS_ERR(hif_thread_handler)) { - PRINT_ER("Failed to creat Thread\n"); + 
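The GetPeriodicRSSI() hunk above stops posting a dedicated RSSI message and instead calls wilc_get_statistics() with the shared dummy_statistics buffer; the completion semaphore is only used for synchronous callers, so the timer callback never blocks. A brief sketch of both sides of that pattern follows; dummy_statistics and hif_sema_wait_response are taken from the hunks above, the helper names are illustrative.

/* Sketch: non-blocking periodic poll vs. synchronous statistics callers. */
static void sketch_periodic_poll(struct wilc_vif *vif)
{
	if (vif->hif_drv->hif_state == HOST_IF_CONNECTED)
		wilc_get_statistics(vif, &vif->wilc->dummy_statistics);
}

static void sketch_stats_complete(struct wilc_vif *vif, struct rf_info *stats)
{
	/* wake only callers that are actually sleeping on the result */
	if (stats != &vif->wilc->dummy_statistics)
		up(&hif_sema_wait_response);
}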
netdev_err(vif->ndev, "Failed to creat Thread\n"); result = -EFAULT; goto _fail_mq_; } @@ -3848,8 +3434,8 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) setup_timer(&hif_drv->connect_timer, TimerCB_Connect, 0); setup_timer(&hif_drv->remain_on_ch_timer, ListenTimerCB, 0); - sema_init(&hif_drv->sem_cfg_values, 1); - down(&hif_drv->sem_cfg_values); + mutex_init(&hif_drv->cfg_values_lock); + mutex_lock(&hif_drv->cfg_values_lock); hif_drv->hif_state = HOST_IF_IDLE; hif_drv->cfg_values.site_survey_enabled = SITE_SURVEY_OFF; @@ -3860,14 +3446,7 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) hif_drv->p2p_timeout = 0; - PRINT_INFO(HOSTINF_DBG, "Initialization values, Site survey value: %d\n Scan source: %d\n Active scan time: %d\n Passive scan time: %d\nCurrent tx Rate = %d\n", - hif_drv->cfg_values.site_survey_enabled, - hif_drv->cfg_values.scan_source, - hif_drv->cfg_values.active_scan_time, - hif_drv->cfg_values.passive_scan_time, - hif_drv->cfg_values.curr_tx_rate); - - up(&hif_drv->sem_cfg_values); + mutex_unlock(&hif_drv->cfg_values_lock); clients_count++; @@ -3879,34 +3458,27 @@ _fail_: return result; } -s32 wilc_deinit(struct wilc_vif *vif) +int wilc_deinit(struct wilc_vif *vif) { - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("hif_drv = NULL\n"); - return 0; + netdev_err(vif->ndev, "hif_drv = NULL\n"); + return -EFAULT; } down(&hif_sema_deinit); terminated_handle = hif_drv; - PRINT_D(HOSTINF_DBG, "De-initializing host interface for client %d\n", clients_count); - - if (del_timer_sync(&hif_drv->scan_timer)) - PRINT_D(HOSTINF_DBG, ">> Scan timer is active\n"); - - if (del_timer_sync(&hif_drv->connect_timer)) - PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n"); - - if (del_timer_sync(&periodic_rssi)) - PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n"); + del_timer_sync(&hif_drv->scan_timer); + del_timer_sync(&hif_drv->connect_timer); + del_timer_sync(&periodic_rssi); del_timer_sync(&hif_drv->remain_on_ch_timer); - wilc_set_wfi_drv_handler(vif, 0); + wilc_set_wfi_drv_handler(vif, 0, 0); down(&hif_sema_driver); if (hif_drv->usr_scan_req.scan_result) { @@ -3922,15 +3494,13 @@ s32 wilc_deinit(struct wilc_vif *vif) memset(&msg, 0, sizeof(struct host_if_msg)); if (clients_count == 1) { - if (del_timer_sync(&periodic_rssi)) - PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n"); - + del_timer_sync(&periodic_rssi); msg.id = HOST_IF_MSG_EXIT; msg.vif = vif; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result != 0) - PRINT_ER("Error in sending deinit's message queue message function: Error(%d)\n", result); + netdev_err(vif->ndev, "deinit : Error(%d)\n", result); down(&hif_sema_thread); @@ -3961,7 +3531,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer, hif_drv = vif->hif_drv; if (!hif_drv || hif_drv == terminated_handle) { - PRINT_ER("NetworkInfo received but driver not init[%p]\n", hif_drv); + netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv); return; } @@ -3976,7 +3546,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer, result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending network info message queue message parameters: Error(%d)\n", result); + netdev_err(vif->ndev, "message parameters (%d)\n", result); } void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer, @@ -3998,16 +3568,14 @@ void 
wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer, } hif_drv = vif->hif_drv; - PRINT_D(HOSTINF_DBG, "General asynchronous info packet received\n"); if (!hif_drv || hif_drv == terminated_handle) { - PRINT_D(HOSTINF_DBG, "Wifi driver handler is equal to NULL\n"); up(&hif_sema_deinit); return; } if (!hif_drv->usr_conn_req.conn_result) { - PRINT_ER("Received mac status is not needed when there is no current Connect Reques\n"); + netdev_err(vif->ndev, "there is no current Connect Request\n"); up(&hif_sema_deinit); return; } @@ -4023,7 +3591,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer, result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending message queue asynchronous message info: Error(%d)\n", result); + netdev_err(vif->ndev, "synchronous info (%d)\n", result); up(&hif_sema_deinit); } @@ -4043,8 +3611,6 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer, return; hif_drv = vif->hif_drv; - PRINT_D(GENERIC_DBG, "Scan notification received %p\n", hif_drv); - if (!hif_drv || hif_drv == terminated_handle) return; @@ -4056,24 +3622,22 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer, result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("Error in sending message queue scan complete parameters: Error(%d)\n", result); + netdev_err(vif->ndev, "complete param (%d)\n", result); } - - return; } -s32 wilc_remain_on_channel(struct wilc_vif *vif, u32 u32SessionID, - u32 u32duration, u16 chan, - wilc_remain_on_chan_expired RemainOnChanExpired, - wilc_remain_on_chan_ready RemainOnChanReady, - void *pvUserArg) +int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id, + u32 duration, u16 chan, + wilc_remain_on_chan_expired expired, + wilc_remain_on_chan_ready ready, + void *user_arg) { - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } @@ -4081,28 +3645,28 @@ s32 wilc_remain_on_channel(struct wilc_vif *vif, u32 u32SessionID, msg.id = HOST_IF_MSG_REMAIN_ON_CHAN; msg.body.remain_on_ch.ch = chan; - msg.body.remain_on_ch.expired = RemainOnChanExpired; - msg.body.remain_on_ch.ready = RemainOnChanReady; - msg.body.remain_on_ch.arg = pvUserArg; - msg.body.remain_on_ch.u32duration = u32duration; - msg.body.remain_on_ch.id = u32SessionID; + msg.body.remain_on_ch.expired = expired; + msg.body.remain_on_ch.ready = ready; + msg.body.remain_on_ch.arg = user_arg; + msg.body.remain_on_ch.duration = duration; + msg.body.remain_on_ch.id = session_id; msg.vif = vif; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc mq send fail\n"); + netdev_err(vif->ndev, "wilc mq send fail\n"); return result; } -s32 wilc_listen_state_expired(struct wilc_vif *vif, u32 u32SessionID) +int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id) { - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } @@ -4111,104 +3675,98 @@ s32 wilc_listen_state_expired(struct wilc_vif *vif, u32 u32SessionID) memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_LISTEN_TIMER_FIRED; msg.vif = vif; - msg.body.remain_on_ch.id = u32SessionID; + msg.body.remain_on_ch.id = session_id; result = 
wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc mq send fail\n"); + netdev_err(vif->ndev, "wilc mq send fail\n"); return result; } -s32 wilc_frame_register(struct wilc_vif *vif, u16 u16FrameType, bool bReg) +int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg) { - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_REGISTER_FRAME; - switch (u16FrameType) { + switch (frame_type) { case ACTION: - PRINT_D(HOSTINF_DBG, "ACTION\n"); msg.body.reg_frame.reg_id = ACTION_FRM_IDX; break; case PROBE_REQ: - PRINT_D(HOSTINF_DBG, "PROBE REQ\n"); msg.body.reg_frame.reg_id = PROBE_REQ_IDX; break; default: - PRINT_D(HOSTINF_DBG, "Not valid frame type\n"); break; } - msg.body.reg_frame.frame_type = u16FrameType; - msg.body.reg_frame.reg = bReg; + msg.body.reg_frame.frame_type = frame_type; + msg.body.reg_frame.reg = reg; msg.vif = vif; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc mq send fail\n"); + netdev_err(vif->ndev, "wilc mq send fail\n"); return result; } -s32 wilc_add_beacon(struct wilc_vif *vif, u32 u32Interval, u32 u32DTIMPeriod, - u32 u32HeadLen, u8 *pu8Head, u32 u32TailLen, u8 *pu8Tail) +int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period, + u32 head_len, u8 *head, u32 tail_len, u8 *tail) { - s32 result = 0; + int result = 0; struct host_if_msg msg; - struct beacon_attr *pstrSetBeaconParam = &msg.body.beacon_info; + struct beacon_attr *beacon_info = &msg.body.beacon_info; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); - PRINT_D(HOSTINF_DBG, "Setting adding beacon message queue params\n"); - msg.id = HOST_IF_MSG_ADD_BEACON; msg.vif = vif; - pstrSetBeaconParam->interval = u32Interval; - pstrSetBeaconParam->dtim_period = u32DTIMPeriod; - pstrSetBeaconParam->head_len = u32HeadLen; - pstrSetBeaconParam->head = kmemdup(pu8Head, u32HeadLen, GFP_KERNEL); - if (!pstrSetBeaconParam->head) { + beacon_info->interval = interval; + beacon_info->dtim_period = dtim_period; + beacon_info->head_len = head_len; + beacon_info->head = kmemdup(head, head_len, GFP_KERNEL); + if (!beacon_info->head) { result = -ENOMEM; goto ERRORHANDLER; } - pstrSetBeaconParam->tail_len = u32TailLen; + beacon_info->tail_len = tail_len; - if (u32TailLen > 0) { - pstrSetBeaconParam->tail = kmemdup(pu8Tail, u32TailLen, - GFP_KERNEL); - if (!pstrSetBeaconParam->tail) { + if (tail_len > 0) { + beacon_info->tail = kmemdup(tail, tail_len, GFP_KERNEL); + if (!beacon_info->tail) { result = -ENOMEM; goto ERRORHANDLER; } } else { - pstrSetBeaconParam->tail = NULL; + beacon_info->tail = NULL; } result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc mq send fail\n"); + netdev_err(vif->ndev, "wilc mq send fail\n"); ERRORHANDLER: if (result) { - kfree(pstrSetBeaconParam->head); + kfree(beacon_info->head); - kfree(pstrSetBeaconParam->tail); + kfree(beacon_info->tail); } return result; @@ -4221,17 +3779,16 @@ int wilc_del_beacon(struct wilc_vif *vif) struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } 
msg.id = HOST_IF_MSG_DEL_BEACON; msg.vif = vif; - PRINT_D(HOSTINF_DBG, "Setting deleting beacon message queue params\n"); result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); return result; } @@ -4244,14 +3801,12 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param) struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); - PRINT_D(HOSTINF_DBG, "Setting adding station message queue params\n"); - msg.id = HOST_IF_MSG_ADD_STATION; msg.vif = vif; @@ -4266,7 +3821,7 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param) result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); return result; } @@ -4278,14 +3833,12 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr) struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); - PRINT_D(HOSTINF_DBG, "Setting deleting station message queue params\n"); - msg.id = HOST_IF_MSG_DEL_STATION; msg.vif = vif; @@ -4296,160 +3849,141 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr) result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); return result; } -s32 wilc_del_allstation(struct wilc_vif *vif, u8 pu8MacAddr[][ETH_ALEN]) +int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN]) { - s32 result = 0; + int result = 0; struct host_if_msg msg; - struct del_all_sta *pstrDelAllStationMsg = &msg.body.del_all_sta_info; + struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info; struct host_if_drv *hif_drv = vif->hif_drv; - u8 au8Zero_Buff[ETH_ALEN] = {0}; - u32 i; - u8 u8AssocNumb = 0; + u8 zero_addr[ETH_ALEN] = {0}; + int i; + u8 assoc_sta = 0; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); - PRINT_D(HOSTINF_DBG, "Setting deauthenticating station message queue params\n"); - msg.id = HOST_IF_MSG_DEL_ALL_STA; msg.vif = vif; for (i = 0; i < MAX_NUM_STA; i++) { - if (memcmp(pu8MacAddr[i], au8Zero_Buff, ETH_ALEN)) { - memcpy(pstrDelAllStationMsg->del_all_sta[i], pu8MacAddr[i], ETH_ALEN); - PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n", - pstrDelAllStationMsg->del_all_sta[i][0], - pstrDelAllStationMsg->del_all_sta[i][1], - pstrDelAllStationMsg->del_all_sta[i][2], - pstrDelAllStationMsg->del_all_sta[i][3], - pstrDelAllStationMsg->del_all_sta[i][4], - pstrDelAllStationMsg->del_all_sta[i][5]); - u8AssocNumb++; + if (memcmp(mac_addr[i], zero_addr, ETH_ALEN)) { + memcpy(del_all_sta_info->del_all_sta[i], mac_addr[i], ETH_ALEN); + assoc_sta++; } } - if (!u8AssocNumb) { - PRINT_D(CFG80211_DBG, "NO ASSOCIATED STAS\n"); + if (!assoc_sta) return result; - } - pstrDelAllStationMsg->assoc_sta = u8AssocNumb; + del_all_sta_info->assoc_sta = assoc_sta; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); down(&hif_sema_wait_response); return result; } -s32 
wilc_edit_station(struct wilc_vif *vif, - struct add_sta_param *pstrStaParams) +int wilc_edit_station(struct wilc_vif *vif, + struct add_sta_param *sta_param) { - s32 result = 0; + int result = 0; struct host_if_msg msg; - struct add_sta_param *pstrAddStationMsg = &msg.body.add_sta_info; + struct add_sta_param *add_sta_info = &msg.body.add_sta_info; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } - PRINT_D(HOSTINF_DBG, "Setting editing station message queue params\n"); - memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_EDIT_STATION; msg.vif = vif; - memcpy(pstrAddStationMsg, pstrStaParams, sizeof(struct add_sta_param)); - if (pstrAddStationMsg->rates_len > 0) { - u8 *rates = kmalloc(pstrAddStationMsg->rates_len, GFP_KERNEL); - - if (!rates) + memcpy(add_sta_info, sta_param, sizeof(struct add_sta_param)); + if (add_sta_info->rates_len > 0) { + add_sta_info->rates = kmemdup(sta_param->rates, + add_sta_info->rates_len, + GFP_KERNEL); + if (!add_sta_info->rates) return -ENOMEM; - - memcpy(rates, pstrStaParams->rates, - pstrAddStationMsg->rates_len); - pstrAddStationMsg->rates = rates; } result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); return result; } -s32 wilc_set_power_mgmt(struct wilc_vif *vif, bool bIsEnabled, u32 u32Timeout) +int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout) { - s32 result = 0; + int result = 0; struct host_if_msg msg; - struct power_mgmt_param *pstrPowerMgmtParam = &msg.body.pwr_mgmt_info; + struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info; struct host_if_drv *hif_drv = vif->hif_drv; - PRINT_INFO(HOSTINF_DBG, "\n\n>> Setting PS to %d <<\n\n", bIsEnabled); - if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } - PRINT_D(HOSTINF_DBG, "Setting Power management message queue params\n"); + if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled) + return 0; memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_POWER_MGMT; msg.vif = vif; - pstrPowerMgmtParam->enabled = bIsEnabled; - pstrPowerMgmtParam->timeout = u32Timeout; + pwr_mgmt_info->enabled = enabled; + pwr_mgmt_info->timeout = timeout; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); return result; } -s32 wilc_setup_multicast_filter(struct wilc_vif *vif, bool bIsEnabled, - u32 u32count) +int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled, + u32 count) { - s32 result = 0; + int result = 0; struct host_if_msg msg; - struct set_multicast *pstrMulticastFilterParam = &msg.body.multicast_info; + struct set_multicast *multicast_filter_param = &msg.body.multicast_info; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } - PRINT_D(HOSTINF_DBG, "Setting Multicast Filter params\n"); - memset(&msg, 0, sizeof(struct host_if_msg)); msg.id = HOST_IF_MSG_SET_MULTICAST_FILTER; msg.vif = vif; - pstrMulticastFilterParam->enabled = bIsEnabled; - pstrMulticastFilterParam->cnt = u32count; + multicast_filter_param->enabled = enabled; + multicast_filter_param->cnt = count; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - 
PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); return result; } -static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo) +static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo) { struct join_bss_param *pNewJoinBssParam = NULL; u8 *pu8IEs; @@ -4464,17 +3998,18 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo) u8 authTotalCount = 0; u8 i, j; - pu8IEs = ptstrNetworkInfo->pu8IEs; - u16IEsLen = ptstrNetworkInfo->u16IEsLen; + pu8IEs = ptstrNetworkInfo->ies; + u16IEsLen = ptstrNetworkInfo->ies_len; pNewJoinBssParam = kzalloc(sizeof(struct join_bss_param), GFP_KERNEL); if (pNewJoinBssParam) { - pNewJoinBssParam->dtim_period = ptstrNetworkInfo->u8DtimPeriod; - pNewJoinBssParam->beacon_period = ptstrNetworkInfo->u16BeaconPeriod; - pNewJoinBssParam->cap_info = ptstrNetworkInfo->u16CapInfo; - memcpy(pNewJoinBssParam->au8bssid, ptstrNetworkInfo->au8bssid, 6); - memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->au8ssid, ptstrNetworkInfo->u8SsidLen + 1); - pNewJoinBssParam->ssid_len = ptstrNetworkInfo->u8SsidLen; + pNewJoinBssParam->dtim_period = ptstrNetworkInfo->dtim_period; + pNewJoinBssParam->beacon_period = ptstrNetworkInfo->beacon_period; + pNewJoinBssParam->cap_info = ptstrNetworkInfo->cap_info; + memcpy(pNewJoinBssParam->bssid, ptstrNetworkInfo->bssid, 6); + memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->ssid, + ptstrNetworkInfo->ssid_len + 1); + pNewJoinBssParam->ssid_len = ptstrNetworkInfo->ssid_len; memset(pNewJoinBssParam->rsn_pcip_policy, 0xFF, 3); memset(pNewJoinBssParam->rsn_auth_policy, 0xFF, 3); @@ -4523,7 +4058,7 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo) (pu8IEs[index + 5] == 0x09) && (pu8IEs[index + 6] == 0x0c)) { u16 u16P2P_count; - pNewJoinBssParam->tsf = ptstrNetworkInfo->u32Tsf; + pNewJoinBssParam->tsf = ptstrNetworkInfo->tsf_lo; pNewJoinBssParam->noa_enabled = 1; pNewJoinBssParam->idx = pu8IEs[index + 9]; @@ -4534,10 +4069,6 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo) pNewJoinBssParam->opp_enabled = 0; } - PRINT_D(GENERIC_DBG, "P2P Dump\n"); - for (i = 0; i < pu8IEs[index + 7]; i++) - PRINT_D(GENERIC_DBG, " %x\n", pu8IEs[index + 9 + i]); - pNewJoinBssParam->cnt = pu8IEs[index + 11]; u16P2P_count = index + 12; @@ -4606,94 +4137,92 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo) return (void *)pNewJoinBssParam; } -void wilc_free_join_params(void *pJoinParams) +int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx) { - if ((struct bss_param *)pJoinParams) - kfree((struct bss_param *)pJoinParams); - else - PRINT_ER("Unable to FREE null pointer\n"); -} - -s32 wilc_del_all_rx_ba_session(struct wilc_vif *vif, char *pBSSID, char TID) -{ - s32 result = 0; + int result = 0; struct host_if_msg msg; - struct ba_session_info *pBASessionInfo = &msg.body.session_info; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); - msg.id = HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS; + msg.id = HOST_IF_MSG_SET_IPADDRESS; - memcpy(pBASessionInfo->bssid, pBSSID, ETH_ALEN); - pBASessionInfo->tid = TID; + msg.body.ip_info.ip_addr = ip_addr; msg.vif = vif; + msg.body.ip_info.idx = idx; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); - - down(&hif_sema_wait_response); + 
netdev_err(vif->ndev, "wilc_mq_send fail\n"); return result; } -s32 wilc_setup_ipaddress(struct wilc_vif *vif, u8 *u16ipadd, u8 idx) +static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx) { - s32 result = 0; + int result = 0; struct host_if_msg msg; struct host_if_drv *hif_drv = vif->hif_drv; - return 0; - if (!hif_drv) { - PRINT_ER("driver is null\n"); + netdev_err(vif->ndev, "driver is null\n"); return -EFAULT; } memset(&msg, 0, sizeof(struct host_if_msg)); - msg.id = HOST_IF_MSG_SET_IPADDRESS; + msg.id = HOST_IF_MSG_GET_IPADDRESS; - msg.body.ip_info.ip_addr = u16ipadd; + msg.body.ip_info.ip_addr = ip_addr; msg.vif = vif; msg.body.ip_info.idx = idx; result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); if (result) - PRINT_ER("wilc_mq_send fail\n"); + netdev_err(vif->ndev, "wilc_mq_send fail\n"); return result; } -static s32 host_int_get_ipaddress(struct wilc_vif *vif, - struct host_if_drv *hif_drv, - u8 *u16ipadd, u8 idx) +int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power) { - s32 result = 0; + int ret = 0; struct host_if_msg msg; - if (!hif_drv) { - PRINT_ER("driver is null\n"); - return -EFAULT; - } - memset(&msg, 0, sizeof(struct host_if_msg)); - msg.id = HOST_IF_MSG_GET_IPADDRESS; + msg.id = HOST_IF_MSG_SET_TX_POWER; + msg.body.tx_power.tx_pwr = tx_power; + msg.vif = vif; + + ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); + if (ret) + netdev_err(vif->ndev, "wilc_mq_send fail\n"); - msg.body.ip_info.ip_addr = u16ipadd; + return ret; +} + +int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power) +{ + int ret = 0; + struct host_if_msg msg; + + memset(&msg, 0, sizeof(struct host_if_msg)); + + msg.id = HOST_IF_MSG_GET_TX_POWER; msg.vif = vif; - msg.body.ip_info.idx = idx; - result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); - if (result) - PRINT_ER("wilc_mq_send fail\n"); + ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg)); + if (ret) + netdev_err(vif->ndev, "Failed to get TX PWR\n"); - return result; + down(&hif_sema_wait_response); + *tx_power = msg.body.tx_power.tx_pwr; + + return ret; } diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h index 8faac27002e9..01f3222a4231 100644 --- a/drivers/staging/wilc1000/host_interface.h +++ b/drivers/staging/wilc1000/host_interface.h @@ -96,7 +96,7 @@ enum CURRENT_TXRATE { MBPS_54 = 54 }; -struct cfg_param_val { +struct cfg_param_attr { u32 flag; u8 ht_enable; u8 bss_type; @@ -144,8 +144,8 @@ enum cfg_param { }; struct found_net_info { - u8 au8bssid[6]; - s8 s8rssi; + u8 bssid[6]; + s8 rssi; }; enum scan_event { @@ -168,13 +168,13 @@ enum KEY_TYPE { PMKSA, }; -typedef void (*wilc_scan_result)(enum scan_event, tstrNetworkInfo *, - void *, void *); +typedef void (*wilc_scan_result)(enum scan_event, struct network_info *, + void *, void *); typedef void (*wilc_connect_result)(enum conn_event, - tstrConnectInfo *, + struct connect_info *, u8, - tstrDisconnectNotifInfo *, + struct disconnect_info *, void *); typedef void (*wilc_remain_on_chan_expired)(void *, u32); @@ -186,13 +186,13 @@ struct rcvd_net_info { }; struct hidden_net_info { - u8 *pu8ssid; - u8 u8ssidlen; + u8 *ssid; + u8 ssid_len; }; struct hidden_network { - struct hidden_net_info *pstrHiddenNetworkInfo; - u8 u8ssidnum; + struct hidden_net_info *net_info; + u8 n_ssids; }; struct user_scan_req { @@ -203,9 +203,9 @@ struct user_scan_req { }; struct user_conn_req { - u8 *pu8bssid; - u8 *pu8ssid; - u8 u8security; + u8 *bssid; + u8 *ssid; + u8 
security; enum AUTHTYPE auth_type; size_t ssid_len; u8 *ies; @@ -217,6 +217,7 @@ struct user_conn_req { struct drv_handler { u32 handler; + u8 mac_idx; }; struct op_mode { @@ -240,7 +241,7 @@ struct ba_session_info { struct remain_ch { u16 ch; - u32 u32duration; + u32 duration; wilc_remain_on_chan_expired expired; wilc_remain_on_chan_ready ready; void *arg; @@ -271,14 +272,12 @@ struct host_if_drv { enum host_if_state hif_state; u8 assoc_bssid[ETH_ALEN]; - struct cfg_param_val cfg_values; + struct cfg_param_attr cfg_values; - struct semaphore sem_cfg_values; + struct mutex cfg_values_lock; struct semaphore sem_test_key_block; struct semaphore sem_test_disconn_block; struct semaphore sem_get_rssi; - struct semaphore sem_get_link_speed; - struct semaphore sem_get_chnl; struct semaphore sem_inactive_time; struct timer_list scan_timer; @@ -312,68 +311,60 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len, u8 index); int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len, u8 index, u8 mode, enum AUTHTYPE auth_type); -s32 wilc_add_ptk(struct wilc_vif *vif, const u8 *pu8Ptk, u8 u8PtkKeylen, - const u8 *mac_addr, const u8 *pu8RxMic, const u8 *pu8TxMic, - u8 mode, u8 u8Ciphermode, u8 u8Idx); +int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len, + const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic, + u8 mode, u8 cipher_mode, u8 index); s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac, u32 *pu32InactiveTime); -s32 wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *pu8RxGtk, u8 u8GtkKeylen, - u8 u8KeyIdx, u32 u32KeyRSClen, const u8 *KeyRSC, - const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, - u8 u8Ciphermode); -s32 wilc_add_tx_gtk(struct host_if_drv *hWFIDrv, u8 u8KeyLen, - u8 *pu8TxGtk, u8 u8KeyIdx); -s32 wilc_set_pmkid_info(struct wilc_vif *vif, - struct host_if_pmkid_attr *pu8PmkidInfoArray); -s32 wilc_get_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress); -s32 wilc_set_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress); -int wilc_wait_msg_queue_idle(void); -s32 wilc_set_start_scan_req(struct host_if_drv *hWFIDrv, u8 scanSource); -s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid, - size_t ssidLen, const u8 *pu8IEs, size_t IEsLen, - wilc_connect_result pfConnectResult, void *pvUserArg, - u8 u8security, enum AUTHTYPE tenuAuth_type, - u8 u8channel, void *pJoinParams); -s32 wilc_flush_join_req(struct wilc_vif *vif); -s32 wilc_disconnect(struct wilc_vif *vif, u16 u16ReasonCode); +int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, + u8 index, u32 key_rsc_len, const u8 *key_rsc, + const u8 *rx_mic, const u8 *tx_mic, u8 mode, + u8 cipher_mode); +int wilc_set_pmkid_info(struct wilc_vif *vif, + struct host_if_pmkid_attr *pmkid); +int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr); +int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid, + size_t ssid_len, const u8 *ies, size_t ies_len, + wilc_connect_result connect_result, void *user_arg, + u8 security, enum AUTHTYPE auth_type, + u8 channel, void *join_params); +int wilc_disconnect(struct wilc_vif *vif, u16 reason_code); int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel); -s32 wilc_get_rssi(struct wilc_vif *vif, s8 *ps8Rssi); -s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType, - u8 *pu8ChnlFreqList, u8 u8ChnlListLen, const u8 *pu8IEs, - size_t IEsLen, wilc_scan_result ScanResult, void *pvUserArg, - struct hidden_network *pstrHiddenNetwork); -s32 wilc_hif_set_cfg(struct wilc_vif 
*vif, - struct cfg_param_val *pstrCfgParamVal); -s32 wilc_init(struct net_device *dev, struct host_if_drv **phWFIDrv); -s32 wilc_deinit(struct wilc_vif *vif); -s32 wilc_add_beacon(struct wilc_vif *vif, u32 u32Interval, u32 u32DTIMPeriod, - u32 u32HeadLen, u8 *pu8Head, u32 u32TailLen, u8 *pu8Tail); +int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level); +int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type, + u8 *ch_freq_list, u8 ch_list_len, const u8 *ies, + size_t ies_len, wilc_scan_result scan_result, void *user_arg, + struct hidden_network *hidden_network); +int wilc_hif_set_cfg(struct wilc_vif *vif, + struct cfg_param_attr *cfg_param); +int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler); +int wilc_deinit(struct wilc_vif *vif); +int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period, + u32 head_len, u8 *head, u32 tail_len, u8 *tail); int wilc_del_beacon(struct wilc_vif *vif); int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param); -s32 wilc_del_allstation(struct wilc_vif *vif, u8 pu8MacAddr[][ETH_ALEN]); +int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN]); int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr); -s32 wilc_edit_station(struct wilc_vif *vif, - struct add_sta_param *pstrStaParams); -s32 wilc_set_power_mgmt(struct wilc_vif *vif, bool bIsEnabled, u32 u32Timeout); -s32 wilc_setup_multicast_filter(struct wilc_vif *vif, bool bIsEnabled, - u32 u32count); -s32 wilc_setup_ipaddress(struct wilc_vif *vif, u8 *u16ipadd, u8 idx); -s32 wilc_del_all_rx_ba_session(struct wilc_vif *vif, char *pBSSID, char TID); -s32 wilc_remain_on_channel(struct wilc_vif *vif, u32 u32SessionID, - u32 u32duration, u16 chan, - wilc_remain_on_chan_expired RemainOnChanExpired, - wilc_remain_on_chan_ready RemainOnChanReady, - void *pvUserArg); -s32 wilc_listen_state_expired(struct wilc_vif *vif, u32 u32SessionID); -s32 wilc_frame_register(struct wilc_vif *vif, u16 u16FrameType, bool bReg); -int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index); +int wilc_edit_station(struct wilc_vif *vif, + struct add_sta_param *sta_param); +int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout); +int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled, + u32 count); +int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx); +int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id, + u32 duration, u16 chan, + wilc_remain_on_chan_expired expired, + wilc_remain_on_chan_ready ready, + void *user_arg); +int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id); +int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg); +int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx); int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode); - -void wilc_free_join_params(void *pJoinParams); - -s32 wilc_get_statistics(struct wilc_vif *vif, struct rf_info *pstrStatistics); +int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats); void wilc_resolve_disconnect_aberration(struct wilc_vif *vif); int wilc_get_vif_idx(struct wilc_vif *vif); +int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power); +int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power); extern bool wilc_optaining_ip; extern u8 wilc_connected_ssid[6]; diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c index e550027645b7..7d9e5ded8ff4 100644 --- a/drivers/staging/wilc1000/linux_mon.c +++ b/drivers/staging/wilc1000/linux_mon.c 
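One conversion worth calling out from the wilc_edit_station() hunk further up: the open-coded kmalloc()-plus-memcpy() duplication of the station rates array becomes a single kmemdup() call. A minimal before/after sketch, assuming a rates buffer and length like the add_sta_param fields used there (dup_rates_old()/dup_rates_new() are illustrative names only):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* old style: allocate, check, then copy by hand */
static u8 *dup_rates_old(const u8 *rates, u8 rates_len)
{
	u8 *buf = kmalloc(rates_len, GFP_KERNEL);

	if (!buf)
		return NULL;
	memcpy(buf, rates, rates_len);
	return buf;
}

/* new style: kmemdup() collapses the allocate-and-copy into one call */
static u8 *dup_rates_new(const u8 *rates, u8 rates_len)
{
	return kmemdup(rates, rates_len, GFP_KERNEL);
}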
@@ -7,22 +7,20 @@ * @version 1.0 */ #include "wilc_wfi_cfgoperations.h" -#include "linux_wlan_common.h" #include "wilc_wlan_if.h" #include "wilc_wlan.h" - struct wilc_wfi_radiotap_hdr { struct ieee80211_radiotap_header hdr; u8 rate; -} __attribute__((packed)); +} __packed; struct wilc_wfi_radiotap_cb_hdr { struct ieee80211_radiotap_header hdr; u8 rate; u8 dump; u16 tx_flags; -} __attribute__((packed)); +} __packed; static struct net_device *wilc_wfi_mon; /* global monitor netdev */ @@ -53,15 +51,11 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size) struct wilc_wfi_radiotap_hdr *hdr; struct wilc_wfi_radiotap_cb_hdr *cb_hdr; - PRINT_INFO(HOSTAPD_DBG, "In monitor interface receive function\n"); - - if (wilc_wfi_mon == NULL) + if (!wilc_wfi_mon) return; - if (!netif_running(wilc_wfi_mon)) { - PRINT_INFO(HOSTAPD_DBG, "Monitor interface already RUNNING\n"); + if (!netif_running(wilc_wfi_mon)) return; - } /* Get WILC header */ memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET); @@ -71,18 +65,15 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size) pkt_offset = GET_PKT_OFFSET(header); if (pkt_offset & IS_MANAGMEMENT_CALLBACK) { - /* hostapd callback mgmt frame */ skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_cb_hdr)); - if (skb == NULL) { - PRINT_INFO(HOSTAPD_DBG, "Monitor if : No memory to allocate skb"); + if (!skb) return; - } memcpy(skb_put(skb, size), buff, size); - cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *) skb_push(skb, sizeof(*cb_hdr)); + cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb, sizeof(*cb_hdr)); memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr)); cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */ @@ -103,29 +94,21 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size) } } else { - skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_hdr)); - if (skb == NULL) { - PRINT_INFO(HOSTAPD_DBG, "Monitor if : No memory to allocate skb"); + if (!skb) return; - } memcpy(skb_put(skb, size), buff, size); - hdr = (struct wilc_wfi_radiotap_hdr *) skb_push(skb, sizeof(*hdr)); + hdr = (struct wilc_wfi_radiotap_hdr *)skb_push(skb, sizeof(*hdr)); memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr)); hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */ hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr)); - PRINT_INFO(HOSTAPD_DBG, "Radiotap len %d\n", hdr->hdr.it_len); hdr->hdr.it_present = cpu_to_le32 (1 << IEEE80211_RADIOTAP_RATE); /* | */ - PRINT_INFO(HOSTAPD_DBG, "Presentflags %d\n", hdr->hdr.it_present); hdr->rate = 5; /* txrate->bitrate / 5; */ - } - - skb->dev = wilc_wfi_mon; skb_set_mac_header(skb, 0); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -134,8 +117,6 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size) memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); - - } struct tx_complete_mon_data { @@ -145,48 +126,30 @@ struct tx_complete_mon_data { static void mgmt_tx_complete(void *priv, int status) { - - struct tx_complete_mon_data *pv_data = (struct tx_complete_mon_data *)priv; - u8 *buf = pv_data->buff; - - - - if (status == 1) { - if (INFO || buf[0] == 0x10 || buf[0] == 0xb0) - PRINT_INFO(HOSTAPD_DBG, "Packet sent successfully - Size = %d - Address = %p.\n", pv_data->size, pv_data->buff); - } else { - PRINT_INFO(HOSTAPD_DBG, "Couldn't send packet - Size = %d - Address = %p.\n", pv_data->size, pv_data->buff); - } - - + struct tx_complete_mon_data *pv_data = priv; /* incase of fully hosting mode, the freeing will be done in response to the cfg packet */ kfree(pv_data->buff); kfree(pv_data); } + static int mon_mgmt_tx(struct net_device *dev, 
const u8 *buf, size_t len) { struct tx_complete_mon_data *mgmt_tx = NULL; - if (dev == NULL) { - PRINT_D(HOSTAPD_DBG, "ERROR: dev == NULL\n"); + if (!dev) return -EFAULT; - } netif_stop_queue(dev); - mgmt_tx = kmalloc(sizeof(struct tx_complete_mon_data), GFP_ATOMIC); - if (mgmt_tx == NULL) { - PRINT_ER("Failed to allocate memory for mgmt_tx structure\n"); - return -EFAULT; - } + mgmt_tx = kmalloc(sizeof(*mgmt_tx), GFP_ATOMIC); + if (!mgmt_tx) + return -ENOMEM; mgmt_tx->buff = kmalloc(len, GFP_ATOMIC); - if (mgmt_tx->buff == NULL) { - PRINT_ER("Failed to allocate memory for mgmt_tx buff\n"); + if (!mgmt_tx->buff) { kfree(mgmt_tx); - return -EFAULT; - + return -ENOMEM; } mgmt_tx->size = len; @@ -211,47 +174,30 @@ static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len) static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb, struct net_device *dev) { - u32 rtap_len, i, ret = 0; + u32 rtap_len, ret = 0; struct WILC_WFI_mon_priv *mon_priv; struct sk_buff *skb2; struct wilc_wfi_radiotap_cb_hdr *cb_hdr; - if (wilc_wfi_mon == NULL) + if (!wilc_wfi_mon) return -EFAULT; mon_priv = netdev_priv(wilc_wfi_mon); - - if (mon_priv == NULL) { - PRINT_ER("Monitor interface private structure is NULL\n"); + if (!mon_priv) return -EFAULT; - } - - rtap_len = ieee80211_get_radiotap_len(skb->data); - if (skb->len < rtap_len) { - PRINT_ER("Error in radiotap header\n"); + if (skb->len < rtap_len) return -1; - } - /* skip the radiotap header */ - PRINT_INFO(HOSTAPD_DBG, "Radiotap len: %d\n", rtap_len); - if (INFO) { - for (i = 0; i < rtap_len; i++) - PRINT_INFO(HOSTAPD_DBG, "Radiotap_hdr[%d] %02x\n", i, skb->data[i]); - } - /* Skip the ratio tap header */ skb_pull(skb, rtap_len); - if (skb->data[0] == 0xc0) - PRINT_INFO(HOSTAPD_DBG, "%x:%x:%x:%x:%x%x\n", skb->data[4], skb->data[5], skb->data[6], skb->data[7], skb->data[8], skb->data[9]); - if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) { skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr)); memcpy(skb_put(skb2, skb->len), skb->data, skb->len); - cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *) skb_push(skb2, sizeof(*cb_hdr)); + cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb2, sizeof(*cb_hdr)); memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr)); cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */ @@ -278,24 +224,19 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb, } skb->dev = mon_priv->real_ndev; - PRINT_INFO(HOSTAPD_DBG, "Skipping the radiotap header\n"); - - - - /* actual deliver of data is device-specific, and not shown here */ - PRINT_INFO(HOSTAPD_DBG, "SKB netdevice name = %s\n", skb->dev->name); - PRINT_INFO(HOSTAPD_DBG, "MONITOR real dev name = %s\n", mon_priv->real_ndev->name); - /* Identify if Ethernet or MAC header (data or mgmt) */ memcpy(srcAdd, &skb->data[10], 6); memcpy(bssid, &skb->data[16], 6); /* if source address and bssid fields are equal>>Mac header */ /*send it to mgmt frames handler */ if (!(memcmp(srcAdd, bssid, 6))) { - mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len); + ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len); + if (ret) + netdev_err(dev, "fail to mgmt tx\n"); dev_kfree_skb(skb); - } else + } else { ret = wilc_mac_xmit(skb, mon_priv->real_ndev); + } return ret; } @@ -316,23 +257,16 @@ static const struct net_device_ops wilc_wfi_netdev_ops = { */ struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_device *real_dev) { - - u32 ret = 0; struct WILC_WFI_mon_priv *priv; /*If monitor interface is already 
initialized, return it*/ - if (wilc_wfi_mon) { + if (wilc_wfi_mon) return wilc_wfi_mon; - } wilc_wfi_mon = alloc_etherdev(sizeof(struct WILC_WFI_mon_priv)); - if (!wilc_wfi_mon) { - PRINT_ER("failed to allocate memory\n"); + if (!wilc_wfi_mon) return NULL; - - } - wilc_wfi_mon->type = ARPHRD_IEEE80211_RADIOTAP; strncpy(wilc_wfi_mon->name, name, IFNAMSIZ); wilc_wfi_mon->name[IFNAMSIZ - 1] = 0; @@ -340,14 +274,12 @@ struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_devi ret = register_netdevice(wilc_wfi_mon); if (ret) { - PRINT_ER(" register_netdevice failed (%d)\n", ret); + netdev_err(real_dev, "register_netdevice failed\n"); return NULL; } priv = netdev_priv(wilc_wfi_mon); - if (priv == NULL) { - PRINT_ER("private structure is NULL\n"); + if (!priv) return NULL; - } priv->real_ndev = real_dev; @@ -367,14 +299,11 @@ int WILC_WFI_deinit_mon_interface(void) { bool rollback_lock = false; - if (wilc_wfi_mon != NULL) { - PRINT_D(HOSTAPD_DBG, "In Deinit monitor interface\n"); - PRINT_D(HOSTAPD_DBG, "RTNL is being locked\n"); + if (wilc_wfi_mon) { if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } - PRINT_D(HOSTAPD_DBG, "Unregister netdev\n"); unregister_netdev(wilc_wfi_mon); if (rollback_lock) { @@ -384,5 +313,4 @@ int WILC_WFI_deinit_mon_interface(void) wilc_wfi_mon = NULL; } return 0; - } diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c index 54fe9d74b780..bfa754bb022d 100644 --- a/drivers/staging/wilc1000/linux_wlan.c +++ b/drivers/staging/wilc1000/linux_wlan.c @@ -1,5 +1,4 @@ #include "wilc_wfi_cfgoperations.h" -#include "linux_wlan_common.h" #include "wilc_wlan_if.h" #include "wilc_wlan.h" @@ -13,7 +12,6 @@ #include <linux/kthread.h> #include <linux/firmware.h> -#include <linux/delay.h> #include <linux/init.h> #include <linux/netdevice.h> @@ -25,7 +23,8 @@ #include <linux/semaphore.h> -static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr); +static int dev_state_ev_handler(struct notifier_block *this, + unsigned long event, void *ptr); static struct notifier_block g_dev_notifier = { .notifier_call = dev_state_ev_handler @@ -57,9 +56,10 @@ static const struct net_device_ops wilc_netdev_ops = { }; -static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr) +static int dev_state_ev_handler(struct notifier_block *this, + unsigned long event, void *ptr) { - struct in_ifaddr *dev_iface = (struct in_ifaddr *)ptr; + struct in_ifaddr *dev_iface = ptr; struct wilc_priv *priv; struct host_if_drv *hif_drv; struct net_device *dev; @@ -68,66 +68,48 @@ static int dev_state_ev_handler(struct notifier_block *this, unsigned long event u8 null_ip[4] = {0}; char wlan_dev_name[5] = "wlan0"; - if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev) { - PRINT_D(GENERIC_DBG, "dev_iface = NULL\n"); + if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev) return NOTIFY_DONE; - } if (memcmp(dev_iface->ifa_label, "wlan0", 5) && - memcmp(dev_iface->ifa_label, "p2p0", 4)) { - PRINT_D(GENERIC_DBG, "Interface is neither WLAN0 nor P2P0\n"); + memcmp(dev_iface->ifa_label, "p2p0", 4)) return NOTIFY_DONE; - } dev = (struct net_device *)dev_iface->ifa_dev->dev; - if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) { - PRINT_D(GENERIC_DBG, "No Wireless registerd\n"); + if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) return NOTIFY_DONE; - } + priv = wiphy_priv(dev->ieee80211_ptr->wiphy); - if (!priv) { - PRINT_D(GENERIC_DBG, "No Wireless Priv\n"); 
+ if (!priv) return NOTIFY_DONE; - } - hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv; + + hif_drv = (struct host_if_drv *)priv->hif_drv; vif = netdev_priv(dev); - if (!vif || !hif_drv) { - PRINT_D(GENERIC_DBG, "No Wireless Priv\n"); + if (!vif || !hif_drv) return NOTIFY_DONE; - } - - PRINT_INFO(GENERIC_DBG, "dev_state_ev_handler +++\n"); switch (event) { case NETDEV_UP: - PRINT_D(GENERIC_DBG, "dev_state_ev_handler event=NETDEV_UP %p\n", dev); - - PRINT_INFO(GENERIC_DBG, "\n ============== IP Address Obtained ===============\n\n"); - if (vif->iftype == STATION_MODE || vif->iftype == CLIENT_MODE) { hif_drv->IFC_UP = 1; wilc_optaining_ip = false; del_timer(&wilc_during_ip_timer); - PRINT_D(GENERIC_DBG, "IP obtained , enable scan\n"); } if (wilc_enable_ps) wilc_set_power_mgmt(vif, 1, 0); - PRINT_D(GENERIC_DBG, "[%s] Up IP\n", dev_iface->ifa_label); + netdev_dbg(dev, "[%s] Up IP\n", dev_iface->ifa_label); ip_addr_buf = (char *)&dev_iface->ifa_address; - PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n", - ip_addr_buf[0], ip_addr_buf[1], - ip_addr_buf[2], ip_addr_buf[3]); - wilc_setup_ipaddress(vif, ip_addr_buf, vif->u8IfIdx); + netdev_dbg(dev, "IP add=%d:%d:%d:%d\n", + ip_addr_buf[0], ip_addr_buf[1], + ip_addr_buf[2], ip_addr_buf[3]); + wilc_setup_ipaddress(vif, ip_addr_buf, vif->idx); break; case NETDEV_DOWN: - PRINT_D(GENERIC_DBG, "dev_state_ev_handler event=NETDEV_DOWN %p\n", dev); - - PRINT_INFO(GENERIC_DBG, "\n ============== IP Address Released ===============\n\n"); if (vif->iftype == STATION_MODE || vif->iftype == CLIENT_MODE) { hif_drv->IFC_UP = 0; wilc_optaining_ip = false; @@ -138,21 +120,18 @@ static int dev_state_ev_handler(struct notifier_block *this, unsigned long event wilc_resolve_disconnect_aberration(vif); - PRINT_D(GENERIC_DBG, "[%s] Down IP\n", dev_iface->ifa_label); + netdev_dbg(dev, "[%s] Down IP\n", dev_iface->ifa_label); ip_addr_buf = null_ip; - PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n", - ip_addr_buf[0], ip_addr_buf[1], - ip_addr_buf[2], ip_addr_buf[3]); + netdev_dbg(dev, "IP add=%d:%d:%d:%d\n", + ip_addr_buf[0], ip_addr_buf[1], + ip_addr_buf[2], ip_addr_buf[3]); - wilc_setup_ipaddress(vif, ip_addr_buf, vif->u8IfIdx); + wilc_setup_ipaddress(vif, ip_addr_buf, vif->idx); break; default: - PRINT_INFO(GENERIC_DBG, "dev_state_ev_handler event=default\n"); - PRINT_INFO(GENERIC_DBG, "[%s] unknown dev event: %lu\n", dev_iface->ifa_label, event); - break; } @@ -163,14 +142,13 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data) { struct wilc_vif *vif; struct wilc *wilc; - struct net_device *dev = (struct net_device *)user_data; + struct net_device *dev = user_data; vif = netdev_priv(dev); wilc = vif->wilc; - PRINT_D(INT_DBG, "Interrupt received UH\n"); if (wilc->close) { - PRINT_ER("Driver is CLOSING: Can't handle UH interrupt\n"); + netdev_err(dev, "Can't handle UH interrupt\n"); return IRQ_HANDLED; } return IRQ_WAKE_THREAD; @@ -180,16 +158,16 @@ static irqreturn_t isr_bh_routine(int irq, void *userdata) { struct wilc_vif *vif; struct wilc *wilc; + struct net_device *dev = userdata; vif = netdev_priv(userdata); wilc = vif->wilc; if (wilc->close) { - PRINT_ER("Driver is CLOSING: Can't handle BH interrupt\n"); + netdev_err(dev, "Can't handle BH interrupt\n"); return IRQ_HANDLED; } - PRINT_D(INT_DBG, "Interrupt received BH\n"); wilc_handle_isr(wilc); return IRQ_HANDLED; @@ -209,7 +187,7 @@ static int init_irq(struct net_device *dev) wl->dev_irq_num = gpio_to_irq(wl->gpio); } else { ret = -1; - PRINT_ER("could not obtain gpio for WILC_INTR\n"); + netdev_err(dev, "could 
not obtain gpio for WILC_INTR\n"); } if (ret != -1 && request_threaded_irq(wl->dev_irq_num, @@ -217,12 +195,13 @@ static int init_irq(struct net_device *dev) isr_bh_routine, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "WILC_IRQ", dev) < 0) { - PRINT_ER("Failed to request IRQ for GPIO: %d\n", wl->gpio); + netdev_err(dev, "Failed to request IRQ GPIO: %d\n", wl->gpio); gpio_free(wl->gpio); ret = -1; } else { - PRINT_D(INIT_DBG, "IRQ request succeeded IRQ-NUM= %d on GPIO: %d\n", - wl->dev_irq_num, wl->gpio); + netdev_dbg(dev, + "IRQ request succeeded IRQ-NUM= %d on GPIO: %d\n", + wl->dev_irq_num, wl->gpio); } return ret; @@ -243,22 +222,14 @@ static void deinit_irq(struct net_device *dev) } } -void wilc_dbg(u8 *buff) -{ - PRINT_D(INIT_DBG, "%d\n", *buff); -} - int wilc_lock_timeout(struct wilc *nic, void *vp, u32 timeout) { /* FIXME: replace with mutex_lock or wait_for_completion */ int error = -1; - PRINT_D(LOCK_DBG, "Locking %p\n", vp); if (vp) - error = down_timeout((struct semaphore *)vp, + error = down_timeout(vp, msecs_to_jiffies(timeout)); - else - PRINT_ER("Failed, mutex is NULL\n"); return error; } @@ -275,8 +246,6 @@ void wilc_mac_indicate(struct wilc *wilc, int flag) } else { wilc->mac_status = status; } - } else if (flag == WILC_MAC_INDICATE_SCAN) { - PRINT_D(GENERIC_DBG, "Scanning ...\n"); } } @@ -288,26 +257,19 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header) bssid = mac_header + 10; bssid1 = mac_header + 4; - for (i = 0; i < wilc->vif_num; i++) - if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN) || - !memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN)) - return wilc->vif[i]->ndev; - - PRINT_INFO(INIT_DBG, "Invalide handle\n"); - for (i = 0; i < 25; i++) - PRINT_D(INIT_DBG, "%02x ", mac_header[i]); - bssid = mac_header + 18; - bssid1 = mac_header + 12; - for (i = 0; i < wilc->vif_num; i++) - if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN) || - !memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN)) - return wilc->vif[i]->ndev; + for (i = 0; i < wilc->vif_num; i++) { + if (wilc->vif[i]->mode == STATION_MODE) + if (!memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN)) + return wilc->vif[i]->ndev; + if (wilc->vif[i]->mode == AP_MODE) + if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN)) + return wilc->vif[i]->ndev; + } - PRINT_INFO(INIT_DBG, "\n"); return NULL; } -int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid) +int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode) { int i = 0; int ret = -1; @@ -320,6 +282,7 @@ int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid) for (i = 0; i < wilc->vif_num; i++) if (wilc->vif[i]->ndev == wilc_netdev) { memcpy(wilc->vif[i]->bssid, bssid, 6); + wilc->vif[i]->mode = mode; ret = 0; break; } @@ -362,28 +325,21 @@ static int linux_wlan_txq_task(void *vp) up(&wl->txq_thread_started); while (1) { - PRINT_D(TX_DBG, "txq_task Taking a nap :)\n"); down(&wl->txq_event); - PRINT_D(TX_DBG, "txq_task Who waked me up :$\n"); if (wl->close) { up(&wl->txq_thread_started); while (!kthread_should_stop()) schedule(); - - PRINT_D(TX_DBG, "TX thread stopped\n"); break; } - PRINT_D(TX_DBG, "txq_task handle the sending packet and let me go to sleep.\n"); #if !defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS ret = wilc_wlan_handle_txq(dev, &txq_count); #else do { ret = wilc_wlan_handle_txq(dev, &txq_count); if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) { - PRINT_D(TX_DBG, "Waking up queue\n"); - if (netif_queue_stopped(wl->vif[0]->ndev)) netif_wake_queue(wl->vif[0]->ndev); if (netif_queue_stopped(wl->vif[1]->ndev)) @@ 
-391,9 +347,6 @@ static int linux_wlan_txq_task(void *vp) } if (ret == WILC_TX_ERR_NO_BUF) { - do { - msleep(TX_BACKOFF_WEIGHT_UNIT_MS << backoff_weight); - } while (0); backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP; if (backoff_weight > TX_BACKOFF_WEIGHT_MAX) backoff_weight = TX_BACKOFF_WEIGHT_MAX; @@ -410,43 +363,31 @@ static int linux_wlan_txq_task(void *vp) return 0; } -void wilc_rx_complete(struct wilc *nic) -{ - PRINT_D(RX_DBG, "RX completed\n"); -} - int wilc_wlan_get_firmware(struct net_device *dev) { struct wilc_vif *vif; struct wilc *wilc; - int ret = 0; + int chip_id, ret = 0; const struct firmware *wilc_firmware; char *firmware; vif = netdev_priv(dev); wilc = vif->wilc; - if (vif->iftype == AP_MODE) { - firmware = AP_FIRMWARE; - } else if (vif->iftype == STATION_MODE) { - firmware = STA_FIRMWARE; - } else { - PRINT_D(INIT_DBG, "Get P2P_CONCURRENCY_FIRMWARE\n"); - firmware = P2P_CONCURRENCY_FIRMWARE; - } + chip_id = wilc_get_chipid(wilc, false); - if (!vif) { - PRINT_ER("vif is NULL\n"); - goto _fail_; - } + if (chip_id < 0x1003a0) + firmware = FIRMWARE_1002; + else + firmware = FIRMWARE_1003; - if (!(&vif->ndev->dev)) { - PRINT_ER("&vif->ndev->dev is NULL\n"); + netdev_info(dev, "loading firmware %s\n", firmware); + + if (!(&vif->ndev->dev)) goto _fail_; - } if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) { - PRINT_ER("%s - firmare not available\n", firmware); + netdev_err(dev, "%s - firmare not available\n", firmware); ret = -1; goto _fail_; } @@ -466,20 +407,13 @@ static int linux_wlan_start_firmware(struct net_device *dev) vif = netdev_priv(dev); wilc = vif->wilc; - PRINT_D(INIT_DBG, "Starting Firmware ...\n"); ret = wilc_wlan_start(wilc); - if (ret < 0) { - PRINT_ER("Failed to start Firmware\n"); + if (ret < 0) return ret; - } - PRINT_D(INIT_DBG, "Waiting for Firmware to get ready ...\n"); ret = wilc_lock_timeout(wilc, &wilc->sync_event, 5000); - if (ret) { - PRINT_D(INIT_DBG, "Firmware start timed out"); + if (ret) return ret; - } - PRINT_D(INIT_DBG, "Firmware successfully started\n"); return 0; } @@ -494,128 +428,123 @@ static int wilc1000_firmware_download(struct net_device *dev) wilc = vif->wilc; if (!wilc->firmware) { - PRINT_ER("Firmware buffer is NULL\n"); + netdev_err(dev, "Firmware buffer is NULL\n"); return -ENOBUFS; } - PRINT_D(INIT_DBG, "Downloading Firmware ...\n"); + ret = wilc_wlan_firmware_download(wilc, wilc->firmware->data, wilc->firmware->size); if (ret < 0) return ret; - PRINT_D(INIT_DBG, "Freeing FW buffer ...\n"); - PRINT_D(INIT_DBG, "Releasing firmware\n"); release_firmware(wilc->firmware); wilc->firmware = NULL; - PRINT_D(INIT_DBG, "Download Succeeded\n"); + netdev_dbg(dev, "Download Succeeded\n"); return 0; } static int linux_wlan_init_test_config(struct net_device *dev, - struct wilc *wilc) + struct wilc_vif *vif) { unsigned char c_val[64]; unsigned char mac_add[] = {0x00, 0x80, 0xC2, 0x5E, 0xa2, 0xff}; - + struct wilc *wilc = vif->wilc; struct wilc_priv *priv; struct host_if_drv *hif_drv; - PRINT_D(TX_DBG, "Start configuring Firmware\n"); - get_random_bytes(&mac_add[5], 1); - get_random_bytes(&mac_add[4], 1); + netdev_dbg(dev, "Start configuring Firmware\n"); priv = wiphy_priv(dev->ieee80211_ptr->wiphy); - hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv; - PRINT_D(INIT_DBG, "Host = %p\n", hif_drv); + hif_drv = (struct host_if_drv *)priv->hif_drv; + netdev_dbg(dev, "Host = %p\n", hif_drv); + wilc_get_mac_address(vif, mac_add); - PRINT_D(INIT_DBG, "MAC address is : %02x-%02x-%02x-%02x-%02x-%02x\n", - mac_add[0], mac_add[1], 
mac_add[2], - mac_add[3], mac_add[4], mac_add[5]); - wilc_get_chipid(wilc, 0); + netdev_dbg(dev, "MAC address is : %pM\n", mac_add); + wilc_get_chipid(wilc, false); *(int *)c_val = 1; - if (!wilc_wlan_cfg_set(wilc, 1, WID_SET_DRV_HANDLER, c_val, 4, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 1, WID_SET_DRV_HANDLER, c_val, 4, 0, 0)) goto _fail_; c_val[0] = 0; - if (!wilc_wlan_cfg_set(wilc, 0, WID_PC_TEST_MODE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_PC_TEST_MODE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = INFRASTRUCTURE; - if (!wilc_wlan_cfg_set(wilc, 0, WID_BSS_TYPE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_BSS_TYPE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = RATE_AUTO; - if (!wilc_wlan_cfg_set(wilc, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = G_MIXED_11B_2_MODE; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11G_OPERATING_MODE, c_val, 1, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 1; - if (!wilc_wlan_cfg_set(wilc, 0, WID_CURRENT_CHANNEL, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_CHANNEL, c_val, 1, 0, 0)) goto _fail_; c_val[0] = G_SHORT_PREAMBLE; - if (!wilc_wlan_cfg_set(wilc, 0, WID_PREAMBLE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = AUTO_PROT; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_PROT_MECH, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_PROT_MECH, c_val, 1, 0, 0)) goto _fail_; c_val[0] = ACTIVE_SCAN; - if (!wilc_wlan_cfg_set(wilc, 0, WID_SCAN_TYPE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_SCAN_TYPE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = SITE_SURVEY_OFF; - if (!wilc_wlan_cfg_set(wilc, 0, WID_SITE_SURVEY, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_SITE_SURVEY, c_val, 1, 0, 0)) goto _fail_; *((int *)c_val) = 0xffff; - if (!wilc_wlan_cfg_set(wilc, 0, WID_RTS_THRESHOLD, c_val, 2, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_RTS_THRESHOLD, c_val, 2, 0, 0)) goto _fail_; *((int *)c_val) = 2346; - if (!wilc_wlan_cfg_set(wilc, 0, WID_FRAG_THRESHOLD, c_val, 2, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_FRAG_THRESHOLD, c_val, 2, 0, 0)) goto _fail_; c_val[0] = 0; - if (!wilc_wlan_cfg_set(wilc, 0, WID_BCAST_SSID, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_BCAST_SSID, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 1; - if (!wilc_wlan_cfg_set(wilc, 0, WID_QOS_ENABLE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_QOS_ENABLE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = NO_POWERSAVE; - if (!wilc_wlan_cfg_set(wilc, 0, WID_POWER_MANAGEMENT, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_POWER_MANAGEMENT, c_val, 1, 0, 0)) goto _fail_; c_val[0] = NO_SECURITY; /* NO_ENCRYPT, 0x79 */ - if (!wilc_wlan_cfg_set(wilc, 0, WID_11I_MODE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_11I_MODE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = OPEN_SYSTEM; - if (!wilc_wlan_cfg_set(wilc, 0, WID_AUTH_TYPE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_AUTH_TYPE, c_val, 1, 0, 0)) goto _fail_; strcpy(c_val, "123456790abcdef1234567890"); - if (!wilc_wlan_cfg_set(wilc, 0, WID_WEP_KEY_VALUE, c_val, + if (!wilc_wlan_cfg_set(vif, 0, WID_WEP_KEY_VALUE, c_val, (strlen(c_val) + 1), 0, 0)) goto _fail_; strcpy(c_val, "12345678"); - if (!wilc_wlan_cfg_set(wilc, 0, WID_11I_PSK, c_val, (strlen(c_val)), 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_11I_PSK, c_val, (strlen(c_val)), 0, 0)) goto _fail_; strcpy(c_val, "password"); - if 
(!wilc_wlan_cfg_set(wilc, 0, WID_1X_KEY, c_val, (strlen(c_val) + 1), + if (!wilc_wlan_cfg_set(vif, 0, WID_1X_KEY, c_val, (strlen(c_val) + 1), 0, 0)) goto _fail_; @@ -623,106 +552,106 @@ static int linux_wlan_init_test_config(struct net_device *dev, c_val[1] = 168; c_val[2] = 1; c_val[3] = 112; - if (!wilc_wlan_cfg_set(wilc, 0, WID_1X_SERV_ADDR, c_val, 4, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_1X_SERV_ADDR, c_val, 4, 0, 0)) goto _fail_; c_val[0] = 3; - if (!wilc_wlan_cfg_set(wilc, 0, WID_LISTEN_INTERVAL, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_LISTEN_INTERVAL, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 3; - if (!wilc_wlan_cfg_set(wilc, 0, WID_DTIM_PERIOD, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_DTIM_PERIOD, c_val, 1, 0, 0)) goto _fail_; c_val[0] = NORMAL_ACK; - if (!wilc_wlan_cfg_set(wilc, 0, WID_ACK_POLICY, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_ACK_POLICY, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 0; - if (!wilc_wlan_cfg_set(wilc, 0, WID_USER_CONTROL_ON_TX_POWER, c_val, 1, + if (!wilc_wlan_cfg_set(vif, 0, WID_USER_CONTROL_ON_TX_POWER, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 48; - if (!wilc_wlan_cfg_set(wilc, 0, WID_TX_POWER_LEVEL_11A, c_val, 1, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11A, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 28; - if (!wilc_wlan_cfg_set(wilc, 0, WID_TX_POWER_LEVEL_11B, c_val, 1, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11B, c_val, 1, 0, 0)) goto _fail_; *((int *)c_val) = 100; - if (!wilc_wlan_cfg_set(wilc, 0, WID_BEACON_INTERVAL, c_val, 2, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_BEACON_INTERVAL, c_val, 2, 0, 0)) goto _fail_; c_val[0] = REKEY_DISABLE; - if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_POLICY, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_POLICY, c_val, 1, 0, 0)) goto _fail_; *((int *)c_val) = 84600; - if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_PERIOD, c_val, 4, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PERIOD, c_val, 4, 0, 0)) goto _fail_; *((int *)c_val) = 500; - if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_PACKET_COUNT, c_val, 4, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PACKET_COUNT, c_val, 4, 0, 0)) goto _fail_; c_val[0] = 1; - if (!wilc_wlan_cfg_set(wilc, 0, WID_SHORT_SLOT_ALLOWED, c_val, 1, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_SHORT_SLOT_ALLOWED, c_val, 1, 0, 0)) goto _fail_; c_val[0] = G_SELF_CTS_PROT; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_ERP_PROT_TYPE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ERP_PROT_TYPE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 1; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_ENABLE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ENABLE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = HT_MIXED_MODE; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_OPERATING_MODE, c_val, 1, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OPERATING_MODE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 1; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_TXOP_PROT_DISABLE, c_val, 1, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_TXOP_PROT_DISABLE, c_val, 1, 0, 0)) goto _fail_; memcpy(c_val, mac_add, 6); - if (!wilc_wlan_cfg_set(wilc, 0, WID_MAC_ADDR, c_val, 6, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_MAC_ADDR, c_val, 6, 0, 0)) goto _fail_; c_val[0] = DETECT_PROTECT_REPORT; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1, + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1, 0, 0)) goto _fail_; c_val[0] = RTS_CTS_NONHT_PROT; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_HT_PROT_TYPE, c_val, 1, 0, 0)) + if 
(!wilc_wlan_cfg_set(vif, 0, WID_11N_HT_PROT_TYPE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 0; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_RIFS_PROT_ENABLE, c_val, 1, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_RIFS_PROT_ENABLE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = MIMO_MODE; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_SMPS_MODE, c_val, 1, 0, 0)) + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_SMPS_MODE, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 7; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_CURRENT_TX_MCS, c_val, 1, 0, + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_CURRENT_TX_MCS, c_val, 1, 0, 0)) goto _fail_; c_val[0] = 1; - if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_IMMEDIATE_BA_ENABLED, c_val, 1, + if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, c_val, 1, 1, 1)) goto _fail_; @@ -748,7 +677,6 @@ void wilc1000_wlan_deinit(struct net_device *dev) if (wl->initialized) { netdev_info(dev, "Deinitializing wilc1000...\n"); - PRINT_D(INIT_DBG, "Disabling IRQ\n"); if (!wl->dev_irq_num && wl->hif_func->disable_interrupt) { mutex_lock(&wl->hif_cs); @@ -758,37 +686,26 @@ void wilc1000_wlan_deinit(struct net_device *dev) if (&wl->txq_event) up(&wl->txq_event); - PRINT_D(INIT_DBG, "Deinitializing Threads\n"); wlan_deinitialize_threads(dev); - - PRINT_D(INIT_DBG, "Deinitializing IRQ\n"); deinit_irq(dev); wilc_wlan_stop(wl); - - PRINT_D(INIT_DBG, "Deinitializing WILC Wlan\n"); wilc_wlan_cleanup(dev); #if defined(PLAT_ALLWINNER_A20) || defined(PLAT_ALLWINNER_A23) || defined(PLAT_ALLWINNER_A31) if (!wl->dev_irq_num && wl->hif_func->disable_interrupt) { - - PRINT_D(INIT_DBG, "Disabling IRQ 2\n"); - mutex_lock(&wl->hif_cs); wl->hif_func->disable_interrupt(wl); mutex_unlock(&wl->hif_cs); } #endif - - PRINT_D(INIT_DBG, "Deinitializing Locks\n"); wlan_deinit_locks(dev); wl->initialized = false; - PRINT_D(INIT_DBG, "wilc1000 deinitialization Done\n"); - + netdev_dbg(dev, "wilc1000 deinitialization Done\n"); } else { - PRINT_D(INIT_DBG, "wilc1000 is not initialized\n"); + netdev_dbg(dev, "wilc1000 is not initialized\n"); } } @@ -800,8 +717,6 @@ static int wlan_init_locks(struct net_device *dev) vif = netdev_priv(dev); wl = vif->wilc; - PRINT_D(INIT_DBG, "Initializing Locks ...\n"); - mutex_init(&wl->hif_cs); mutex_init(&wl->rxq_cs); @@ -826,8 +741,6 @@ static int wlan_deinit_locks(struct net_device *dev) vif = netdev_priv(dev); wilc = vif->wilc; - PRINT_D(INIT_DBG, "De-Initializing Locks\n"); - if (&wilc->hif_cs) mutex_destroy(&wilc->hif_cs); @@ -845,12 +758,10 @@ static int wlan_initialize_threads(struct net_device *dev) vif = netdev_priv(dev); wilc = vif->wilc; - PRINT_D(INIT_DBG, "Initializing Threads ...\n"); - PRINT_D(INIT_DBG, "Creating kthread for transmission\n"); wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev, "K_TXQ_TASK"); if (!wilc->txq_thread) { - PRINT_ER("couldn't create TXQ thread\n"); + netdev_err(dev, "couldn't create TXQ thread\n"); wilc->close = 0; return -ENOBUFS; } @@ -863,11 +774,11 @@ static void wlan_deinitialize_threads(struct net_device *dev) { struct wilc_vif *vif; struct wilc *wl; + vif = netdev_priv(dev); wl = vif->wilc; wl->close = 1; - PRINT_D(INIT_DBG, "Deinitializing Threads\n"); if (&wl->txq_event) up(&wl->txq_event); @@ -891,20 +802,17 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif) ret = wilc_wlan_init(dev); if (ret < 0) { - PRINT_ER("Initializing WILC_Wlan FAILED\n"); ret = -EIO; goto _fail_locks_; } if (wl->gpio >= 0 && init_irq(dev)) { - PRINT_ER("couldn't initialize IRQ\n"); ret = -EIO; goto _fail_locks_; } ret = 
wlan_initialize_threads(dev); if (ret < 0) { - PRINT_ER("Initializing Threads FAILED\n"); ret = -EIO; goto _fail_wilc_wlan_; } @@ -912,45 +820,41 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif) if (!wl->dev_irq_num && wl->hif_func->enable_interrupt && wl->hif_func->enable_interrupt(wl)) { - PRINT_ER("couldn't initialize IRQ\n"); ret = -EIO; goto _fail_irq_init_; } if (wilc_wlan_get_firmware(dev)) { - PRINT_ER("Can't get firmware\n"); ret = -EIO; goto _fail_irq_enable_; } ret = wilc1000_firmware_download(dev); if (ret < 0) { - PRINT_ER("Failed to download firmware\n"); ret = -EIO; goto _fail_irq_enable_; } ret = linux_wlan_start_firmware(dev); if (ret < 0) { - PRINT_ER("Failed to start firmware\n"); ret = -EIO; goto _fail_irq_enable_; } - if (wilc_wlan_cfg_get(wl, 1, WID_FIRMWARE_VERSION, 1, 0)) { + if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) { int size; - char Firmware_ver[20]; + char firmware_ver[20]; - size = wilc_wlan_cfg_get_val( - WID_FIRMWARE_VERSION, - Firmware_ver, sizeof(Firmware_ver)); - Firmware_ver[size] = '\0'; - PRINT_D(INIT_DBG, "***** Firmware Ver = %s *******\n", Firmware_ver); + size = wilc_wlan_cfg_get_val(WID_FIRMWARE_VERSION, + firmware_ver, + sizeof(firmware_ver)); + firmware_ver[size] = '\0'; + netdev_dbg(dev, "Firmware Ver = %s\n", firmware_ver); } - ret = linux_wlan_init_test_config(dev, wl); + ret = linux_wlan_init_test_config(dev, vif); if (ret < 0) { - PRINT_ER("Failed to configure firmware\n"); + netdev_err(dev, "Failed to configure firmware\n"); ret = -EIO; goto _fail_fw_start_; } @@ -974,9 +878,9 @@ _fail_wilc_wlan_: wilc_wlan_cleanup(dev); _fail_locks_: wlan_deinit_locks(dev); - PRINT_ER("WLAN Iinitialization FAILED\n"); + netdev_err(dev, "WLAN Iinitialization FAILED\n"); } else { - PRINT_D(INIT_DBG, "wilc1000 already initialized\n"); + netdev_dbg(dev, "wilc1000 already initialized\n"); } return ret; } @@ -1003,7 +907,7 @@ int wilc_mac_open(struct net_device *ndev) vif = netdev_priv(ndev); wl = vif->wilc; - if (!wl|| !wl->dev) { + if (!wl || !wl->dev) { netdev_err(ndev, "wilc1000: SPI device not ready\n"); return -ENODEV; } @@ -1011,31 +915,45 @@ int wilc_mac_open(struct net_device *ndev) vif = netdev_priv(ndev); wilc = vif->wilc; priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy); - PRINT_D(INIT_DBG, "MAC OPEN[%p]\n", ndev); + netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev); ret = wilc_init_host_int(ndev); - if (ret < 0) { - PRINT_ER("Failed to initialize host interface\n"); - + if (ret < 0) return ret; - } - PRINT_D(INIT_DBG, "*** re-init ***\n"); ret = wilc1000_wlan_init(ndev, vif); if (ret < 0) { - PRINT_ER("Failed to initialize wilc1000\n"); wilc_deinit_host_int(ndev); return ret; } - wilc_set_machw_change_vir_if(ndev, false); - - wilc_get_mac_address(vif, mac_add); - PRINT_D(INIT_DBG, "Mac address: %pM\n", mac_add); - for (i = 0; i < wl->vif_num; i++) { if (ndev == wl->vif[i]->ndev) { + if (vif->iftype == AP_MODE) { + wilc_set_wfi_drv_handler(vif, + wilc_get_vif_idx(vif), + 0); + } else if (!wilc_wlan_get_num_conn_ifcs(wilc)) { + wilc_set_wfi_drv_handler(vif, + wilc_get_vif_idx(vif), + wilc->open_ifcs); + } else { + if (memcmp(wilc->vif[i ^ 1]->bssid, + wilc->vif[i ^ 1]->src_addr, 6)) + wilc_set_wfi_drv_handler(vif, + wilc_get_vif_idx(vif), + 0); + else + wilc_set_wfi_drv_handler(vif, + wilc_get_vif_idx(vif), + 1); + } + wilc_set_operation_mode(vif, vif->iftype); + + wilc_get_mac_address(vif, mac_add); + netdev_dbg(ndev, "Mac address: %pM\n", mac_add); memcpy(wl->vif[i]->src_addr, mac_add, ETH_ALEN); + break; } } @@ 
-1043,7 +961,7 @@ int wilc_mac_open(struct net_device *ndev) memcpy(ndev->dev_addr, wl->vif[i]->src_addr, ETH_ALEN); if (!is_valid_ether_addr(ndev->dev_addr)) { - PRINT_ER("Error: Wrong MAC address\n"); + netdev_err(ndev, "Wrong MAC address\n"); wilc_deinit_host_int(ndev); wilc1000_wlan_deinit(ndev); return -EINVAL; @@ -1065,7 +983,7 @@ int wilc_mac_open(struct net_device *ndev) static struct net_device_stats *mac_stats(struct net_device *dev) { - struct wilc_vif *vif= netdev_priv(dev); + struct wilc_vif *vif = netdev_priv(dev); return &vif->netstats; } @@ -1080,57 +998,41 @@ static void wilc_set_multicast_list(struct net_device *dev) priv = wiphy_priv(dev->ieee80211_ptr->wiphy); vif = netdev_priv(dev); - hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv; - - if (!dev) - return; + hif_drv = (struct host_if_drv *)priv->hif_drv; - PRINT_D(INIT_DBG, "Setting Multicast List with count = %d.\n", - dev->mc.count); - - if (dev->flags & IFF_PROMISC) { - PRINT_D(INIT_DBG, "Set promiscuous mode ON, retrive all packets\n"); + if (dev->flags & IFF_PROMISC) return; - } if ((dev->flags & IFF_ALLMULTI) || (dev->mc.count) > WILC_MULTICAST_TABLE_SIZE) { - PRINT_D(INIT_DBG, "Disable multicast filter, retrive all multicast packets\n"); wilc_setup_multicast_filter(vif, false, 0); return; } if ((dev->mc.count) == 0) { - PRINT_D(INIT_DBG, "Enable multicast filter, retrive directed packets only.\n"); wilc_setup_multicast_filter(vif, true, 0); return; } netdev_for_each_mc_addr(ha, dev) { memcpy(wilc_multicast_mac_addr_list[i], ha->addr, ETH_ALEN); - PRINT_D(INIT_DBG, "Entry[%d]: %x:%x:%x:%x:%x:%x\n", i, - wilc_multicast_mac_addr_list[i][0], - wilc_multicast_mac_addr_list[i][1], - wilc_multicast_mac_addr_list[i][2], - wilc_multicast_mac_addr_list[i][3], - wilc_multicast_mac_addr_list[i][4], - wilc_multicast_mac_addr_list[i][5]); + netdev_dbg(dev, "Entry[%d]: %x:%x:%x:%x:%x:%x\n", i, + wilc_multicast_mac_addr_list[i][0], + wilc_multicast_mac_addr_list[i][1], + wilc_multicast_mac_addr_list[i][2], + wilc_multicast_mac_addr_list[i][3], + wilc_multicast_mac_addr_list[i][4], + wilc_multicast_mac_addr_list[i][5]); i++; } wilc_setup_multicast_filter(vif, true, (dev->mc.count)); - - return; } static void linux_wlan_tx_complete(void *priv, int status) { - struct tx_complete_data *pv_data = (struct tx_complete_data *)priv; + struct tx_complete_data *pv_data = priv; - if (status == 1) - PRINT_D(TX_DBG, "Packet sent successfully - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb); - else - PRINT_D(TX_DBG, "Couldn't send packet - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb); dev_kfree_skb(pv_data->skb); kfree(pv_data); } @@ -1148,16 +1050,13 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) vif = netdev_priv(ndev); wilc = vif->wilc; - PRINT_D(TX_DBG, "Sending packet just received from TCP/IP\n"); - if (skb->dev != ndev) { - PRINT_ER("Packet not destined to this device\n"); + netdev_err(ndev, "Packet not destined to this device\n"); return 0; } tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC); if (!tx_data) { - PRINT_ER("Failed to allocate memory for tx_data structure\n"); dev_kfree_skb(skb); netif_wake_queue(ndev); return 0; @@ -1169,21 +1068,19 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) eth_h = (struct ethhdr *)(skb->data); if (eth_h->h_proto == 0x8e88) - PRINT_D(INIT_DBG, "EAPOL transmitted\n"); + netdev_dbg(ndev, "EAPOL transmitted\n"); ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr)); udp_buf = (char *)ih 
+ sizeof(struct iphdr); if ((udp_buf[1] == 68 && udp_buf[3] == 67) || (udp_buf[1] == 67 && udp_buf[3] == 68)) - PRINT_D(GENERIC_DBG, "DHCP Message transmitted, type:%x %x %x\n", - udp_buf[248], udp_buf[249], udp_buf[250]); + netdev_dbg(ndev, "DHCP Message transmitted, type:%x %x %x\n", + udp_buf[248], udp_buf[249], udp_buf[250]); - PRINT_D(TX_DBG, "Sending packet - Size = %d - Address = %p - SKB = %p\n", tx_data->size, tx_data->buff, tx_data->skb); - PRINT_D(TX_DBG, "Adding tx packet to TX Queue\n"); vif->netstats.tx_packets++; vif->netstats.tx_bytes += tx_data->size; - tx_data->pBssid = wilc->vif[vif->u8IfIdx]->bssid; + tx_data->bssid = wilc->vif[vif->idx]->bssid; queue_count = wilc_wlan_txq_add_net_pkt(ndev, (void *)tx_data, tx_data->buff, tx_data->size, linux_wlan_tx_complete); @@ -1206,39 +1103,29 @@ int wilc_mac_close(struct net_device *ndev) vif = netdev_priv(ndev); if (!vif || !vif->ndev || !vif->ndev->ieee80211_ptr || - !vif->ndev->ieee80211_ptr->wiphy) { - PRINT_ER("vif = NULL\n"); + !vif->ndev->ieee80211_ptr->wiphy) return 0; - } priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy); wl = vif->wilc; - if (!priv) { - PRINT_ER("priv = NULL\n"); + if (!priv) return 0; - } - hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv; + hif_drv = (struct host_if_drv *)priv->hif_drv; - PRINT_D(GENERIC_DBG, "Mac close\n"); + netdev_dbg(ndev, "Mac close\n"); - if (!wl) { - PRINT_ER("wl = NULL\n"); + if (!wl) return 0; - } - if (!hif_drv) { - PRINT_ER("hif_drv = NULL\n"); + if (!hif_drv) return 0; - } - if ((wl->open_ifcs) > 0) { + if ((wl->open_ifcs) > 0) wl->open_ifcs--; - } else { - PRINT_ER("ERROR: MAC close called while number of opened interfaces is zero\n"); + else return 0; - } if (vif->ndev) { netif_stop_queue(vif->ndev); @@ -1247,7 +1134,7 @@ int wilc_mac_close(struct net_device *ndev) } if (wl->open_ifcs == 0) { - PRINT_D(GENERIC_DBG, "Deinitializing wilc1000\n"); + netdev_dbg(ndev, "Deinitializing wilc1000\n"); wl->close = 1; wilc1000_wlan_deinit(ndev); WILC_WFI_deinit_mon_interface(); @@ -1278,7 +1165,7 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) switch (cmd) { case SIOCSIWPRIV: { - struct iwreq *wrq = (struct iwreq *) req; + struct iwreq *wrq = (struct iwreq *)req; size = wrq->u.data.length; @@ -1291,16 +1178,14 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) if (strncasecmp(buff, "RSSI", length) == 0) { priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy); ret = wilc_get_rssi(vif, &rssi); - if (ret) - PRINT_ER("Failed to send get rssi param's message queue "); - PRINT_INFO(GENERIC_DBG, "RSSI :%d\n", rssi); + netdev_info(ndev, "RSSI :%d\n", rssi); rssi += 5; snprintf(buff, size, "rssi %d", rssi); if (copy_to_user(wrq->u.data.pointer, buff, size)) { - PRINT_ER("%s: failed to copy data to user buffer\n", __func__); + netdev_err(ndev, "failed to copy\n"); ret = -EFAULT; goto done; } @@ -1311,7 +1196,7 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) default: { - PRINT_INFO(GENERIC_DBG, "Command - %d - has been received\n", cmd); + netdev_info(ndev, "Command - %d - has been received\n", cmd); ret = -EOPNOTSUPP; goto done; } @@ -1333,6 +1218,9 @@ void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset) struct net_device *wilc_netdev; struct wilc_vif *vif; + if (!wilc) + return; + wilc_netdev = get_if_handler(wilc, buff); if (!wilc_netdev) return; @@ -1345,18 +1233,11 @@ void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset) buff_to_send = buff; skb = 
dev_alloc_skb(frame_len); - if (!skb) { - PRINT_ER("Low memory - packet droped\n"); + if (!skb) return; - } - if (!wilc || !wilc_netdev) - PRINT_ER("wilc_netdev in wilc is NULL"); skb->dev = wilc_netdev; - if (!skb->dev) - PRINT_ER("skb->dev is NULL\n"); - memcpy(skb_put(skb, frame_len), buff_to_send, frame_len); skb->protocol = eth_type_trans(skb, wilc_netdev); @@ -1364,7 +1245,7 @@ void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset) vif->netstats.rx_bytes += frame_len; skb->ip_summed = CHECKSUM_UNNECESSARY; stats = netif_rx(skb); - PRINT_D(RX_DBG, "netif_rx ret value is: %d\n", stats); + netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats); } } @@ -1403,7 +1284,7 @@ void wilc_netdev_cleanup(struct wilc *wilc) release_firmware(wilc->firmware); if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) { - wilc_lock_timeout(wilc, &close_exit_sync, 12 * 1000); + wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000); for (i = 0; i < NUM_CONCURRENT_IFC; i++) if (wilc->vif[i]->ndev) @@ -1424,7 +1305,7 @@ EXPORT_SYMBOL_GPL(wilc_netdev_cleanup); int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, int gpio, const struct wilc_hif_func *ops) { - int i; + int i, ret; struct wilc_vif *vif; struct net_device *ndev; struct wilc *wl; @@ -1444,10 +1325,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, for (i = 0; i < NUM_CONCURRENT_IFC; i++) { ndev = alloc_etherdev(sizeof(struct wilc_vif)); - if (!ndev) { - PRINT_ER("Failed to allocate ethernet dev\n"); - return -1; - } + if (!ndev) + return -ENOMEM; vif = netdev_priv(ndev); memset(vif, 0, sizeof(struct wilc_vif)); @@ -1457,7 +1336,7 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, else strcpy(ndev->name, "p2p%d"); - vif->u8IfIdx = wl->vif_num; + vif->idx = wl->vif_num; vif->wilc = *wilc; wl->vif[i] = vif; wl->vif[wl->vif_num]->ndev = ndev; @@ -1466,13 +1345,14 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, { struct wireless_dev *wdev; + wdev = wilc_create_wiphy(ndev, dev); if (dev) SET_NETDEV_DEV(ndev, dev); if (!wdev) { - PRINT_ER("Can't register WILC Wiphy\n"); + netdev_err(ndev, "Can't register WILC Wiphy\n"); return -1; } @@ -1485,11 +1365,9 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type, vif->netstats.tx_bytes = 0; } - if (register_netdev(ndev)) { - PRINT_ER("Device couldn't be registered - %s\n", - ndev->name); - return -1; - } + ret = register_netdev(ndev); + if (ret) + return ret; vif->iftype = STATION_MODE; vif->mac_opened = 0; diff --git a/drivers/staging/wilc1000/linux_wlan_common.h b/drivers/staging/wilc1000/linux_wlan_common.h deleted file mode 100644 index 5d40f05124c1..000000000000 --- a/drivers/staging/wilc1000/linux_wlan_common.h +++ /dev/null @@ -1,166 +0,0 @@ -#ifndef LINUX_WLAN_COMMON_H -#define LINUX_WLAN_COMMON_H - -enum debug_region { - Generic_debug = 0, - Hostapd_debug, - Hostinf_debug, - CFG80211_debug, - Coreconfig_debug, - Interrupt_debug, - TX_debug, - RX_debug, - Lock_debug, - Tcp_enhance, - Spin_debug, - - Init_debug, - Bus_debug, - Mem_debug, - Firmware_debug, - COMP = 0xFFFFFFFF, -}; - -#define GENERIC_DBG (1 << Generic_debug) -#define HOSTAPD_DBG (1 << Hostapd_debug) -#define HOSTINF_DBG (1 << Hostinf_debug) -#define CORECONFIG_DBG (1 << Coreconfig_debug) -#define CFG80211_DBG (1 << CFG80211_debug) -#define INT_DBG (1 << Interrupt_debug) -#define TX_DBG (1 << TX_debug) -#define RX_DBG (1 << RX_debug) -#define LOCK_DBG (1 << Lock_debug) -#define TCP_ENH 
(1 << Tcp_enhance) -#define SPIN_DEBUG (1 << Spin_debug) -#define INIT_DBG (1 << Init_debug) -#define BUS_DBG (1 << Bus_debug) -#define MEM_DBG (1 << Mem_debug) -#define FIRM_DBG (1 << Firmware_debug) - -#if defined (WILC_DEBUGFS) -extern atomic_t WILC_REGION; -extern atomic_t WILC_DEBUG_LEVEL; - -#define DEBUG BIT(0) -#define INFO BIT(1) -#define WRN BIT(2) -#define ERR BIT(3) - -#define PRINT_D(region, ...) \ - do { \ - if ((atomic_read(&WILC_DEBUG_LEVEL) & DEBUG) && \ - ((atomic_read(&WILC_REGION)) & (region))) { \ - printk("DBG [%s: %d]", __func__, __LINE__); \ - printk(__VA_ARGS__); \ - } \ - } while (0) - -#define PRINT_INFO(region, ...) \ - do { \ - if ((atomic_read(&WILC_DEBUG_LEVEL) & INFO) && \ - ((atomic_read(&WILC_REGION)) & (region))) { \ - printk("INFO [%s]", __func__); \ - printk(__VA_ARGS__); \ - } \ - } while (0) - -#define PRINT_WRN(region, ...) \ - do { \ - if ((atomic_read(&WILC_DEBUG_LEVEL) & WRN) && \ - ((atomic_read(&WILC_REGION)) & (region))) { \ - printk("WRN [%s: %d]", __func__, __LINE__); \ - printk(__VA_ARGS__); \ - } \ - } while (0) - -#define PRINT_ER(...) \ - do { \ - if ((atomic_read(&WILC_DEBUG_LEVEL) & ERR)) { \ - printk("ERR [%s: %d]", __func__, __LINE__); \ - printk(__VA_ARGS__); \ - } \ - } while (0) - -#else - -#define REGION (INIT_DBG | GENERIC_DBG | CFG80211_DBG | FIRM_DBG | HOSTAPD_DBG) - -#define DEBUG 1 -#define INFO 0 -#define WRN 0 - -#define PRINT_D(region, ...) \ - do { \ - if (DEBUG == 1 && ((REGION)&(region))) { \ - printk("DBG [%s: %d]", __func__, __LINE__); \ - printk(__VA_ARGS__); \ - } \ - } while (0) - -#define PRINT_INFO(region, ...) \ - do { \ - if (INFO == 1 && ((REGION)&(region))) { \ - printk("INFO [%s]", __func__); \ - printk(__VA_ARGS__); \ - } \ - } while (0) - -#define PRINT_WRN(region, ...) \ - do { \ - if (WRN == 1 && ((REGION)&(region))) { \ - printk("WRN [%s: %d]", __func__, __LINE__); \ - printk(__VA_ARGS__); \ - } \ - } while (0) - -#define PRINT_ER(...) \ - do { \ - printk("ERR [%s: %d]", __func__, __LINE__); \ - printk(__VA_ARGS__); \ - } while (0) - -#endif - -#define FN_IN /* PRINT_D(">>> \n") */ -#define FN_OUT /* PRINT_D("<<<\n") */ - -#define LINUX_RX_SIZE (96 * 1024) -#define LINUX_TX_SIZE (64 * 1024) - - -#define WILC_MULTICAST_TABLE_SIZE 8 - -#if defined (BEAGLE_BOARD) - #define SPI_CHANNEL 4 - - #if SPI_CHANNEL == 4 - #define MODALIAS "wilc_spi4" - #define GPIO_NUM 162 - #else - #define MODALIAS "wilc_spi3" - #define GPIO_NUM 133 - #endif -#elif defined(PLAT_WMS8304) /* rachel */ - #define MODALIAS "wilc_spi" - #define GPIO_NUM 139 -#elif defined (PLAT_RKXXXX) - #define MODALIAS "WILC_IRQ" - #define GPIO_NUM RK30_PIN3_PD2 /* RK30_PIN3_PA1 */ -/* RK30_PIN3_PD2 */ -/* RK2928_PIN1_PA7 */ - -#elif defined(CUSTOMER_PLATFORM) -/* - TODO : specify MODALIAS name and GPIO number. This is certainly necessary for SPI interface. 
- * - * ex) - * #define MODALIAS "WILC_SPI" - * #define GPIO_NUM 139 - */ - -#else -/* base on SAMA5D3_Xplained Board */ - #define MODALIAS "WILC_SPI" - #define GPIO_NUM 0x44 -#endif -#endif diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c index 27c653a0cdf9..fcbc95d19d8e 100644 --- a/drivers/staging/wilc1000/wilc_debugfs.c +++ b/drivers/staging/wilc1000/wilc_debugfs.c @@ -23,11 +23,12 @@ static struct dentry *wilc_dir; /* * -------------------------------------------------------------------------------- */ +#define DEBUG BIT(0) +#define INFO BIT(1) +#define WRN BIT(2) +#define ERR BIT(3) -#define DBG_REGION_ALL (GENERIC_DBG | HOSTAPD_DBG | HOSTINF_DBG | CORECONFIG_DBG | CFG80211_DBG | INT_DBG | TX_DBG | RX_DBG | LOCK_DBG | INIT_DBG | BUS_DBG | MEM_DBG) #define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR) -atomic_t WILC_REGION = ATOMIC_INIT(INIT_DBG | GENERIC_DBG | CFG80211_DBG | FIRM_DBG | HOSTAPD_DBG); -EXPORT_SYMBOL_GPL(WILC_REGION); atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR); EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL); @@ -68,48 +69,9 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf, atomic_set(&WILC_DEBUG_LEVEL, (int)flag); if (flag == 0) - printk("Debug-level disabled\n"); + printk(KERN_INFO "Debug-level disabled\n"); else - printk("Debug-level enabled\n"); - - return count; -} - -static ssize_t wilc_debug_region_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) -{ - char buf[128]; - int res = 0; - - /* only allow read from start */ - if (*ppos > 0) - return 0; - - res = scnprintf(buf, sizeof(buf), "Debug region: %x\n", atomic_read(&WILC_REGION)); - - return simple_read_from_buffer(userbuf, count, ppos, buf, res); -} - -static ssize_t wilc_debug_region_write(struct file *filp, const char *buf, size_t count, loff_t *ppos) -{ - char buffer[128] = {}; - int flag; - - if (count > sizeof(buffer)) - return -EINVAL; - - if (copy_from_user(buffer, buf, count)) { - return -EFAULT; - } - - flag = buffer[0] - '0'; - - if (flag > DBG_REGION_ALL) { - printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_REGION)); - return -EFAULT; - } - - atomic_set(&WILC_REGION, (int)flag); - printk("new debug-region is %x\n", atomic_read(&WILC_REGION)); + printk(KERN_INFO "Debug-level enabled\n"); return count; } @@ -130,12 +92,11 @@ struct wilc_debugfs_info_t { const char *name; int perm; unsigned int data; - struct file_operations fops; + const struct file_operations fops; }; static struct wilc_debugfs_info_t debugfs_info[] = { { "wilc_debug_level", 0666, (DEBUG | ERR), FOPS(NULL, wilc_debug_level_read, wilc_debug_level_write, NULL), }, - { "wilc_debug_region", 0666, (INIT_DBG | GENERIC_DBG | CFG80211_DBG), FOPS(NULL, wilc_debug_region_read, wilc_debug_region_write, NULL), }, }; static int __init wilc_debugfs_init(void) diff --git a/drivers/staging/wilc1000/wilc_msgqueue.c b/drivers/staging/wilc1000/wilc_msgqueue.c index 098390cdf319..6cb894e58f6d 100644 --- a/drivers/staging/wilc1000/wilc_msgqueue.c +++ b/drivers/staging/wilc1000/wilc_msgqueue.c @@ -1,7 +1,6 @@ #include "wilc_msgqueue.h" #include <linux/spinlock.h> -#include "linux_wlan_common.h" #include <linux/errno.h> #include <linux/slab.h> @@ -11,13 +10,13 @@ * @note copied from FLO glue implementatuion * @version 1.0 */ -int wilc_mq_create(WILC_MsgQueueHandle *pHandle) +int wilc_mq_create(struct message_queue *mq) { - spin_lock_init(&pHandle->strCriticalSection); - sema_init(&pHandle->hSem, 0); - 
	pHandle->pstrMessageList = NULL;
-	pHandle->u32ReceiversCount = 0;
-	pHandle->bExiting = false;
+	spin_lock_init(&mq->lock);
+	sema_init(&mq->sem, 0);
+	INIT_LIST_HEAD(&mq->msg_list);
+	mq->recv_count = 0;
+	mq->exiting = false;
 	return 0;
 }
@@ -27,21 +26,22 @@ int wilc_mq_create(WILC_MsgQueueHandle *pHandle)
  * @note copied from FLO glue implementatuion
  * @version 1.0
  */
-int wilc_mq_destroy(WILC_MsgQueueHandle *pHandle)
+int wilc_mq_destroy(struct message_queue *mq)
 {
-	pHandle->bExiting = true;
+	struct message *msg;
+
+	mq->exiting = true;
 
 	/* Release any waiting receiver thread. */
-	while (pHandle->u32ReceiversCount > 0) {
-		up(&pHandle->hSem);
-		pHandle->u32ReceiversCount--;
+	while (mq->recv_count > 0) {
+		up(&mq->sem);
+		mq->recv_count--;
 	}
 
-	while (pHandle->pstrMessageList) {
-		Message *pstrMessge = pHandle->pstrMessageList->pstrNext;
-
-		kfree(pHandle->pstrMessageList);
-		pHandle->pstrMessageList = pstrMessge;
+	while (!list_empty(&mq->msg_list)) {
+		msg = list_first_entry(&mq->msg_list, struct message, list);
+		list_del(&msg->list);
+		kfree(msg->buf);
 	}
 
 	return 0;
@@ -53,53 +53,39 @@
  * @note copied from FLO glue implementatuion
  * @version 1.0
  */
-int wilc_mq_send(WILC_MsgQueueHandle *pHandle,
-		 const void *pvSendBuffer, u32 u32SendBufferSize)
+int wilc_mq_send(struct message_queue *mq,
+		 const void *send_buf, u32 send_buf_size)
 {
 	unsigned long flags;
-	Message *pstrMessage = NULL;
+	struct message *new_msg = NULL;
 
-	if ((!pHandle) || (u32SendBufferSize == 0) || (!pvSendBuffer)) {
-		PRINT_ER("pHandle or pvSendBuffer is null\n");
-		return -EFAULT;
-	}
+	if (!mq || (send_buf_size == 0) || !send_buf)
+		return -EINVAL;
 
-	if (pHandle->bExiting) {
-		PRINT_ER("pHandle fail\n");
+	if (mq->exiting)
 		return -EFAULT;
-	}
 
 	/* construct a new message */
-	pstrMessage = kmalloc(sizeof(Message), GFP_ATOMIC);
-	if (!pstrMessage)
+	new_msg = kmalloc(sizeof(*new_msg), GFP_ATOMIC);
+	if (!new_msg)
 		return -ENOMEM;
-	pstrMessage->u32Length = u32SendBufferSize;
-	pstrMessage->pstrNext = NULL;
-	pstrMessage->pvBuffer = kmemdup(pvSendBuffer, u32SendBufferSize,
-					GFP_ATOMIC);
-	if (!pstrMessage->pvBuffer) {
-		kfree(pstrMessage);
+	new_msg->len = send_buf_size;
+	INIT_LIST_HEAD(&new_msg->list);
+	new_msg->buf = kmemdup(send_buf, send_buf_size, GFP_ATOMIC);
+	if (!new_msg->buf) {
+		kfree(new_msg);
 		return -ENOMEM;
 	}
 
-	spin_lock_irqsave(&pHandle->strCriticalSection, flags);
+	spin_lock_irqsave(&mq->lock, flags);
 
 	/* add it to the message queue */
-	if (!pHandle->pstrMessageList) {
-		pHandle->pstrMessageList = pstrMessage;
-	} else {
-		Message *pstrTailMsg = pHandle->pstrMessageList;
+	list_add_tail(&new_msg->list, &mq->msg_list);
 
-		while (pstrTailMsg->pstrNext)
-			pstrTailMsg = pstrTailMsg->pstrNext;
-
-		pstrTailMsg->pstrNext = pstrMessage;
-	}
+	spin_unlock_irqrestore(&mq->lock, flags);
 
-	spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
-
-	up(&pHandle->hSem);
+	up(&mq->sem);
 
 	return 0;
 }
@@ -110,62 +96,49 @@ int wilc_mq_send(WILC_MsgQueueHandle *pHandle,
  * @note copied from FLO glue implementatuion
  * @version 1.0
  */
-int wilc_mq_recv(WILC_MsgQueueHandle *pHandle,
-		 void *pvRecvBuffer, u32 u32RecvBufferSize,
-		 u32 *pu32ReceivedLength)
+int wilc_mq_recv(struct message_queue *mq,
+		 void *recv_buf, u32 recv_buf_size, u32 *recv_len)
 {
-	Message *pstrMessage;
+	struct message *msg;
 	unsigned long flags;
 
-	if ((!pHandle) || (u32RecvBufferSize == 0)
-	    || (!pvRecvBuffer) || (!pu32ReceivedLength)) {
-		PRINT_ER("pHandle or pvRecvBuffer is null\n");
+	if (!mq || (recv_buf_size == 0) || !recv_buf || !recv_len)
 		return -EINVAL;
-	}
 
-	if (pHandle->bExiting) {
-		PRINT_ER("pHandle fail\n");
+	if (mq->exiting)
 		return -EFAULT;
-	}
-
-	spin_lock_irqsave(&pHandle->strCriticalSection, flags);
-	pHandle->u32ReceiversCount++;
-	spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
-	down(&pHandle->hSem);
-
-	if (pHandle->bExiting) {
-		PRINT_ER("pHandle fail\n");
-		return -EFAULT;
-	}
+	spin_lock_irqsave(&mq->lock, flags);
+	mq->recv_count++;
+	spin_unlock_irqrestore(&mq->lock, flags);
 
-	spin_lock_irqsave(&pHandle->strCriticalSection, flags);
+	down(&mq->sem);
+	spin_lock_irqsave(&mq->lock, flags);
 
-	pstrMessage = pHandle->pstrMessageList;
-	if (!pstrMessage) {
-		spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
-		PRINT_ER("pstrMessage is null\n");
+	if (list_empty(&mq->msg_list)) {
+		spin_unlock_irqrestore(&mq->lock, flags);
+		up(&mq->sem);
 		return -EFAULT;
 	}
 
 	/* check buffer size */
-	if (u32RecvBufferSize < pstrMessage->u32Length) {
-		spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
-		up(&pHandle->hSem);
-		PRINT_ER("u32RecvBufferSize overflow\n");
+	msg = list_first_entry(&mq->msg_list, struct message, list);
+	if (recv_buf_size < msg->len) {
+		spin_unlock_irqrestore(&mq->lock, flags);
+		up(&mq->sem);
 		return -EOVERFLOW;
 	}
 
 	/* consume the message */
-	pHandle->u32ReceiversCount--;
-	memcpy(pvRecvBuffer, pstrMessage->pvBuffer, pstrMessage->u32Length);
-	*pu32ReceivedLength = pstrMessage->u32Length;
+	mq->recv_count--;
+	memcpy(recv_buf, msg->buf, msg->len);
+	*recv_len = msg->len;
 
-	pHandle->pstrMessageList = pstrMessage->pstrNext;
+	list_del(&msg->list);
 
-	kfree(pstrMessage->pvBuffer);
-	kfree(pstrMessage);
+	kfree(msg->buf);
+	kfree(msg);
 
-	spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return 0;
 }
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.h b/drivers/staging/wilc1000/wilc_msgqueue.h
index d7e0328bacee..846a4840e6e7 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.h
+++ b/drivers/staging/wilc1000/wilc_msgqueue.h
@@ -1,94 +1,28 @@
 #ifndef __WILC_MSG_QUEUE_H__
 #define __WILC_MSG_QUEUE_H__
-/*!
- * @file wilc_msgqueue.h
- * @brief Message Queue OS wrapper functionality
- * @author syounan
- * @sa wilc_oswrapper.h top level OS wrapper file
- * @date 30 Aug 2010
- * @version 1.0
- */
-
 #include <linux/semaphore.h>
-
-/* Message Queue type is a structure */
-typedef struct __Message_struct {
-	void *pvBuffer;
-	u32 u32Length;
-	struct __Message_struct *pstrNext;
-} Message;
-
-typedef struct __MessageQueue_struct {
-	struct semaphore hSem;
-	spinlock_t strCriticalSection;
-	bool bExiting;
-	u32 u32ReceiversCount;
-	Message *pstrMessageList;
-} WILC_MsgQueueHandle;
-
-/*!
- * @brief Creates a new Message queue
- * @details Creates a new Message queue, if the feature
- *		CONFIG_WILC_MSG_QUEUE_IPC_NAME is enabled and pstrAttrs->pcName
- *		is not Null, then this message queue can be used for IPC with
- *		any other message queue having the same name in the system
- * @param[in,out] pHandle handle to the message queue object
- * @param[in] pstrAttrs Optional attributes, NULL for default
- * @return Error code indicating success/failure
- * @author syounan
- * @date 30 Aug 2010
- * @version 1.0
- */
-int wilc_mq_create(WILC_MsgQueueHandle *pHandle);
-
-/*!
- * @brief Sends a message - * @details Sends a message, this API will block until the message is - * actually sent or until it is timedout (as long as the feature - * CONFIG_WILC_MSG_QUEUE_TIMEOUT is enabled and pstrAttrs->u32Timeout - * is not set to WILC_OS_INFINITY), zero timeout is a valid value - * @param[in] pHandle handle to the message queue object - * @param[in] pvSendBuffer pointer to the data to send - * @param[in] u32SendBufferSize the size of the data to send - * @param[in] pstrAttrs Optional attributes, NULL for default - * @return Error code indicating success/failure - * @author syounan - * @date 30 Aug 2010 - * @version 1.0 - */ -int wilc_mq_send(WILC_MsgQueueHandle *pHandle, - const void *pvSendBuffer, u32 u32SendBufferSize); - -/*! - * @brief Receives a message - * @details Receives a message, this API will block until a message is - * received or until it is timedout (as long as the feature - * CONFIG_WILC_MSG_QUEUE_TIMEOUT is enabled and pstrAttrs->u32Timeout - * is not set to WILC_OS_INFINITY), zero timeout is a valid value - * @param[in] pHandle handle to the message queue object - * @param[out] pvRecvBuffer pointer to a buffer to fill with the received message - * @param[in] u32RecvBufferSize the size of the receive buffer - * @param[out] pu32ReceivedLength the length of received data - * @param[in] pstrAttrs Optional attributes, NULL for default - * @return Error code indicating success/failure - * @author syounan - * @date 30 Aug 2010 - * @version 1.0 - */ -int wilc_mq_recv(WILC_MsgQueueHandle *pHandle, - void *pvRecvBuffer, u32 u32RecvBufferSize, - u32 *pu32ReceivedLength); - -/*! - * @brief Destroys an existing Message queue - * @param[in] pHandle handle to the message queue object - * @param[in] pstrAttrs Optional attributes, NULL for default - * @return Error code indicating success/failure - * @author syounan - * @date 30 Aug 2010 - * @version 1.0 - */ -int wilc_mq_destroy(WILC_MsgQueueHandle *pHandle); +#include <linux/list.h> + +struct message { + void *buf; + u32 len; + struct list_head list; +}; + +struct message_queue { + struct semaphore sem; + spinlock_t lock; + bool exiting; + u32 recv_count; + struct list_head msg_list; +}; + +int wilc_mq_create(struct message_queue *mq); +int wilc_mq_send(struct message_queue *mq, + const void *send_buf, u32 send_buf_size); +int wilc_mq_recv(struct message_queue *mq, + void *recv_buf, u32 recv_buf_size, u32 *recv_len); +int wilc_mq_destroy(struct message_queue *mq); #endif diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c index e961b5004902..a839a7967dd8 100644 --- a/drivers/staging/wilc1000/wilc_sdio.c +++ b/drivers/staging/wilc1000/wilc_sdio.c @@ -30,18 +30,19 @@ static const struct sdio_device_id wilc_sdio_ids[] = { #define WILC_SDIO_BLOCK_SIZE 512 -typedef struct { +struct wilc_sdio { bool irq_gpio; u32 block_size; int nint; #define MAX_NUN_INT_THRPT_ENH2 (5) /* Max num interrupts allowed in registers 0xf7, 0xf8 */ int has_thrpt_enh3; -} wilc_sdio_t; +}; -static wilc_sdio_t g_sdio; +static struct wilc_sdio g_sdio; static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data); static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data); +static int sdio_init(struct wilc *wilc, bool resume); static void wilc_sdio_interrupt(struct sdio_func *func) { @@ -50,7 +51,7 @@ static void wilc_sdio_interrupt(struct sdio_func *func) sdio_claim_host(func); } -static int wilc_sdio_cmd52(struct wilc *wilc, sdio_cmd52_t *cmd) +static int wilc_sdio_cmd52(struct wilc *wilc, struct 
sdio_cmd52 *cmd) { struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev); int ret; @@ -80,7 +81,7 @@ static int wilc_sdio_cmd52(struct wilc *wilc, sdio_cmd52_t *cmd) } -static int wilc_sdio_cmd53(struct wilc *wilc, sdio_cmd53_t *cmd) +static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd) { struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev); int size, ret; @@ -142,11 +143,82 @@ static void linux_sdio_remove(struct sdio_func *func) wilc_netdev_cleanup(sdio_get_drvdata(func)); } +static int sdio_reset(struct wilc *wilc) +{ + struct sdio_cmd52 cmd; + int ret; + struct sdio_func *func = dev_to_sdio_func(wilc->dev); + + cmd.read_write = 1; + cmd.function = 0; + cmd.raw = 0; + cmd.address = 0x6; + cmd.data = 0x8; + ret = wilc_sdio_cmd52(wilc, &cmd); + if (ret) { + dev_err(&func->dev, "Fail cmd 52, reset cmd ...\n"); + return ret; + } + return 0; +} + +static int wilc_sdio_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct wilc *wilc = sdio_get_drvdata(func); + int ret; + + dev_info(dev, "sdio suspend\n"); + chip_wakeup(wilc); + + if (!wilc->suspend_event) { + wilc_chip_sleep_manually(wilc); + } else { + host_sleep_notify(wilc); + chip_allow_sleep(wilc); + } + + ret = sdio_reset(wilc); + if (ret) { + dev_err(&func->dev, "Fail reset sdio\n"); + return ret; + } + sdio_claim_host(func); + + return 0; +} + +static int wilc_sdio_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct wilc *wilc = sdio_get_drvdata(func); + + dev_info(dev, "sdio resume\n"); + sdio_release_host(func); + chip_wakeup(wilc); + sdio_init(wilc, true); + + if (wilc->suspend_event) + host_wakeup_notify(wilc); + + chip_allow_sleep(wilc); + + return 0; +} + +static const struct dev_pm_ops wilc_sdio_pm_ops = { + .suspend = wilc_sdio_suspend, + .resume = wilc_sdio_resume, +}; + static struct sdio_driver wilc1000_sdio_driver = { .name = SDIO_MODALIAS, .id_table = wilc_sdio_ids, .probe = linux_sdio_probe, .remove = linux_sdio_remove, + .drv = { + .pm = &wilc_sdio_pm_ops, + } }; module_driver(wilc1000_sdio_driver, sdio_register_driver, @@ -185,11 +257,6 @@ static void wilc_sdio_disable_interrupt(struct wilc *dev) dev_info(&func->dev, "wilc_sdio_disable_interrupt OUT\n"); } -static int wilc_sdio_init(void) -{ - return 1; -} - /******************************************** * * Function 0 @@ -199,7 +266,7 @@ static int wilc_sdio_init(void) static int sdio_set_func0_csa_address(struct wilc *wilc, u32 adr) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; int ret; /** @@ -240,7 +307,7 @@ _fail_: static int sdio_set_func0_block_size(struct wilc *wilc, u32 block_size) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; int ret; cmd.read_write = 1; @@ -276,7 +343,7 @@ _fail_: static int sdio_set_func1_block_size(struct wilc *wilc, u32 block_size) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; int ret; cmd.read_write = 1; @@ -315,7 +382,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data) data = cpu_to_le32(data); if ((addr >= 0xf0) && (addr <= 0xff)) { - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; cmd.read_write = 1; cmd.function = 0; @@ -329,7 +396,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data) goto _fail_; } } else { - sdio_cmd53_t cmd; + struct sdio_cmd53 cmd; /** * set the AHB address @@ -364,7 +431,7 @@ static int 
sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); u32 block_size = g_sdio.block_size; - sdio_cmd53_t cmd; + struct sdio_cmd53 cmd; int nblk, nleft, ret; cmd.read_write = 1; @@ -455,7 +522,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data) int ret; if ((addr >= 0xf0) && (addr <= 0xff)) { - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; cmd.read_write = 0; cmd.function = 0; @@ -469,7 +536,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data) } *data = cmd.data; } else { - sdio_cmd53_t cmd; + struct sdio_cmd53 cmd; if (!sdio_set_func0_csa_address(wilc, addr)) goto _fail_; @@ -504,7 +571,7 @@ static int sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); u32 block_size = g_sdio.block_size; - sdio_cmd53_t cmd; + struct sdio_cmd53 cmd; int nblk, nleft, ret; cmd.read_write = 0; @@ -600,22 +667,16 @@ static int sdio_deinit(struct wilc *wilc) return 1; } -static int sdio_init(struct wilc *wilc) +static int sdio_init(struct wilc *wilc, bool resume) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; int loop, ret; u32 chipid; - memset(&g_sdio, 0, sizeof(wilc_sdio_t)); - - g_sdio.irq_gpio = (wilc->dev_irq_num); - - if (!wilc_sdio_init()) { - dev_err(&func->dev, "Failed io init bus...\n"); - return 0; - } else { - return 0; + if (!resume) { + memset(&g_sdio, 0, sizeof(struct wilc_sdio)); + g_sdio.irq_gpio = wilc->dev_irq_num; } /** @@ -706,16 +767,19 @@ static int sdio_init(struct wilc *wilc) /** * make sure can read back chip id correctly **/ - if (!sdio_read_reg(wilc, 0x1000, &chipid)) { - dev_err(&func->dev, "Fail cmd read chip id...\n"); - goto _fail_; + if (!resume) { + if (!sdio_read_reg(wilc, 0x1000, &chipid)) { + dev_err(&func->dev, "Fail cmd read chip id...\n"); + goto _fail_; + } + dev_err(&func->dev, "chipid (%08x)\n", chipid); + if ((chipid & 0xfff) > 0x2a0) + g_sdio.has_thrpt_enh3 = 1; + else + g_sdio.has_thrpt_enh3 = 0; + dev_info(&func->dev, "has_thrpt_enh3 = %d...\n", + g_sdio.has_thrpt_enh3); } - dev_err(&func->dev, "chipid (%08x)\n", chipid); - if ((chipid & 0xfff) > 0x2a0) - g_sdio.has_thrpt_enh3 = 1; - else - g_sdio.has_thrpt_enh3 = 0; - dev_info(&func->dev, "has_thrpt_enh3 = %d...\n", g_sdio.has_thrpt_enh3); return 1; @@ -727,7 +791,7 @@ _fail_: static int sdio_read_size(struct wilc *wilc, u32 *size) { u32 tmp; - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; /** * Read DMA count in words @@ -756,7 +820,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); u32 tmp; - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; sdio_read_size(wilc, &tmp); @@ -835,7 +899,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val) if ((val & EN_VMM) == EN_VMM) reg |= BIT(7); if (reg) { - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; cmd.read_write = 1; cmd.function = 0; @@ -865,7 +929,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val) ret = 1; for (i = 0; i < g_sdio.nint; i++) { if (flags & 1) { - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; cmd.read_write = 1; cmd.function = 0; @@ -913,7 +977,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val) vmm_ctl |= BIT(2); if (vmm_ctl) { - sdio_cmd52_t cmd; + struct sdio_cmd52 cmd; cmd.read_write = 1; cmd.function = 0; diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c index 86de50c9f7f5..d41b8b6790af 100644 --- 
a/drivers/staging/wilc1000/wilc_spi.c +++ b/drivers/staging/wilc1000/wilc_spi.c @@ -18,19 +18,18 @@ #include <linux/spi/spi.h> #include <linux/of_gpio.h> -#include "linux_wlan_common.h" #include <linux/string.h> #include "wilc_wlan_if.h" #include "wilc_wlan.h" #include "wilc_wfi_netdevice.h" -typedef struct { +struct wilc_spi { int crc_off; int nint; int has_thrpt_enh; -} wilc_spi_t; +}; -static wilc_spi_t g_spi; +static struct wilc_spi g_spi; static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32); static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32); @@ -120,8 +119,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len) #define USE_SPI_DMA 0 -static const struct wilc1000_ops wilc1000_spi_ops; - static int wilc_bus_probe(struct spi_device *spi) { int ret, gpio; @@ -153,7 +150,7 @@ static const struct of_device_id wilc1000_of_match[] = { }; MODULE_DEVICE_TABLE(of, wilc1000_of_match); -struct spi_driver wilc1000_spi_driver = { +static struct spi_driver wilc1000_spi_driver = { .driver = { .name = MODALIAS, .of_match_table = wilc1000_of_match, @@ -382,9 +379,8 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz, break; } - if (result != N_OK) { + if (result != N_OK) return result; - } if (!g_spi.crc_off) wb[len - 1] = (crc7(0x7f, (const u8 *)&wb[0], len - 1)) << 1; @@ -421,9 +417,8 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz, return result; } /* zero spi write buffers. */ - for (wix = len; wix < len2; wix++) { + for (wix = len; wix < len2; wix++) wb[wix] = 0; - } rix = len; if (wilc_spi_tx_rx(wilc, wb, rb, len2)) { @@ -447,8 +442,9 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz, /* } while(&rptr[1] <= &rb[len2]); */ if (rsp != cmd) { - dev_err(&spi->dev, "Failed cmd response, cmd (%02x)" - ", resp (%02x)\n", cmd, rsp); + dev_err(&spi->dev, + "Failed cmd response, cmd (%02x), resp (%02x)\n", + cmd, rsp); result = N_FAIL; return result; } @@ -516,7 +512,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz, crc[0] = rb[rix++]; crc[1] = rb[rix++]; } else { - dev_err(&spi->dev,"buffer overrun when reading crc.\n"); + dev_err(&spi->dev, "buffer overrun when reading crc.\n"); result = N_FAIL; return result; } @@ -525,9 +521,8 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz, int ix; /* some data may be read in response to dummy bytes. 
*/ - for (ix = 0; (rix < len2) && (ix < sz); ) { + for (ix = 0; (rix < len2) && (ix < sz); ) b[ix++] = rb[rix++]; - } sz -= ix; @@ -682,7 +677,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz) **/ if (!g_spi.crc_off) { if (wilc_spi_tx(wilc, crc, 2)) { - dev_err(&spi->dev,"Failed data block crc write, bus error...\n"); + dev_err(&spi->dev, "Failed data block crc write, bus error...\n"); result = N_FAIL; break; } @@ -713,9 +708,8 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat) dat = cpu_to_le32(dat); result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4, 0); - if (result != N_OK) { + if (result != N_OK) dev_err(&spi->dev, "Failed internal write cmd...\n"); - } return result; } @@ -758,9 +752,8 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data) } result = spi_cmd_complete(wilc, cmd, addr, (u8 *)&data, 4, clockless); - if (result != N_OK) { + if (result != N_OK) dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr); - } return result; } @@ -788,9 +781,8 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) * Data **/ result = spi_data_write(wilc, buf, size); - if (result != N_OK) { + if (result != N_OK) dev_err(&spi->dev, "Failed block data write...\n"); - } return 1; } @@ -852,7 +844,7 @@ static int _wilc_spi_deinit(struct wilc *wilc) return 1; } -static int wilc_spi_init(struct wilc *wilc) +static int wilc_spi_init(struct wilc *wilc, bool resume) { struct spi_device *spi = to_spi_device(wilc->dev); u32 reg; @@ -869,7 +861,7 @@ static int wilc_spi_init(struct wilc *wilc) return 1; } - memset(&g_spi, 0, sizeof(wilc_spi_t)); + memset(&g_spi, 0, sizeof(struct wilc_spi)); /** * configure protocol @@ -1076,7 +1068,7 @@ static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val) ret = wilc_spi_write_reg(wilc, WILC_VMM_CORE_CTL, 1); if (!ret) { - dev_err(&spi->dev,"fail write reg vmm_core_ctl...\n"); + dev_err(&spi->dev, "fail write reg vmm_core_ctl...\n"); goto _fail_; } } @@ -1126,9 +1118,9 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint) return 0; } - for (i = 0; (i < 5) && (nint > 0); i++, nint--) { + for (i = 0; (i < 5) && (nint > 0); i++, nint--) reg |= (BIT((27 + i))); - } + ret = wilc_spi_write_reg(wilc, WILC_INTR_ENABLE, reg); if (!ret) { dev_err(&spi->dev, "Failed write reg (%08x)...\n", @@ -1143,9 +1135,8 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint) return 0; } - for (i = 0; (i < 3) && (nint > 0); i++, nint--) { + for (i = 0; (i < 3) && (nint > 0); i++, nint--) reg |= BIT(i); - } ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, ®); if (!ret) { diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index 53fb2d4bb0bd..b76622d1adc3 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -74,6 +74,10 @@ static const struct ieee80211_txrx_stypes } }; +static const struct wiphy_wowlan_support wowlan_support = { + .flags = WIPHY_WOWLAN_ANY +}; + #define WILC_WFI_DWELL_PASSIVE 100 #define WILC_WFI_DWELL_ACTIVE 40 @@ -89,7 +93,7 @@ static const struct ieee80211_txrx_stypes extern int wilc_mac_open(struct net_device *ndev); extern int wilc_mac_close(struct net_device *ndev); -static tstrNetworkInfo last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW]; +static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW]; static u32 last_scanned_cnt; struct timer_list wilc_during_ip_timer; static struct timer_list hAgingTimer; @@ 
-153,7 +157,7 @@ static u8 wlan_channel = INVALID_CHANNEL; static u8 curr_channel; static u8 p2p_oui[] = {0x50, 0x6f, 0x9A, 0x09}; static u8 p2p_local_random = 0x01; -static u8 p2p_recv_random = 0x00; +static u8 p2p_recv_random; static u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03}; static bool wilc_ie; @@ -188,29 +192,29 @@ static void clear_shadow_scan(void) if (op_ifcs == 0) { del_timer_sync(&hAgingTimer); - PRINT_INFO(CORECONFIG_DBG, "destroy aging timer\n"); for (i = 0; i < last_scanned_cnt; i++) { - if (last_scanned_shadow[last_scanned_cnt].pu8IEs) { - kfree(last_scanned_shadow[i].pu8IEs); - last_scanned_shadow[last_scanned_cnt].pu8IEs = NULL; + if (last_scanned_shadow[last_scanned_cnt].ies) { + kfree(last_scanned_shadow[i].ies); + last_scanned_shadow[last_scanned_cnt].ies = NULL; } - wilc_free_join_params(last_scanned_shadow[i].pJoinParams); - last_scanned_shadow[i].pJoinParams = NULL; + kfree(last_scanned_shadow[i].join_params); + last_scanned_shadow[i].join_params = NULL; } last_scanned_cnt = 0; } } -static u32 get_rssi_avg(tstrNetworkInfo *network_info) +static u32 get_rssi_avg(struct network_info *network_info) { u8 i; int rssi_v = 0; - u8 num_rssi = (network_info->strRssi.u8Full) ? NUM_RSSI : (network_info->strRssi.u8Index); + u8 num_rssi = (network_info->str_rssi.u8Full) ? + NUM_RSSI : (network_info->str_rssi.u8Index); for (i = 0; i < num_rssi; i++) - rssi_v += network_info->strRssi.as8RSSI[i]; + rssi_v += network_info->str_rssi.as8RSSI[i]; rssi_v /= num_rssi; return rssi_v; @@ -224,28 +228,36 @@ static void refresh_scan(void *user_void, u8 all, bool direct_scan) int i; int rssi = 0; - priv = (struct wilc_priv *)user_void; + priv = user_void; wiphy = priv->dev->ieee80211_ptr->wiphy; for (i = 0; i < last_scanned_cnt; i++) { - tstrNetworkInfo *network_info; + struct network_info *network_info; network_info = &last_scanned_shadow[i]; - if (!network_info->u8Found || all) { + if (!network_info->found || all) { s32 freq; struct ieee80211_channel *channel; if (network_info) { - freq = ieee80211_channel_to_frequency((s32)network_info->u8channel, IEEE80211_BAND_2GHZ); + freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); rssi = get_rssi_avg(network_info); - if (memcmp("DIRECT-", network_info->au8ssid, 7) || + if (memcmp("DIRECT-", network_info->ssid, 7) || direct_scan) { - bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, network_info->au8bssid, network_info->u64Tsf, network_info->u16CapInfo, - network_info->u16BeaconPeriod, (const u8 *)network_info->pu8IEs, - (size_t)network_info->u16IEsLen, (((s32)rssi) * 100), GFP_KERNEL); + bss = cfg80211_inform_bss(wiphy, + channel, + CFG80211_BSS_FTYPE_UNKNOWN, + network_info->bssid, + network_info->tsf_hi, + network_info->cap_info, + network_info->beacon_period, + (const u8 *)network_info->ies, + (size_t)network_info->ies_len, + (s32)rssi * 100, + GFP_KERNEL); cfg80211_put_bss(wiphy, bss); } } @@ -258,7 +270,7 @@ static void reset_shadow_found(void) int i; for (i = 0; i < last_scanned_cnt; i++) - last_scanned_shadow[i].u8Found = 0; + last_scanned_shadow[i].found = 0; } static void update_scan_time(void) @@ -266,7 +278,7 @@ static void update_scan_time(void) int i; for (i = 0; i < last_scanned_cnt; i++) - last_scanned_shadow[i].u32TimeRcvdInScan = jiffies; + last_scanned_shadow[i].time_scan = jiffies; } static void remove_network_from_shadow(unsigned long arg) @@ -276,13 +288,12 @@ static void remove_network_from_shadow(unsigned long arg) 
for (i = 0; i < last_scanned_cnt; i++) { - if (time_after(now, last_scanned_shadow[i].u32TimeRcvdInScan + (unsigned long)(SCAN_RESULT_EXPIRE))) { - PRINT_D(CFG80211_DBG, "Network expired in ScanShadow: %s\n", last_scanned_shadow[i].au8ssid); - - kfree(last_scanned_shadow[i].pu8IEs); - last_scanned_shadow[i].pu8IEs = NULL; + if (time_after(now, last_scanned_shadow[i].time_scan + + (unsigned long)(SCAN_RESULT_EXPIRE))) { + kfree(last_scanned_shadow[i].ies); + last_scanned_shadow[i].ies = NULL; - wilc_free_join_params(last_scanned_shadow[i].pJoinParams); + kfree(last_scanned_shadow[i].join_params); for (j = i; (j < last_scanned_cnt - 1); j++) last_scanned_shadow[j] = last_scanned_shadow[j + 1]; @@ -291,37 +302,31 @@ static void remove_network_from_shadow(unsigned long arg) } } - PRINT_D(CFG80211_DBG, "Number of cached networks: %d\n", - last_scanned_cnt); if (last_scanned_cnt != 0) { hAgingTimer.data = arg; mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME)); - } else { - PRINT_D(CFG80211_DBG, "No need to restart Aging timer\n"); } } static void clear_duringIP(unsigned long arg) { - PRINT_D(GENERIC_DBG, "GO:IP Obtained , enable scan\n"); wilc_optaining_ip = false; } -static int is_network_in_shadow(tstrNetworkInfo *pstrNetworkInfo, +static int is_network_in_shadow(struct network_info *pstrNetworkInfo, void *user_void) { int state = -1; int i; if (last_scanned_cnt == 0) { - PRINT_D(CFG80211_DBG, "Starting Aging timer\n"); hAgingTimer.data = (unsigned long)user_void; mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME)); state = -1; } else { for (i = 0; i < last_scanned_cnt; i++) { - if (memcmp(last_scanned_shadow[i].au8bssid, - pstrNetworkInfo->au8bssid, 6) == 0) { + if (memcmp(last_scanned_shadow[i].bssid, + pstrNetworkInfo->bssid, 6) == 0) { state = i; break; } @@ -330,58 +335,57 @@ static int is_network_in_shadow(tstrNetworkInfo *pstrNetworkInfo, return state; } -static void add_network_to_shadow(tstrNetworkInfo *pstrNetworkInfo, +static void add_network_to_shadow(struct network_info *pstrNetworkInfo, void *user_void, void *pJoinParams) { int ap_found = is_network_in_shadow(pstrNetworkInfo, user_void); u32 ap_index = 0; u8 rssi_index = 0; - if (last_scanned_cnt >= MAX_NUM_SCANNED_NETWORKS_SHADOW) { - PRINT_D(CFG80211_DBG, "Shadow network reached its maximum limit\n"); + if (last_scanned_cnt >= MAX_NUM_SCANNED_NETWORKS_SHADOW) return; - } + if (ap_found == -1) { ap_index = last_scanned_cnt; last_scanned_cnt++; } else { ap_index = ap_found; } - rssi_index = last_scanned_shadow[ap_index].strRssi.u8Index; - last_scanned_shadow[ap_index].strRssi.as8RSSI[rssi_index++] = pstrNetworkInfo->s8rssi; + rssi_index = last_scanned_shadow[ap_index].str_rssi.u8Index; + last_scanned_shadow[ap_index].str_rssi.as8RSSI[rssi_index++] = pstrNetworkInfo->rssi; if (rssi_index == NUM_RSSI) { rssi_index = 0; - last_scanned_shadow[ap_index].strRssi.u8Full = 1; - } - last_scanned_shadow[ap_index].strRssi.u8Index = rssi_index; - last_scanned_shadow[ap_index].s8rssi = pstrNetworkInfo->s8rssi; - last_scanned_shadow[ap_index].u16CapInfo = pstrNetworkInfo->u16CapInfo; - last_scanned_shadow[ap_index].u8SsidLen = pstrNetworkInfo->u8SsidLen; - memcpy(last_scanned_shadow[ap_index].au8ssid, - pstrNetworkInfo->au8ssid, pstrNetworkInfo->u8SsidLen); - memcpy(last_scanned_shadow[ap_index].au8bssid, - pstrNetworkInfo->au8bssid, ETH_ALEN); - last_scanned_shadow[ap_index].u16BeaconPeriod = pstrNetworkInfo->u16BeaconPeriod; - last_scanned_shadow[ap_index].u8DtimPeriod = pstrNetworkInfo->u8DtimPeriod; - 
last_scanned_shadow[ap_index].u8channel = pstrNetworkInfo->u8channel; - last_scanned_shadow[ap_index].u16IEsLen = pstrNetworkInfo->u16IEsLen; - last_scanned_shadow[ap_index].u64Tsf = pstrNetworkInfo->u64Tsf; + last_scanned_shadow[ap_index].str_rssi.u8Full = 1; + } + last_scanned_shadow[ap_index].str_rssi.u8Index = rssi_index; + last_scanned_shadow[ap_index].rssi = pstrNetworkInfo->rssi; + last_scanned_shadow[ap_index].cap_info = pstrNetworkInfo->cap_info; + last_scanned_shadow[ap_index].ssid_len = pstrNetworkInfo->ssid_len; + memcpy(last_scanned_shadow[ap_index].ssid, + pstrNetworkInfo->ssid, pstrNetworkInfo->ssid_len); + memcpy(last_scanned_shadow[ap_index].bssid, + pstrNetworkInfo->bssid, ETH_ALEN); + last_scanned_shadow[ap_index].beacon_period = pstrNetworkInfo->beacon_period; + last_scanned_shadow[ap_index].dtim_period = pstrNetworkInfo->dtim_period; + last_scanned_shadow[ap_index].ch = pstrNetworkInfo->ch; + last_scanned_shadow[ap_index].ies_len = pstrNetworkInfo->ies_len; + last_scanned_shadow[ap_index].tsf_hi = pstrNetworkInfo->tsf_hi; if (ap_found != -1) - kfree(last_scanned_shadow[ap_index].pu8IEs); - last_scanned_shadow[ap_index].pu8IEs = - kmalloc(pstrNetworkInfo->u16IEsLen, GFP_KERNEL); - memcpy(last_scanned_shadow[ap_index].pu8IEs, - pstrNetworkInfo->pu8IEs, pstrNetworkInfo->u16IEsLen); - last_scanned_shadow[ap_index].u32TimeRcvdInScan = jiffies; - last_scanned_shadow[ap_index].u32TimeRcvdInScanCached = jiffies; - last_scanned_shadow[ap_index].u8Found = 1; + kfree(last_scanned_shadow[ap_index].ies); + last_scanned_shadow[ap_index].ies = kmalloc(pstrNetworkInfo->ies_len, + GFP_KERNEL); + memcpy(last_scanned_shadow[ap_index].ies, + pstrNetworkInfo->ies, pstrNetworkInfo->ies_len); + last_scanned_shadow[ap_index].time_scan = jiffies; + last_scanned_shadow[ap_index].time_scan_cached = jiffies; + last_scanned_shadow[ap_index].found = 1; if (ap_found != -1) - wilc_free_join_params(last_scanned_shadow[ap_index].pJoinParams); - last_scanned_shadow[ap_index].pJoinParams = pJoinParams; + kfree(last_scanned_shadow[ap_index].join_params); + last_scanned_shadow[ap_index].join_params = pJoinParams; } static void CfgScanResult(enum scan_event scan_event, - tstrNetworkInfo *network_info, + struct network_info *network_info, void *user_void, void *join_params) { @@ -391,7 +395,7 @@ static void CfgScanResult(enum scan_event scan_event, struct ieee80211_channel *channel; struct cfg80211_bss *bss = NULL; - priv = (struct wilc_priv *)user_void; + priv = user_void; if (priv->bCfgScanning) { if (scan_event == SCAN_EVENT_NETWORK_FOUND) { wiphy = priv->dev->ieee80211_ptr->wiphy; @@ -400,67 +404,53 @@ static void CfgScanResult(enum scan_event scan_event, return; if (wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && - (((s32)network_info->s8rssi * 100) < 0 || - ((s32)network_info->s8rssi * 100) > 100)) { - PRINT_ER("wiphy signal type fial\n"); + (((s32)network_info->rssi * 100) < 0 || + ((s32)network_info->rssi * 100) > 100)) return; - } if (network_info) { - s32Freq = ieee80211_channel_to_frequency((s32)network_info->u8channel, IEEE80211_BAND_2GHZ); + s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, s32Freq); if (!channel) return; - PRINT_INFO(CFG80211_DBG, "Network Info:: CHANNEL Frequency: %d, RSSI: %d, CapabilityInfo: %d," - "BeaconPeriod: %d\n", channel->center_freq, (((s32)network_info->s8rssi) * 100), - network_info->u16CapInfo, network_info->u16BeaconPeriod); - - if (network_info->bNewNetwork) { + if 
(network_info->new_network) { if (priv->u32RcvdChCount < MAX_NUM_SCANNED_NETWORKS) { - PRINT_D(CFG80211_DBG, "Network %s found\n", network_info->au8ssid); priv->u32RcvdChCount++; - if (!join_params) - PRINT_INFO(CORECONFIG_DBG, ">> Something really bad happened\n"); add_network_to_shadow(network_info, priv, join_params); - if (!(memcmp("DIRECT-", network_info->au8ssid, 7))) { - bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, network_info->au8bssid, network_info->u64Tsf, network_info->u16CapInfo, - network_info->u16BeaconPeriod, (const u8 *)network_info->pu8IEs, - (size_t)network_info->u16IEsLen, (((s32)network_info->s8rssi) * 100), GFP_KERNEL); + if (!(memcmp("DIRECT-", network_info->ssid, 7))) { + bss = cfg80211_inform_bss(wiphy, + channel, + CFG80211_BSS_FTYPE_UNKNOWN, + network_info->bssid, + network_info->tsf_hi, + network_info->cap_info, + network_info->beacon_period, + (const u8 *)network_info->ies, + (size_t)network_info->ies_len, + (s32)network_info->rssi * 100, + GFP_KERNEL); cfg80211_put_bss(wiphy, bss); } - - - } else { - PRINT_ER("Discovered networks exceeded the max limit\n"); } } else { u32 i; for (i = 0; i < priv->u32RcvdChCount; i++) { - if (memcmp(last_scanned_shadow[i].au8bssid, network_info->au8bssid, 6) == 0) { - PRINT_D(CFG80211_DBG, "Update RSSI of %s\n", last_scanned_shadow[i].au8ssid); - - last_scanned_shadow[i].s8rssi = network_info->s8rssi; - last_scanned_shadow[i].u32TimeRcvdInScan = jiffies; + if (memcmp(last_scanned_shadow[i].bssid, network_info->bssid, 6) == 0) { + last_scanned_shadow[i].rssi = network_info->rssi; + last_scanned_shadow[i].time_scan = jiffies; break; } } } } } else if (scan_event == SCAN_EVENT_DONE) { - PRINT_D(CFG80211_DBG, "Scan Done[%p]\n", priv->dev); - PRINT_D(CFG80211_DBG, "Refreshing Scan ...\n"); refresh_scan(priv, 1, false); - if (priv->u32RcvdChCount > 0) - PRINT_D(CFG80211_DBG, "%d Network(s) found\n", priv->u32RcvdChCount); - else - PRINT_D(CFG80211_DBG, "No networks found\n"); - down(&(priv->hSemScanReq)); if (priv->pstrScanReq) { @@ -473,7 +463,6 @@ static void CfgScanResult(enum scan_event scan_event, } else if (scan_event == SCAN_EVENT_ABORTED) { down(&(priv->hSemScanReq)); - PRINT_D(CFG80211_DBG, "Scan Aborted\n"); if (priv->pstrScanReq) { update_scan_time(); refresh_scan(priv, 1, false); @@ -490,9 +479,9 @@ static void CfgScanResult(enum scan_event scan_event, int wilc_connecting; static void CfgConnectResult(enum conn_event enuConnDisconnEvent, - tstrConnectInfo *pstrConnectInfo, + struct connect_info *pstrConnectInfo, u8 u8MacStatus, - tstrDisconnectNotifInfo *pstrDisconnectNotifInfo, + struct disconnect_info *pstrDisconnectNotifInfo, void *pUserVoid) { struct wilc_priv *priv; @@ -504,49 +493,47 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent, wilc_connecting = 0; - priv = (struct wilc_priv *)pUserVoid; + priv = pUserVoid; dev = priv->dev; vif = netdev_priv(dev); wl = vif->wilc; - pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv; + pstrWFIDrv = (struct host_if_drv *)priv->hif_drv; if (enuConnDisconnEvent == CONN_DISCONN_EVENT_CONN_RESP) { u16 u16ConnectStatus; - u16ConnectStatus = pstrConnectInfo->u16ConnectStatus; - - PRINT_D(CFG80211_DBG, " Connection response received = %d\n", u8MacStatus); + u16ConnectStatus = pstrConnectInfo->status; if ((u8MacStatus == MAC_DISCONNECTED) && - (pstrConnectInfo->u16ConnectStatus == SUCCESSFUL_STATUSCODE)) { + (pstrConnectInfo->status == SUCCESSFUL_STATUSCODE)) { u16ConnectStatus = WLAN_STATUS_UNSPECIFIED_FAILURE; - 
wilc_wlan_set_bssid(priv->dev, NullBssid); + wilc_wlan_set_bssid(priv->dev, NullBssid, + STATION_MODE); eth_zero_addr(wilc_connected_ssid); if (!pstrWFIDrv->p2p_connect) wlan_channel = INVALID_CHANNEL; - PRINT_ER("Unspecified failure: Connection status %d : MAC status = %d\n", u16ConnectStatus, u8MacStatus); + netdev_err(dev, "Unspecified failure\n"); } if (u16ConnectStatus == WLAN_STATUS_SUCCESS) { bool bNeedScanRefresh = false; u32 i; - PRINT_INFO(CFG80211_DBG, "Connection Successful:: BSSID: %x%x%x%x%x%x\n", pstrConnectInfo->au8bssid[0], - pstrConnectInfo->au8bssid[1], pstrConnectInfo->au8bssid[2], pstrConnectInfo->au8bssid[3], pstrConnectInfo->au8bssid[4], pstrConnectInfo->au8bssid[5]); - memcpy(priv->au8AssociatedBss, pstrConnectInfo->au8bssid, ETH_ALEN); + memcpy(priv->au8AssociatedBss, pstrConnectInfo->bssid, ETH_ALEN); for (i = 0; i < last_scanned_cnt; i++) { - if (memcmp(last_scanned_shadow[i].au8bssid, - pstrConnectInfo->au8bssid, ETH_ALEN) == 0) { + if (memcmp(last_scanned_shadow[i].bssid, + pstrConnectInfo->bssid, + ETH_ALEN) == 0) { unsigned long now = jiffies; if (time_after(now, - last_scanned_shadow[i].u32TimeRcvdInScanCached + (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ)))) { + last_scanned_shadow[i].time_scan_cached + + (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ)))) bNeedScanRefresh = true; - } break; } @@ -556,34 +543,27 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent, refresh_scan(priv, 1, true); } - - PRINT_D(CFG80211_DBG, "Association request info elements length = %zu\n", pstrConnectInfo->ReqIEsLen); - - PRINT_D(CFG80211_DBG, "Association response info elements length = %d\n", pstrConnectInfo->u16RespIEsLen); - - cfg80211_connect_result(dev, pstrConnectInfo->au8bssid, - pstrConnectInfo->pu8ReqIEs, pstrConnectInfo->ReqIEsLen, - pstrConnectInfo->pu8RespIEs, pstrConnectInfo->u16RespIEsLen, + cfg80211_connect_result(dev, pstrConnectInfo->bssid, + pstrConnectInfo->req_ies, pstrConnectInfo->req_ies_len, + pstrConnectInfo->resp_ies, pstrConnectInfo->resp_ies_len, u16ConnectStatus, GFP_KERNEL); } else if (enuConnDisconnEvent == CONN_DISCONN_EVENT_DISCONN_NOTIF) { wilc_optaining_ip = false; - PRINT_ER("Received MAC_DISCONNECTED from firmware with reason %d on dev [%p]\n", - pstrDisconnectNotifInfo->u16reason, priv->dev); p2p_local_random = 0x01; p2p_recv_random = 0x00; wilc_ie = false; eth_zero_addr(priv->au8AssociatedBss); - wilc_wlan_set_bssid(priv->dev, NullBssid); + wilc_wlan_set_bssid(priv->dev, NullBssid, STATION_MODE); eth_zero_addr(wilc_connected_ssid); if (!pstrWFIDrv->p2p_connect) wlan_channel = INVALID_CHANNEL; if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) { - pstrDisconnectNotifInfo->u16reason = 3; + pstrDisconnectNotifInfo->reason = 3; } else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) { - pstrDisconnectNotifInfo->u16reason = 1; + pstrDisconnectNotifInfo->reason = 1; } - cfg80211_disconnected(dev, pstrDisconnectNotifInfo->u16reason, pstrDisconnectNotifInfo->ie, + cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie, pstrDisconnectNotifInfo->ie_len, false, GFP_KERNEL); } @@ -601,13 +581,12 @@ static int set_channel(struct wiphy *wiphy, vif = netdev_priv(priv->dev); channelnum = ieee80211_frequency_to_channel(chandef->chan->center_freq); - PRINT_D(CFG80211_DBG, "Setting channel %d with frequency %d\n", channelnum, chandef->chan->center_freq); curr_channel = channelnum; result = wilc_set_mac_chnl_num(vif, channelnum); if (result != 0) - PRINT_ER("Error in setting channel 
%d\n", channelnum); + netdev_err(priv->dev, "Error in setting channel\n"); return result; } @@ -628,38 +607,33 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) priv->u32RcvdChCount = 0; - wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif)); reset_shadow_found(); priv->bCfgScanning = true; if (request->n_channels <= MAX_NUM_SCANNED_NETWORKS) { - for (i = 0; i < request->n_channels; i++) { + for (i = 0; i < request->n_channels; i++) au8ScanChanList[i] = (u8)ieee80211_frequency_to_channel(request->channels[i]->center_freq); - PRINT_INFO(CFG80211_DBG, "ScanChannel List[%d] = %d,", i, au8ScanChanList[i]); - } - - PRINT_D(CFG80211_DBG, "Requested num of scan channel %d\n", request->n_channels); - PRINT_D(CFG80211_DBG, "Scan Request IE len = %zu\n", request->ie_len); - - PRINT_D(CFG80211_DBG, "Number of SSIDs %d\n", request->n_ssids); if (request->n_ssids >= 1) { - strHiddenNetwork.pstrHiddenNetworkInfo = kmalloc(request->n_ssids * sizeof(struct hidden_network), GFP_KERNEL); - strHiddenNetwork.u8ssidnum = request->n_ssids; + strHiddenNetwork.net_info = + kmalloc_array(request->n_ssids, + sizeof(struct hidden_network), + GFP_KERNEL); + if (!strHiddenNetwork.net_info) + return -ENOMEM; + strHiddenNetwork.n_ssids = request->n_ssids; for (i = 0; i < request->n_ssids; i++) { if (request->ssids[i].ssid && request->ssids[i].ssid_len != 0) { - strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL); - memcpy(strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, request->ssids[i].ssid, request->ssids[i].ssid_len); - strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen = request->ssids[i].ssid_len; + strHiddenNetwork.net_info[i].ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL); + memcpy(strHiddenNetwork.net_info[i].ssid, request->ssids[i].ssid, request->ssids[i].ssid_len); + strHiddenNetwork.net_info[i].ssid_len = request->ssids[i].ssid_len; } else { - PRINT_D(CFG80211_DBG, "Received one NULL SSID\n"); - strHiddenNetwork.u8ssidnum -= 1; + strHiddenNetwork.n_ssids -= 1; } } - PRINT_D(CFG80211_DBG, "Trigger Scan Request\n"); s32Error = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN, au8ScanChanList, request->n_channels, @@ -667,7 +641,6 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) request->ie_len, CfgScanResult, (void *)priv, &strHiddenNetwork); } else { - PRINT_D(CFG80211_DBG, "Trigger Scan Request\n"); s32Error = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN, au8ScanChanList, request->n_channels, @@ -676,14 +649,11 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) (void *)priv, NULL); } } else { - PRINT_ER("Requested num of scanned channels is greater than the max, supported" - " channels\n"); + netdev_err(priv->dev, "Requested scanned channels over\n"); } - if (s32Error != 0) { + if (s32Error != 0) s32Error = -EBUSY; - PRINT_WRN(CFG80211_DBG, "Device is busy: Error(%d)\n", s32Error); - } return s32Error; } @@ -695,98 +665,52 @@ static int connect(struct wiphy *wiphy, struct net_device *dev, u32 i; u8 u8security = NO_ENCRYPT; enum AUTHTYPE tenuAuth_type = ANY; - char *pcgroup_encrypt_val = NULL; - char *pccipher_group = NULL; - char *pcwpa_version = NULL; struct wilc_priv *priv; struct host_if_drv *pstrWFIDrv; - tstrNetworkInfo *pstrNetworkInfo = NULL; + struct network_info *pstrNetworkInfo = NULL; struct wilc_vif *vif; wilc_connecting = 1; priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - pstrWFIDrv = (struct host_if_drv *)(priv->hWILCWFIDrv); - - wilc_set_wfi_drv_handler(vif, 
wilc_get_vif_idx(vif)); + pstrWFIDrv = (struct host_if_drv *)priv->hif_drv; - PRINT_D(CFG80211_DBG, "Connecting to SSID [%s] on netdev [%p] host if [%p]\n", sme->ssid, dev, priv->hWILCWFIDrv); - if (!(strncmp(sme->ssid, "DIRECT-", 7))) { - PRINT_D(CFG80211_DBG, "Connected to Direct network,OBSS disabled\n"); + if (!(strncmp(sme->ssid, "DIRECT-", 7))) pstrWFIDrv->p2p_connect = 1; - } else { + else pstrWFIDrv->p2p_connect = 0; - } - PRINT_INFO(CFG80211_DBG, "Required SSID = %s\n , AuthType = %d\n", sme->ssid, sme->auth_type); for (i = 0; i < last_scanned_cnt; i++) { - if ((sme->ssid_len == last_scanned_shadow[i].u8SsidLen) && - memcmp(last_scanned_shadow[i].au8ssid, + if ((sme->ssid_len == last_scanned_shadow[i].ssid_len) && + memcmp(last_scanned_shadow[i].ssid, sme->ssid, sme->ssid_len) == 0) { - PRINT_INFO(CFG80211_DBG, "Network with required SSID is found %s\n", sme->ssid); - if (!sme->bssid) { - PRINT_INFO(CFG80211_DBG, "BSSID is not passed from the user\n"); + if (!sme->bssid) break; - } else { - if (memcmp(last_scanned_shadow[i].au8bssid, + else + if (memcmp(last_scanned_shadow[i].bssid, sme->bssid, - ETH_ALEN) == 0) { - PRINT_INFO(CFG80211_DBG, "BSSID is passed from the user and matched\n"); + ETH_ALEN) == 0) break; - } - } } } if (i < last_scanned_cnt) { - PRINT_D(CFG80211_DBG, "Required bss is in scan results\n"); - pstrNetworkInfo = &last_scanned_shadow[i]; - - PRINT_INFO(CFG80211_DBG, "network BSSID to be associated: %x%x%x%x%x%x\n", - pstrNetworkInfo->au8bssid[0], pstrNetworkInfo->au8bssid[1], - pstrNetworkInfo->au8bssid[2], pstrNetworkInfo->au8bssid[3], - pstrNetworkInfo->au8bssid[4], pstrNetworkInfo->au8bssid[5]); } else { s32Error = -ENOENT; - if (last_scanned_cnt == 0) - PRINT_D(CFG80211_DBG, "No Scan results yet\n"); - else - PRINT_D(CFG80211_DBG, "Required bss not in scan results: Error(%d)\n", s32Error); - - goto done; + wilc_connecting = 0; + return s32Error; } - priv->WILC_WFI_wep_default = 0; memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key)); memset(priv->WILC_WFI_wep_key_len, 0, sizeof(priv->WILC_WFI_wep_key_len)); - PRINT_INFO(CFG80211_DBG, "sme->crypto.wpa_versions=%x\n", sme->crypto.wpa_versions); - PRINT_INFO(CFG80211_DBG, "sme->crypto.cipher_group=%x\n", sme->crypto.cipher_group); - - PRINT_INFO(CFG80211_DBG, "sme->crypto.n_ciphers_pairwise=%d\n", sme->crypto.n_ciphers_pairwise); - - if (INFO) { - for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) - PRINT_D(CORECONFIG_DBG, "sme->crypto.ciphers_pairwise[%d]=%x\n", i, sme->crypto.ciphers_pairwise[i]); - } - if (sme->crypto.cipher_group != NO_ENCRYPT) { - pcwpa_version = "Default"; - PRINT_D(CORECONFIG_DBG, ">> sme->crypto.wpa_versions: %x\n", sme->crypto.wpa_versions); if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) { u8security = ENCRYPT_ENABLED | WEP; - pcgroup_encrypt_val = "WEP40"; - pccipher_group = "WLAN_CIPHER_SUITE_WEP40"; - PRINT_INFO(CFG80211_DBG, "WEP Default Key Idx = %d\n", sme->key_idx); - if (INFO) { - for (i = 0; i < sme->key_len; i++) - PRINT_D(CORECONFIG_DBG, "WEP Key Value[%d] = %d\n", i, sme->key[i]); - } - priv->WILC_WFI_wep_default = sme->key_idx; priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len; memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len); @@ -801,10 +725,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev, sme->key_idx); } else if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104) { u8security = ENCRYPT_ENABLED | WEP | WEP_EXTENDED; - pcgroup_encrypt_val = "WEP104"; - pccipher_group = "WLAN_CIPHER_SUITE_WEP104"; - 
priv->WILC_WFI_wep_default = sme->key_idx; priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len; memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len); @@ -820,31 +741,21 @@ static int connect(struct wiphy *wiphy, struct net_device *dev, } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) { if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) { u8security = ENCRYPT_ENABLED | WPA2 | TKIP; - pcgroup_encrypt_val = "WPA2_TKIP"; - pccipher_group = "TKIP"; } else { u8security = ENCRYPT_ENABLED | WPA2 | AES; - pcgroup_encrypt_val = "WPA2_AES"; - pccipher_group = "AES"; } - pcwpa_version = "WPA_VERSION_2"; } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) { if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) { u8security = ENCRYPT_ENABLED | WPA | TKIP; - pcgroup_encrypt_val = "WPA_TKIP"; - pccipher_group = "TKIP"; } else { u8security = ENCRYPT_ENABLED | WPA | AES; - pcgroup_encrypt_val = "WPA_AES"; - pccipher_group = "AES"; } - pcwpa_version = "WPA_VERSION_1"; } else { s32Error = -ENOTSUPP; - PRINT_ER("Not supported cipher: Error(%d)\n", s32Error); - - goto done; + netdev_err(dev, "Not supported cipher\n"); + wilc_connecting = 0; + return s32Error; } } @@ -859,22 +770,17 @@ static int connect(struct wiphy *wiphy, struct net_device *dev, } } - PRINT_D(CFG80211_DBG, "Adding key with cipher group = %x\n", sme->crypto.cipher_group); - - PRINT_D(CFG80211_DBG, "Authentication Type = %d\n", sme->auth_type); switch (sme->auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: - PRINT_D(CFG80211_DBG, "In OPEN SYSTEM\n"); tenuAuth_type = OPEN_SYSTEM; break; case NL80211_AUTHTYPE_SHARED_KEY: tenuAuth_type = SHARED_KEY; - PRINT_D(CFG80211_DBG, "In SHARED KEY\n"); break; default: - PRINT_D(CFG80211_DBG, "Automatic Authentation type = %d\n", sme->auth_type); + break; } if (sme->crypto.n_akm_suites) { @@ -888,33 +794,26 @@ static int connect(struct wiphy *wiphy, struct net_device *dev, } } - - PRINT_INFO(CFG80211_DBG, "Required Channel = %d\n", pstrNetworkInfo->u8channel); - - PRINT_INFO(CFG80211_DBG, "Group encryption value = %s\n Cipher Group = %s\n WPA version = %s\n", - pcgroup_encrypt_val, pccipher_group, pcwpa_version); - - curr_channel = pstrNetworkInfo->u8channel; + curr_channel = pstrNetworkInfo->ch; if (!pstrWFIDrv->p2p_connect) - wlan_channel = pstrNetworkInfo->u8channel; + wlan_channel = pstrNetworkInfo->ch; - wilc_wlan_set_bssid(dev, pstrNetworkInfo->au8bssid); + wilc_wlan_set_bssid(dev, pstrNetworkInfo->bssid, STATION_MODE); - s32Error = wilc_set_join_req(vif, pstrNetworkInfo->au8bssid, sme->ssid, + s32Error = wilc_set_join_req(vif, pstrNetworkInfo->bssid, sme->ssid, sme->ssid_len, sme->ie, sme->ie_len, CfgConnectResult, (void *)priv, u8security, tenuAuth_type, - pstrNetworkInfo->u8channel, - pstrNetworkInfo->pJoinParams); + pstrNetworkInfo->ch, + pstrNetworkInfo->join_params); if (s32Error != 0) { - PRINT_ER("wilc_set_join_req(): Error(%d)\n", s32Error); + netdev_err(dev, "wilc_set_join_req(): Error\n"); s32Error = -ENOENT; - goto done; + wilc_connecting = 0; + return s32Error; } -done: - return s32Error; } @@ -930,12 +829,10 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv; + pstrWFIDrv = (struct host_if_drv *)priv->hif_drv; if (!pstrWFIDrv->p2p_connect) wlan_channel = INVALID_CHANNEL; - wilc_wlan_set_bssid(priv->dev, NullBssid); - - PRINT_D(CFG80211_DBG, "Disconnecting with reason code(%d)\n", reason_code); + 
wilc_wlan_set_bssid(priv->dev, NullBssid, STATION_MODE); p2p_local_random = 0x01; p2p_recv_random = 0x00; @@ -944,7 +841,7 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co s32Error = wilc_disconnect(vif, reason_code); if (s32Error != 0) { - PRINT_ER("Error in disconnecting: Error(%d)\n", s32Error); + netdev_err(priv->dev, "Error in disconnecting\n"); s32Error = -EINVAL; } @@ -957,7 +854,6 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, { s32 s32Error = 0, KeyLen = params->key_len; - u32 i; struct wilc_priv *priv; const u8 *pu8RxMic = NULL; const u8 *pu8TxMic = NULL; @@ -972,29 +868,13 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, vif = netdev_priv(netdev); wl = vif->wilc; - PRINT_D(CFG80211_DBG, "Adding key with cipher suite = %x\n", params->cipher); - - PRINT_D(CFG80211_DBG, "%p %p %d\n", wiphy, netdev, key_index); - - PRINT_D(CFG80211_DBG, "key %x %x %x\n", params->key[0], - params->key[1], - params->key[2]); - - switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (priv->wdev->iftype == NL80211_IFTYPE_AP) { - priv->WILC_WFI_wep_default = key_index; priv->WILC_WFI_wep_key_len[key_index] = params->key_len; memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len); - PRINT_D(CFG80211_DBG, "Adding AP WEP Default key Idx = %d\n", key_index); - PRINT_D(CFG80211_DBG, "Adding AP WEP Key len= %d\n", params->key_len); - - for (i = 0; i < params->key_len; i++) - PRINT_D(CFG80211_DBG, "WEP AP key val[%d] = %x\n", i, params->key[i]); - tenuAuth_type = OPEN_SYSTEM; if (params->cipher == WLAN_CIPHER_SUITE_WEP40) @@ -1008,16 +888,9 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, break; } if (memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) { - priv->WILC_WFI_wep_default = key_index; priv->WILC_WFI_wep_key_len[key_index] = params->key_len; memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len); - PRINT_D(CFG80211_DBG, "Adding WEP Default key Idx = %d\n", key_index); - PRINT_D(CFG80211_DBG, "Adding WEP Key length = %d\n", params->key_len); - if (INFO) { - for (i = 0; i < params->key_len; i++) - PRINT_INFO(CFG80211_DBG, "WEP key value[%d] = %d\n", i, params->key[i]); - } wilc_add_wep_key_bss_sta(vif, params->key, params->key_len, key_index); } @@ -1068,22 +941,12 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, priv->wilc_gtk[key_index]->key_len = params->key_len; priv->wilc_gtk[key_index]->seq_len = params->seq_len; - if (INFO) { - for (i = 0; i < params->key_len; i++) - PRINT_INFO(CFG80211_DBG, "Adding group key value[%d] = %x\n", i, params->key[i]); - for (i = 0; i < params->seq_len; i++) - PRINT_INFO(CFG80211_DBG, "Adding group seq value[%d] = %x\n", i, params->seq[i]); - } - - wilc_add_rx_gtk(vif, params->key, KeyLen, key_index, params->seq_len, params->seq, pu8RxMic, pu8TxMic, AP_MODE, u8gmode); } else { - PRINT_INFO(CFG80211_DBG, "STA Address: %x%x%x%x%x\n", mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4]); - if (params->cipher == WLAN_CIPHER_SUITE_TKIP) u8pmode = ENCRYPT_ENABLED | WPA | TKIP; else @@ -1105,14 +968,6 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, if ((params->seq_len) > 0) priv->wilc_ptk[key_index]->seq = kmalloc(params->seq_len, GFP_KERNEL); - if (INFO) { - for (i = 0; i < params->key_len; i++) - PRINT_INFO(CFG80211_DBG, "Adding pairwise key value[%d] = %x\n", 
i, params->key[i]); - - for (i = 0; i < params->seq_len; i++) - PRINT_INFO(CFG80211_DBG, "Adding group seq value[%d] = %x\n", i, params->seq[i]); - } - memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len); if ((params->seq_len) > 0) @@ -1156,10 +1011,6 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, memcpy(g_key_gtk_params.seq, params->seq, params->seq_len); } g_key_gtk_params.cipher = params->cipher; - - PRINT_D(CFG80211_DBG, "key %x %x %x\n", g_key_gtk_params.key[0], - g_key_gtk_params.key[1], - g_key_gtk_params.key[2]); g_gtk_keys_saved = true; } @@ -1193,27 +1044,18 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, memcpy(g_key_ptk_params.seq, params->seq, params->seq_len); } g_key_ptk_params.cipher = params->cipher; - - PRINT_D(CFG80211_DBG, "key %x %x %x\n", g_key_ptk_params.key[0], - g_key_ptk_params.key[1], - g_key_ptk_params.key[2]); g_ptk_keys_saved = true; } wilc_add_ptk(vif, params->key, KeyLen, mac_addr, pu8RxMic, pu8TxMic, STATION_MODE, u8mode, key_index); - PRINT_D(CFG80211_DBG, "Adding pairwise key\n"); - if (INFO) { - for (i = 0; i < params->key_len; i++) - PRINT_INFO(CFG80211_DBG, "Adding pairwise key value[%d] = %d\n", i, params->key[i]); - } } } break; default: - PRINT_ER("Not supported cipher: Error(%d)\n", s32Error); + netdev_err(netdev, "Not supported cipher\n"); s32Error = -ENOTSUPP; } @@ -1270,18 +1112,14 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev, kfree(g_key_gtk_params.seq); g_key_gtk_params.seq = NULL; - wilc_set_machw_change_vir_if(netdev, false); } if (key_index >= 0 && key_index <= 3) { memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]); priv->WILC_WFI_wep_key_len[key_index] = 0; - - PRINT_D(CFG80211_DBG, "Removing WEP key with index = %d\n", key_index); wilc_remove_wep_key(vif, key_index); } else { - PRINT_D(CFG80211_DBG, "Removing all installed keys\n"); - wilc_remove_key(priv->hWILCWFIDrv, mac_addr); + wilc_remove_key(priv->hif_drv, mac_addr); } return 0; @@ -1293,26 +1131,17 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, { struct wilc_priv *priv; struct key_params key_params; - u32 i; priv = wiphy_priv(wiphy); if (!pairwise) { - PRINT_D(CFG80211_DBG, "Getting group key idx: %x\n", key_index); - key_params.key = priv->wilc_gtk[key_index]->key; key_params.cipher = priv->wilc_gtk[key_index]->cipher; key_params.key_len = priv->wilc_gtk[key_index]->key_len; key_params.seq = priv->wilc_gtk[key_index]->seq; key_params.seq_len = priv->wilc_gtk[key_index]->seq_len; - if (INFO) { - for (i = 0; i < key_params.key_len; i++) - PRINT_INFO(CFG80211_DBG, "Retrieved key value %x\n", key_params.key[i]); - } } else { - PRINT_D(CFG80211_DBG, "Getting pairwise key\n"); - key_params.key = priv->wilc_ptk[key_index]->key; key_params.cipher = priv->wilc_ptk[key_index]->cipher; key_params.key_len = priv->wilc_ptk[key_index]->key_len; @@ -1334,11 +1163,7 @@ static int set_default_key(struct wiphy *wiphy, struct net_device *netdev, u8 ke priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - PRINT_D(CFG80211_DBG, "Setting default key with idx = %d\n", key_index); - - if (key_index != priv->WILC_WFI_wep_default) { - wilc_set_wep_default_keyid(vif, key_index); - } + wilc_set_wep_default_keyid(vif, key_index); return 0; } @@ -1355,10 +1180,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, vif = netdev_priv(dev); if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) { - 
PRINT_D(HOSTAPD_DBG, "Getting station parameters\n"); - - PRINT_INFO(HOSTAPD_DBG, ": %x%x%x%x%x\n", mac[0], mac[1], mac[2], mac[3], mac[4]); - for (i = 0; i < NUM_STA_ASSOCIATED; i++) { if (!(memcmp(mac, priv->assoc_stainfo.au8Sta_AssociatedBss[i], ETH_ALEN))) { associatedsta = i; @@ -1367,7 +1188,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, } if (associatedsta == -1) { - PRINT_ER("Station required is not associated\n"); + netdev_err(dev, "sta required is not associated\n"); return -ENOENT; } @@ -1375,7 +1196,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, wilc_get_inactive_time(vif, mac, &inactive_time); sinfo->inactive_time = 1000 * inactive_time; - PRINT_D(CFG80211_DBG, "Inactive time %d\n", sinfo->inactive_time); } if (vif->iftype == STATION_MODE) { @@ -1400,9 +1220,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, wilc_enable_tcp_ack_filter(true); else if (strStatistics.link_speed != DEFAULT_LINK_SPEED) wilc_enable_tcp_ack_filter(false); - - PRINT_D(CORECONFIG_DBG, "*** stats[%d][%d][%d][%d][%d]\n", sinfo->signal, sinfo->rx_packets, sinfo->tx_packets, - sinfo->tx_failed, sinfo->txrate.legacy); } return 0; } @@ -1410,14 +1227,13 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev, static int change_bss(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params) { - PRINT_D(CFG80211_DBG, "Changing Bss parametrs\n"); return 0; } static int set_wiphy_params(struct wiphy *wiphy, u32 changed) { s32 s32Error = 0; - struct cfg_param_val pstrCfgParamVal; + struct cfg_param_attr pstrCfgParamVal; struct wilc_priv *priv; struct wilc_vif *vif; @@ -1425,37 +1241,28 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed) vif = netdev_priv(priv->dev); pstrCfgParamVal.flag = 0; - PRINT_D(CFG80211_DBG, "Setting Wiphy params\n"); if (changed & WIPHY_PARAM_RETRY_SHORT) { - PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RETRY_SHORT %d\n", - priv->dev->ieee80211_ptr->wiphy->retry_short); pstrCfgParamVal.flag |= RETRY_SHORT; pstrCfgParamVal.short_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_short; } if (changed & WIPHY_PARAM_RETRY_LONG) { - PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RETRY_LONG %d\n", priv->dev->ieee80211_ptr->wiphy->retry_long); pstrCfgParamVal.flag |= RETRY_LONG; pstrCfgParamVal.long_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_long; } if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { - PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_FRAG_THRESHOLD %d\n", priv->dev->ieee80211_ptr->wiphy->frag_threshold); pstrCfgParamVal.flag |= FRAG_THRESHOLD; pstrCfgParamVal.frag_threshold = priv->dev->ieee80211_ptr->wiphy->frag_threshold; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { - PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RTS_THRESHOLD %d\n", priv->dev->ieee80211_ptr->wiphy->rts_threshold); - pstrCfgParamVal.flag |= RTS_THRESHOLD; pstrCfgParamVal.rts_threshold = priv->dev->ieee80211_ptr->wiphy->rts_threshold; } - PRINT_D(CFG80211_DBG, "Setting CFG params in the host interface\n"); s32Error = wilc_hif_set_cfg(vif, &pstrCfgParamVal); if (s32Error) - PRINT_ER("Error in setting WIPHY PARAMS\n"); - + netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n"); return s32Error; } @@ -1470,19 +1277,16 @@ static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct wilc_priv *priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - PRINT_D(CFG80211_DBG, "Setting PMKSA\n"); for (i = 0; i < priv->pmkid_list.numpmkid; i++) { if (!memcmp(pmksa->bssid, 
priv->pmkid_list.pmkidlist[i].bssid, ETH_ALEN)) { flag = PMKID_FOUND; - PRINT_D(CFG80211_DBG, "PMKID already exists\n"); break; } } if (i < WILC_MAX_NUM_PMKIDS) { - PRINT_D(CFG80211_DBG, "Setting PMKID in private structure\n"); memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid, ETH_ALEN); memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid, @@ -1490,14 +1294,13 @@ static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev, if (!(flag == PMKID_FOUND)) priv->pmkid_list.numpmkid++; } else { - PRINT_ER("Invalid PMKID index\n"); + netdev_err(netdev, "Invalid PMKID index\n"); s32Error = -EINVAL; } - if (!s32Error) { - PRINT_D(CFG80211_DBG, "Setting pmkid in the host interface\n"); + if (!s32Error) s32Error = wilc_set_pmkid_info(vif, &priv->pmkid_list); - } + return s32Error; } @@ -1509,12 +1312,9 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct wilc_priv *priv = wiphy_priv(wiphy); - PRINT_D(CFG80211_DBG, "Deleting PMKSA keys\n"); - for (i = 0; i < priv->pmkid_list.numpmkid; i++) { if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid, ETH_ALEN)) { - PRINT_D(CFG80211_DBG, "Reseting PMKID values\n"); memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(struct host_if_pmkid)); break; } @@ -1541,8 +1341,6 @@ static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) { struct wilc_priv *priv = wiphy_priv(wiphy); - PRINT_D(CFG80211_DBG, "Flushing PMKID key values\n"); - memset(&priv->pmkid_list, 0, sizeof(struct host_if_pmkid_attr)); return 0; @@ -1569,7 +1367,6 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len) } if (wlan_channel != INVALID_CHANNEL) { if (channel_list_attr_index) { - PRINT_D(GENERIC_DBG, "Modify channel list attribute\n"); for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) { if (buf[i] == 0x51) { for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) { @@ -1581,7 +1378,6 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len) } if (op_channel_attr_index) { - PRINT_D(GENERIC_DBG, "Modify operating channel attribute\n"); buf[op_channel_attr_index + 6] = 0x51; buf[op_channel_attr_index + 7] = wlan_channel; } @@ -1611,7 +1407,6 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp } if (wlan_channel != INVALID_CHANNEL && bOperChan) { if (channel_list_attr_index) { - PRINT_D(GENERIC_DBG, "Modify channel list attribute\n"); for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) { if (buf[i] == 0x51) { for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) { @@ -1623,14 +1418,13 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp } if (op_channel_attr_index) { - PRINT_D(GENERIC_DBG, "Modify operating channel attribute\n"); buf[op_channel_attr_index + 6] = 0x51; buf[op_channel_attr_index + 7] = wlan_channel; } } } -void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size) +void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size) { struct wilc_priv *priv; u32 header, pkt_offset; @@ -1639,7 +1433,7 @@ void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size) s32 s32Freq; priv = wiphy_priv(dev->ieee80211_ptr->wiphy); - pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv; + pstrWFIDrv = (struct host_if_drv *)priv->hif_drv; memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET); @@ -1647,41 +1441,29 @@ void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size) if (pkt_offset & IS_MANAGMEMENT_CALLBACK) { if 
(buff[FRAME_TYPE_ID] == IEEE80211_STYPE_PROBE_RESP) { - PRINT_D(GENERIC_DBG, "Probe response ACK\n"); cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, true, GFP_KERNEL); return; } else { - if (pkt_offset & IS_MGMT_STATUS_SUCCES) { - PRINT_D(GENERIC_DBG, "Success Ack - Action frame category: %x Action Subtype: %d Dialog T: %x OR %x\n", buff[ACTION_CAT_ID], buff[ACTION_SUBTYPE_ID], - buff[ACTION_SUBTYPE_ID + 1], buff[P2P_PUB_ACTION_SUBTYPE + 1]); + if (pkt_offset & IS_MGMT_STATUS_SUCCES) cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, true, GFP_KERNEL); - } else { - PRINT_D(GENERIC_DBG, "Fail Ack - Action frame category: %x Action Subtype: %d Dialog T: %x OR %x\n", buff[ACTION_CAT_ID], buff[ACTION_SUBTYPE_ID], - buff[ACTION_SUBTYPE_ID + 1], buff[P2P_PUB_ACTION_SUBTYPE + 1]); + else cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, false, GFP_KERNEL); - } return; } } else { - PRINT_D(GENERIC_DBG, "Rx Frame Type:%x\n", buff[FRAME_TYPE_ID]); - s32Freq = ieee80211_channel_to_frequency(curr_channel, IEEE80211_BAND_2GHZ); if (ieee80211_is_action(buff[FRAME_TYPE_ID])) { - PRINT_D(GENERIC_DBG, "Rx Action Frame Type: %x %x\n", buff[ACTION_SUBTYPE_ID], buff[P2P_PUB_ACTION_SUBTYPE]); - if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) { - PRINT_D(GENERIC_DBG, "Receiving action frames from wrong channels\n"); + netdev_dbg(dev, "Receiving action wrong ch\n"); return; } if (buff[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) { switch (buff[ACTION_SUBTYPE_ID]) { case GAS_INTIAL_REQ: - PRINT_D(GENERIC_DBG, "GAS INITIAL REQ %x\n", buff[ACTION_SUBTYPE_ID]); break; case GAS_INTIAL_RSP: - PRINT_D(GENERIC_DBG, "GAS INITIAL RSP %x\n", buff[ACTION_SUBTYPE_ID]); break; case PUBLIC_ACT_VENDORSPEC: @@ -1692,7 +1474,6 @@ void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size) if (!memcmp(p2p_vendor_spec, &buff[i], 6)) { p2p_recv_random = buff[i + 6]; wilc_ie = true; - PRINT_D(GENERIC_DBG, "WILC Vendor specific IE:%02x\n", p2p_recv_random); break; } } @@ -1709,32 +1490,31 @@ void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size) } } } else { - PRINT_D(GENERIC_DBG, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random); + netdev_dbg(dev, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random); } } if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP) && (wilc_ie)) { - PRINT_D(GENERIC_DBG, "Sending P2P to host without extra elemnt\n"); cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size - 7, 0); return; } break; default: - PRINT_D(GENERIC_DBG, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buff[ACTION_SUBTYPE_ID]); + netdev_dbg(dev, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buff[ACTION_SUBTYPE_ID]); break; } } } - cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size - 7, 0); + cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size, 0); } } static void WILC_WFI_mgmt_tx_complete(void *priv, int status) { - struct p2p_mgmt_data *pv_data = (struct p2p_mgmt_data *)priv; + struct p2p_mgmt_data *pv_data = priv; kfree(pv_data->buff); @@ -1745,9 +1525,7 @@ static void WILC_WFI_RemainOnChannelReady(void *pUserVoid) { struct wilc_priv *priv; - priv = (struct wilc_priv *)pUserVoid; - - PRINT_D(HOSTINF_DBG, "Remain on channel ready\n"); + priv = pUserVoid; priv->bInP2PlistenState = true; @@ -1762,20 +1540,15 @@ static void WILC_WFI_RemainOnChannelExpired(void *pUserVoid, u32 u32SessionID) { struct wilc_priv *priv; - priv = (struct 
wilc_priv *)pUserVoid; + priv = pUserVoid; if (u32SessionID == priv->strRemainOnChanParams.u32ListenSessionID) { - PRINT_D(GENERIC_DBG, "Remain on channel expired\n"); - priv->bInP2PlistenState = false; cfg80211_remain_on_channel_expired(priv->wdev, priv->strRemainOnChanParams.u64ListenCookie, priv->strRemainOnChanParams.pstrListenChan, GFP_KERNEL); - } else { - PRINT_D(GENERIC_DBG, "Received ID 0x%x Expected ID 0x%x (No match)\n", u32SessionID - , priv->strRemainOnChanParams.u32ListenSessionID); } } @@ -1791,11 +1564,8 @@ static int remain_on_channel(struct wiphy *wiphy, priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - PRINT_D(GENERIC_DBG, "Remaining on channel %d\n", chan->hw_value); - - if (wdev->iftype == NL80211_IFTYPE_AP) { - PRINT_D(GENERIC_DBG, "Required remain-on-channel while in AP mode"); + netdev_dbg(vif->ndev, "Required while in AP mode\n"); return s32Error; } @@ -1826,8 +1596,6 @@ static int cancel_remain_on_channel(struct wiphy *wiphy, priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - PRINT_D(CFG80211_DBG, "Cancel remain on channel\n"); - s32Error = wilc_listen_state_expired(vif, priv->strRemainOnChanParams.u32ListenSessionID); return s32Error; } @@ -1851,7 +1619,7 @@ static int mgmt_tx(struct wiphy *wiphy, vif = netdev_priv(wdev->netdev); priv = wiphy_priv(wiphy); - pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv; + pstrWFIDrv = (struct host_if_drv *)priv->hif_drv; *cookie = (unsigned long)buf; priv->u64tx_cookie = *cookie; @@ -1859,49 +1627,36 @@ static int mgmt_tx(struct wiphy *wiphy, if (ieee80211_is_mgmt(mgmt->frame_control)) { mgmt_tx = kmalloc(sizeof(struct p2p_mgmt_data), GFP_KERNEL); - if (!mgmt_tx) { - PRINT_ER("Failed to allocate memory for mgmt_tx structure\n"); + if (!mgmt_tx) return -EFAULT; - } + mgmt_tx->buff = kmalloc(buf_len, GFP_KERNEL); if (!mgmt_tx->buff) { - PRINT_ER("Failed to allocate memory for mgmt_tx buff\n"); kfree(mgmt_tx); - return -EFAULT; + return -ENOMEM; } + memcpy(mgmt_tx->buff, buf, len); mgmt_tx->size = len; if (ieee80211_is_probe_resp(mgmt->frame_control)) { - PRINT_D(GENERIC_DBG, "TX: Probe Response\n"); - PRINT_D(GENERIC_DBG, "Setting channel: %d\n", chan->hw_value); wilc_set_mac_chnl_num(vif, chan->hw_value); curr_channel = chan->hw_value; } else if (ieee80211_is_action(mgmt->frame_control)) { - PRINT_D(GENERIC_DBG, "ACTION FRAME:%x\n", (u16)mgmt->frame_control); - - if (buf[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) { if (buf[ACTION_SUBTYPE_ID] != PUBLIC_ACT_VENDORSPEC || buf[P2P_PUB_ACTION_SUBTYPE] != GO_NEG_CONF) { - PRINT_D(GENERIC_DBG, "Setting channel: %d\n", chan->hw_value); wilc_set_mac_chnl_num(vif, chan->hw_value); curr_channel = chan->hw_value; } switch (buf[ACTION_SUBTYPE_ID]) { case GAS_INTIAL_REQ: - { - PRINT_D(GENERIC_DBG, "GAS INITIAL REQ %x\n", buf[ACTION_SUBTYPE_ID]); break; - } case GAS_INTIAL_RSP: - { - PRINT_D(GENERIC_DBG, "GAS INITIAL RSP %x\n", buf[ACTION_SUBTYPE_ID]); break; - } case PUBLIC_ACT_VENDORSPEC: { @@ -1916,8 +1671,6 @@ static int mgmt_tx(struct wiphy *wiphy, if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) { if (p2p_local_random > p2p_recv_random) { - PRINT_D(GENERIC_DBG, "LOCAL WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random); - for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) { if (buf[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buf[i + 2], 4))) { if (buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || 
buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP) @@ -1933,13 +1686,11 @@ static int mgmt_tx(struct wiphy *wiphy, mgmt_tx->buff[len + sizeof(p2p_vendor_spec)] = p2p_local_random; mgmt_tx->size = buf_len; } - } else { - PRINT_D(GENERIC_DBG, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random); } } } else { - PRINT_D(GENERIC_DBG, "Not a P2P public action frame\n"); + netdev_dbg(vif->ndev, "Not a P2P public action frame\n"); } break; @@ -1947,24 +1698,18 @@ static int mgmt_tx(struct wiphy *wiphy, default: { - PRINT_D(GENERIC_DBG, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buf[ACTION_SUBTYPE_ID]); + netdev_dbg(vif->ndev, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buf[ACTION_SUBTYPE_ID]); break; } } } - PRINT_D(GENERIC_DBG, "TX: ACTION FRAME Type:%x : Chan:%d\n", buf[ACTION_SUBTYPE_ID], chan->hw_value); pstrWFIDrv->p2p_timeout = (jiffies + msecs_to_jiffies(wait)); - - PRINT_D(GENERIC_DBG, "Current Jiffies: %lu Timeout:%llu\n", - jiffies, pstrWFIDrv->p2p_timeout); } wilc_wlan_txq_add_mgmt_pkt(wdev->netdev, mgmt_tx, mgmt_tx->buff, mgmt_tx->size, WILC_WFI_mgmt_tx_complete); - } else { - PRINT_D(GENERIC_DBG, "This function transmits only management frames\n"); } return 0; } @@ -1977,10 +1722,7 @@ static int mgmt_tx_cancel_wait(struct wiphy *wiphy, struct host_if_drv *pstrWFIDrv; priv = wiphy_priv(wiphy); - pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv; - - - PRINT_D(GENERIC_DBG, "Tx Cancel wait :%lu\n", jiffies); + pstrWFIDrv = (struct host_if_drv *)priv->hif_drv; pstrWFIDrv->p2p_timeout = jiffies; if (!priv->bInP2PlistenState) { @@ -2007,7 +1749,6 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev, if (!frame_type) return; - PRINT_D(GENERIC_DBG, "Frame registering Frame Type: %x: Boolean: %d\n", frame_type, reg); switch (frame_type) { case PROBE_REQ: { @@ -2029,17 +1770,14 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev, } } - if (!wl->initialized) { - PRINT_D(GENERIC_DBG, "Return since mac is closed\n"); + if (!wl->initialized) return; - } wilc_frame_register(vif, frame_type, reg); } static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { - PRINT_D(CFG80211_DBG, "Setting CQM RSSi Function\n"); return 0; } @@ -2049,8 +1787,6 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev, struct wilc_priv *priv; struct wilc_vif *vif; - PRINT_D(CFG80211_DBG, "Dumping station information\n"); - if (idx != 0) return -ENOENT; @@ -2070,17 +1806,13 @@ static int set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, struct wilc_priv *priv; struct wilc_vif *vif; - PRINT_D(CFG80211_DBG, " Power save Enabled= %d , TimeOut = %d\n", enabled, timeout); - if (!wiphy) return -ENOENT; priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - if (!priv->hWILCWFIDrv) { - PRINT_ER("Driver is NULL\n"); + if (!priv->hif_drv) return -EIO; - } if (wilc_enable_ps) wilc_set_power_mgmt(vif, enabled, timeout); @@ -2094,286 +1826,73 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, { struct wilc_priv *priv; struct wilc_vif *vif; - u8 interface_type; - u16 TID = 0; - u8 i; struct wilc *wl; vif = netdev_priv(dev); priv = wiphy_priv(wiphy); wl = vif->wilc; - - PRINT_D(HOSTAPD_DBG, "In Change virtual interface function\n"); - PRINT_D(HOSTAPD_DBG, "Wireless interface name =%s\n", dev->name); p2p_local_random = 0x01; p2p_recv_random = 0x00; wilc_ie = false; wilc_optaining_ip = false; del_timer(&wilc_during_ip_timer); - PRINT_D(GENERIC_DBG, 
"Changing virtual interface, enable scan\n"); - - if (g_ptk_keys_saved && g_gtk_keys_saved) { - wilc_set_machw_change_vir_if(dev, true); - } switch (type) { case NL80211_IFTYPE_STATION: wilc_connecting = 0; - PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_STATION\n"); - dev->ieee80211_ptr->iftype = type; priv->wdev->iftype = type; vif->monitor_flag = 0; vif->iftype = STATION_MODE; + wilc_set_operation_mode(vif, STATION_MODE); memset(priv->assoc_stainfo.au8Sta_AssociatedBss, 0, MAX_NUM_STA * ETH_ALEN); - interface_type = vif->iftype; - vif->iftype = STATION_MODE; - - if (wl->initialized) { - wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid, - TID); - wilc_wait_msg_queue_idle(); - - up(&wl->cfg_event); - - wilc1000_wlan_deinit(dev); - wilc1000_wlan_init(dev, vif); - wilc_initialized = 1; - vif->iftype = interface_type; - - wilc_set_wfi_drv_handler(vif, - wilc_get_vif_idx(wl->vif[0])); - wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr); - wilc_set_operation_mode(vif, STATION_MODE); - - if (g_wep_keys_saved) { - wilc_set_wep_default_keyid(wl->vif[0], - g_key_wep_params.key_idx); - wilc_add_wep_key_bss_sta(wl->vif[0], - g_key_wep_params.key, - g_key_wep_params.key_len, - g_key_wep_params.key_idx); - } - - wilc_flush_join_req(vif); - - if (g_ptk_keys_saved && g_gtk_keys_saved) { - PRINT_D(CFG80211_DBG, "ptk %x %x %x\n", g_key_ptk_params.key[0], - g_key_ptk_params.key[1], - g_key_ptk_params.key[2]); - PRINT_D(CFG80211_DBG, "gtk %x %x %x\n", g_key_gtk_params.key[0], - g_key_gtk_params.key[1], - g_key_gtk_params.key[2]); - add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy, - wl->vif[0]->ndev, - g_add_ptk_key_params.key_idx, - g_add_ptk_key_params.pairwise, - g_add_ptk_key_params.mac_addr, - (struct key_params *)(&g_key_ptk_params)); - - add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy, - wl->vif[0]->ndev, - g_add_gtk_key_params.key_idx, - g_add_gtk_key_params.pairwise, - g_add_gtk_key_params.mac_addr, - (struct key_params *)(&g_key_gtk_params)); - } - - if (wl->initialized) { - for (i = 0; i < num_reg_frame; i++) { - PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type, - vif->g_struct_frame_reg[i].reg); - wilc_frame_register(vif, - vif->g_struct_frame_reg[i].frame_type, - vif->g_struct_frame_reg[i].reg); - } - } - wilc_enable_ps = true; - wilc_set_power_mgmt(vif, 1, 0); - } + wilc_enable_ps = true; + wilc_set_power_mgmt(vif, 1, 0); break; case NL80211_IFTYPE_P2P_CLIENT: - wilc_enable_ps = false; - wilc_set_power_mgmt(vif, 0, 0); wilc_connecting = 0; - PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_P2P_CLIENT\n"); - - wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid, TID); - dev->ieee80211_ptr->iftype = type; priv->wdev->iftype = type; vif->monitor_flag = 0; - - PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n"); vif->iftype = CLIENT_MODE; + wilc_set_operation_mode(vif, STATION_MODE); - - if (wl->initialized) { - wilc_wait_msg_queue_idle(); - - wilc1000_wlan_deinit(dev); - wilc1000_wlan_init(dev, vif); - wilc_initialized = 1; - - wilc_set_wfi_drv_handler(vif, - wilc_get_vif_idx(wl->vif[0])); - wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr); - wilc_set_operation_mode(vif, STATION_MODE); - - if (g_wep_keys_saved) { - wilc_set_wep_default_keyid(wl->vif[0], - g_key_wep_params.key_idx); - wilc_add_wep_key_bss_sta(wl->vif[0], - g_key_wep_params.key, - g_key_wep_params.key_len, - g_key_wep_params.key_idx); - } - - wilc_flush_join_req(vif); - - if (g_ptk_keys_saved && g_gtk_keys_saved) { - PRINT_D(CFG80211_DBG, "ptk %x %x 
%x\n", g_key_ptk_params.key[0], - g_key_ptk_params.key[1], - g_key_ptk_params.key[2]); - PRINT_D(CFG80211_DBG, "gtk %x %x %x\n", g_key_gtk_params.key[0], - g_key_gtk_params.key[1], - g_key_gtk_params.key[2]); - add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy, - wl->vif[0]->ndev, - g_add_ptk_key_params.key_idx, - g_add_ptk_key_params.pairwise, - g_add_ptk_key_params.mac_addr, - (struct key_params *)(&g_key_ptk_params)); - - add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy, - wl->vif[0]->ndev, - g_add_gtk_key_params.key_idx, - g_add_gtk_key_params.pairwise, - g_add_gtk_key_params.mac_addr, - (struct key_params *)(&g_key_gtk_params)); - } - - refresh_scan(priv, 1, true); - wilc_set_machw_change_vir_if(dev, false); - - if (wl->initialized) { - for (i = 0; i < num_reg_frame; i++) { - PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type, - vif->g_struct_frame_reg[i].reg); - wilc_frame_register(vif, - vif->g_struct_frame_reg[i].frame_type, - vif->g_struct_frame_reg[i].reg); - } - } - } + wilc_enable_ps = false; + wilc_set_power_mgmt(vif, 0, 0); break; case NL80211_IFTYPE_AP: wilc_enable_ps = false; - PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_AP %d\n", type); dev->ieee80211_ptr->iftype = type; priv->wdev->iftype = type; vif->iftype = AP_MODE; - PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv); - - PRINT_D(HOSTAPD_DBG, "Downloading AP firmware\n"); - wilc_wlan_get_firmware(dev); - - if (wl->initialized) { - vif->iftype = AP_MODE; - wilc_mac_close(dev); - wilc_mac_open(dev); - - for (i = 0; i < num_reg_frame; i++) { - PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type, - vif->g_struct_frame_reg[i].reg); - wilc_frame_register(vif, - vif->g_struct_frame_reg[i].frame_type, - vif->g_struct_frame_reg[i].reg); - } + + if (wl->initialized) { + wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif), + 0); + wilc_set_operation_mode(vif, AP_MODE); + wilc_set_power_mgmt(vif, 0, 0); } break; case NL80211_IFTYPE_P2P_GO: - PRINT_D(GENERIC_DBG, "start duringIP timer\n"); - wilc_optaining_ip = true; mod_timer(&wilc_during_ip_timer, jiffies + msecs_to_jiffies(during_ip_time)); - wilc_set_power_mgmt(vif, 0, 0); - wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid, TID); - wilc_enable_ps = false; - PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_GO\n"); + wilc_set_operation_mode(vif, AP_MODE); dev->ieee80211_ptr->iftype = type; priv->wdev->iftype = type; - - PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv); - - PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n"); - - vif->iftype = GO_MODE; - wilc_wait_msg_queue_idle(); - wilc1000_wlan_deinit(dev); - wilc1000_wlan_init(dev, vif); - wilc_initialized = 1; - - wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(wl->vif[0])); - wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr); - wilc_set_operation_mode(vif, AP_MODE); - - if (g_wep_keys_saved) { - wilc_set_wep_default_keyid(wl->vif[0], - g_key_wep_params.key_idx); - wilc_add_wep_key_bss_sta(wl->vif[0], - g_key_wep_params.key, - g_key_wep_params.key_len, - g_key_wep_params.key_idx); - } - - wilc_flush_join_req(vif); - - if (g_ptk_keys_saved && g_gtk_keys_saved) { - PRINT_D(CFG80211_DBG, "ptk %x %x %x cipher %x\n", g_key_ptk_params.key[0], - g_key_ptk_params.key[1], - g_key_ptk_params.key[2], - g_key_ptk_params.cipher); - PRINT_D(CFG80211_DBG, "gtk %x %x %x cipher %x\n", g_key_gtk_params.key[0], - g_key_gtk_params.key[1], - g_key_gtk_params.key[2], - 
g_key_gtk_params.cipher); - add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy, - wl->vif[0]->ndev, - g_add_ptk_key_params.key_idx, - g_add_ptk_key_params.pairwise, - g_add_ptk_key_params.mac_addr, - (struct key_params *)(&g_key_ptk_params)); - - add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy, - wl->vif[0]->ndev, - g_add_gtk_key_params.key_idx, - g_add_gtk_key_params.pairwise, - g_add_gtk_key_params.mac_addr, - (struct key_params *)(&g_key_gtk_params)); - } - - if (wl->initialized) { - for (i = 0; i < num_reg_frame; i++) { - PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type, - vif->g_struct_frame_reg[i].reg); - wilc_frame_register(vif, - vif->g_struct_frame_reg[i].frame_type, - vif->g_struct_frame_reg[i].reg); - } - } + wilc_enable_ps = false; + wilc_set_power_mgmt(vif, 0, 0); break; default: - PRINT_ER("Unknown interface type= %d\n", type); + netdev_err(dev, "Unknown interface type= %d\n", type); return -EINVAL; } @@ -2391,18 +1910,15 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev, priv = wiphy_priv(wiphy); vif = netdev_priv(dev); - wl = vif ->wilc; - PRINT_D(HOSTAPD_DBG, "Starting ap\n"); - - PRINT_D(HOSTAPD_DBG, "Interval = %d\n DTIM period = %d\n Head length = %zu Tail length = %zu\n", - settings->beacon_interval, settings->dtim_period, beacon->head_len, beacon->tail_len); + wl = vif->wilc; s32Error = set_channel(wiphy, &settings->chandef); if (s32Error != 0) - PRINT_ER("Error in setting channel\n"); + netdev_err(dev, "Error in setting channel\n"); - wilc_wlan_set_bssid(dev, wl->vif[0]->src_addr); + wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE); + wilc_set_power_mgmt(vif, 0, 0); s32Error = wilc_add_beacon(vif, settings->beacon_interval, settings->dtim_period, beacon->head_len, @@ -2421,8 +1937,6 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev, priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - PRINT_D(HOSTAPD_DBG, "Setting beacon\n"); - s32Error = wilc_add_beacon(vif, 0, 0, beacon->head_len, (u8 *)beacon->head, beacon->tail_len, @@ -2444,14 +1958,12 @@ static int stop_ap(struct wiphy *wiphy, struct net_device *dev) priv = wiphy_priv(wiphy); vif = netdev_priv(priv->dev); - PRINT_D(HOSTAPD_DBG, "Deleting beacon\n"); - - wilc_wlan_set_bssid(dev, NullBssid); + wilc_wlan_set_bssid(dev, NullBssid, AP_MODE); s32Error = wilc_del_beacon(vif); if (s32Error) - PRINT_ER("Host delete beacon fail\n"); + netdev_err(dev, "Host delete beacon fail\n"); return s32Error; } @@ -2477,14 +1989,6 @@ static int add_station(struct wiphy *wiphy, struct net_device *dev, strStaParams.rates_len = params->supported_rates_len; strStaParams.rates = params->supported_rates; - PRINT_D(CFG80211_DBG, "Adding station parameters %d\n", params->aid); - - PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n", priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][0], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][1], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][2], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][3], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][4], - priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][5]); - PRINT_D(HOSTAPD_DBG, "ASSOC ID = %d\n", strStaParams.aid); - PRINT_D(HOSTAPD_DBG, "Number of supported rates = %d\n", - strStaParams.rates_len); - if (!params->ht_capa) { strStaParams.ht_supported = false; } else { @@ -2502,26 +2006,9 @@ static int add_station(struct wiphy *wiphy, struct net_device *dev, strStaParams.flags_mask = params->sta_flags_mask; 
strStaParams.flags_set = params->sta_flags_set; - PRINT_D(HOSTAPD_DBG, "IS HT supported = %d\n", - strStaParams.ht_supported); - PRINT_D(HOSTAPD_DBG, "Capability Info = %d\n", - strStaParams.ht_capa_info); - PRINT_D(HOSTAPD_DBG, "AMPDU Params = %d\n", - strStaParams.ht_ampdu_params); - PRINT_D(HOSTAPD_DBG, "HT Extended params = %d\n", - strStaParams.ht_ext_params); - PRINT_D(HOSTAPD_DBG, "Tx Beamforming Cap = %d\n", - strStaParams.ht_tx_bf_cap); - PRINT_D(HOSTAPD_DBG, "Antenna selection info = %d\n", - strStaParams.ht_ante_sel); - PRINT_D(HOSTAPD_DBG, "Flag Mask = %d\n", - strStaParams.flags_mask); - PRINT_D(HOSTAPD_DBG, "Flag Set = %d\n", - strStaParams.flags_set); - s32Error = wilc_add_station(vif, &strStaParams); if (s32Error) - PRINT_ER("Host add station fail\n"); + netdev_err(dev, "Host add station fail\n"); } return s32Error; @@ -2542,21 +2029,14 @@ static int del_station(struct wiphy *wiphy, struct net_device *dev, vif = netdev_priv(dev); if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) { - PRINT_D(HOSTAPD_DBG, "Deleting station\n"); - - - if (!mac) { - PRINT_D(HOSTAPD_DBG, "All associated stations\n"); + if (!mac) s32Error = wilc_del_allstation(vif, priv->assoc_stainfo.au8Sta_AssociatedBss); - } else { - PRINT_D(HOSTAPD_DBG, "With mac address: %x%x%x%x%x%x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); - } s32Error = wilc_del_station(vif, mac); if (s32Error) - PRINT_ER("Host delete station fail\n"); + netdev_err(dev, "Host delete station fail\n"); } return s32Error; } @@ -2569,9 +2049,6 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev, struct add_sta_param strStaParams = { {0} }; struct wilc_vif *vif; - - PRINT_D(HOSTAPD_DBG, "Change station paramters\n"); - if (!wiphy) return -EFAULT; @@ -2584,14 +2061,6 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev, strStaParams.rates_len = params->supported_rates_len; strStaParams.rates = params->supported_rates; - PRINT_D(HOSTAPD_DBG, "BSSID = %x%x%x%x%x%x\n", - strStaParams.bssid[0], strStaParams.bssid[1], - strStaParams.bssid[2], strStaParams.bssid[3], - strStaParams.bssid[4], strStaParams.bssid[5]); - PRINT_D(HOSTAPD_DBG, "ASSOC ID = %d\n", strStaParams.aid); - PRINT_D(HOSTAPD_DBG, "Number of supported rates = %d\n", - strStaParams.rates_len); - if (!params->ht_capa) { strStaParams.ht_supported = false; } else { @@ -2609,26 +2078,9 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev, strStaParams.flags_mask = params->sta_flags_mask; strStaParams.flags_set = params->sta_flags_set; - PRINT_D(HOSTAPD_DBG, "IS HT supported = %d\n", - strStaParams.ht_supported); - PRINT_D(HOSTAPD_DBG, "Capability Info = %d\n", - strStaParams.ht_capa_info); - PRINT_D(HOSTAPD_DBG, "AMPDU Params = %d\n", - strStaParams.ht_ampdu_params); - PRINT_D(HOSTAPD_DBG, "HT Extended params = %d\n", - strStaParams.ht_ext_params); - PRINT_D(HOSTAPD_DBG, "Tx Beamforming Cap = %d\n", - strStaParams.ht_tx_bf_cap); - PRINT_D(HOSTAPD_DBG, "Antenna selection info = %d\n", - strStaParams.ht_ante_sel); - PRINT_D(HOSTAPD_DBG, "Flag Mask = %d\n", - strStaParams.flags_mask); - PRINT_D(HOSTAPD_DBG, "Flag Set = %d\n", - strStaParams.flags_set); - s32Error = wilc_edit_station(vif, &strStaParams); if (s32Error) - PRINT_ER("Host edit station fail\n"); + netdev_err(dev, "Host edit station fail\n"); } return s32Error; } @@ -2645,34 +2097,87 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, struct net_device *new_ifc = NULL; priv = wiphy_priv(wiphy); - - - - PRINT_D(HOSTAPD_DBG, "Adding monitor 
interface[%p]\n", priv->wdev->netdev); - vif = netdev_priv(priv->wdev->netdev); if (type == NL80211_IFTYPE_MONITOR) { - PRINT_D(HOSTAPD_DBG, "Monitor interface mode: Initializing mon interface virtual device driver\n"); - PRINT_D(HOSTAPD_DBG, "Adding monitor interface[%p]\n", vif->ndev); new_ifc = WILC_WFI_init_mon_interface(name, vif->ndev); if (new_ifc) { - PRINT_D(HOSTAPD_DBG, "Setting monitor flag in private structure\n"); vif = netdev_priv(priv->wdev->netdev); vif->monitor_flag = 1; - } else - PRINT_ER("Error in initializing monitor interface\n "); + } } return priv->wdev; } static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) { - PRINT_D(HOSTAPD_DBG, "Deleting virtual interface\n"); return 0; } +static int wilc_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) +{ + struct wilc_priv *priv = wiphy_priv(wiphy); + struct wilc_vif *vif = netdev_priv(priv->dev); + + if (!wow && wilc_wlan_get_num_conn_ifcs(vif->wilc)) + vif->wilc->suspend_event = true; + else + vif->wilc->suspend_event = false; + + return 0; +} + +static int wilc_resume(struct wiphy *wiphy) +{ + struct wilc_priv *priv = wiphy_priv(wiphy); + struct wilc_vif *vif = netdev_priv(priv->dev); + + netdev_info(vif->ndev, "cfg resume\n"); + return 0; +} + +static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled) +{ + struct wilc_priv *priv = wiphy_priv(wiphy); + struct wilc_vif *vif = netdev_priv(priv->dev); + + netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled); +} + +static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, + enum nl80211_tx_power_setting type, int mbm) +{ + int ret; + s32 tx_power = MBM_TO_DBM(mbm); + struct wilc_priv *priv = wiphy_priv(wiphy); + struct wilc_vif *vif = netdev_priv(priv->dev); + + if (tx_power < 0) + tx_power = 0; + else if (tx_power > 18) + tx_power = 18; + ret = wilc_set_tx_power(vif, tx_power); + if (ret) + netdev_err(vif->ndev, "Failed to set tx power\n"); + + return ret; +} + +static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, + int *dbm) +{ + int ret; + struct wilc_priv *priv = wiphy_priv(wiphy); + struct wilc_vif *vif = netdev_priv(priv->dev); + + ret = wilc_get_tx_power(vif, (u8 *)dbm); + if (ret) + netdev_err(vif->ndev, "Failed to get tx power\n"); + + return ret; +} + static struct cfg80211_ops wilc_cfg80211_ops = { .set_monitor_channel = set_channel, .scan = scan, @@ -2708,55 +2213,25 @@ static struct cfg80211_ops wilc_cfg80211_ops = { .set_power_mgmt = set_power_mgmt, .set_cqm_rssi_config = set_cqm_rssi_config, -}; - -int WILC_WFI_update_stats(struct wiphy *wiphy, u32 pktlen, u8 changed) -{ - struct wilc_priv *priv; - - priv = wiphy_priv(wiphy); - switch (changed) { - case WILC_WFI_RX_PKT: - { - priv->netstats.rx_packets++; - priv->netstats.rx_bytes += pktlen; - priv->netstats.rx_time = get_jiffies_64(); - } - break; - - case WILC_WFI_TX_PKT: - { - priv->netstats.tx_packets++; - priv->netstats.tx_bytes += pktlen; - priv->netstats.tx_time = get_jiffies_64(); + .suspend = wilc_suspend, + .resume = wilc_resume, + .set_wakeup = wilc_set_wakeup, + .set_tx_power = set_tx_power, + .get_tx_power = get_tx_power, - } - break; - - default: - break; - } - return 0; -} +}; static struct wireless_dev *WILC_WFI_CfgAlloc(void) { struct wireless_dev *wdev; - - PRINT_D(CFG80211_DBG, "Allocating wireless device\n"); - wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); - if (!wdev) { - PRINT_ER("Cannot allocate wireless device\n"); + if (!wdev) goto _fail_; - } wdev->wiphy = wiphy_new(&wilc_cfg80211_ops, sizeof(struct 
wilc_priv)); - if (!wdev->wiphy) { - PRINT_ER("Cannot allocate wiphy\n"); + if (!wdev->wiphy) goto _fail_mem_; - } WILC_WFI_band_2ghz.ht_cap.ht_supported = 1; WILC_WFI_band_2ghz.ht_cap.cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); @@ -2780,11 +2255,9 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de struct wireless_dev *wdev; s32 s32Error = 0; - PRINT_D(CFG80211_DBG, "Registering wifi device\n"); - wdev = WILC_WFI_CfgAlloc(); if (!wdev) { - PRINT_ER("CfgAlloc Failed\n"); + netdev_err(net, "wiphy new allocate failed\n"); return NULL; } @@ -2792,9 +2265,10 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de sema_init(&(priv->SemHandleUpdateStats), 1); priv->wdev = wdev; wdev->wiphy->max_scan_ssids = MAX_NUM_PROBED_SSID; +#ifdef CONFIG_PM + wdev->wiphy->wowlan = &wowlan_support; +#endif wdev->wiphy->max_num_pmkids = WILC_MAX_NUM_PMKIDS; - PRINT_INFO(CFG80211_DBG, "Max number of PMKIDs = %d\n", wdev->wiphy->max_num_pmkids); - wdev->wiphy->max_scan_ie_len = 1000; wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wdev->wiphy->cipher_suites = cipher_suites; @@ -2807,20 +2281,11 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wdev->iftype = NL80211_IFTYPE_STATION; - - - PRINT_INFO(CFG80211_DBG, "Max scan ids = %d,Max scan IE len = %d,Signal Type = %d,Interface Modes = %d,Interface Type = %d\n", - wdev->wiphy->max_scan_ssids, wdev->wiphy->max_scan_ie_len, wdev->wiphy->signal_type, - wdev->wiphy->interface_modes, wdev->iftype); - set_wiphy_dev(wdev->wiphy, dev); s32Error = wiphy_register(wdev->wiphy); - if (s32Error) { - PRINT_ER("Cannot register wiphy device\n"); - } else { - PRINT_D(CFG80211_DBG, "Successful Registering\n"); - } + if (s32Error) + netdev_err(net, "Cannot register wiphy device\n"); priv->dev = net; return wdev; @@ -2832,26 +2297,21 @@ int wilc_init_host_int(struct net_device *net) struct wilc_priv *priv; - PRINT_D(INIT_DBG, "Host[%p][%p]\n", net, net->ieee80211_ptr); priv = wdev_priv(net->ieee80211_ptr); if (op_ifcs == 0) { setup_timer(&hAgingTimer, remove_network_from_shadow, 0); setup_timer(&wilc_during_ip_timer, clear_duringIP, 0); } op_ifcs++; - if (s32Error < 0) { - PRINT_ER("Failed to creat refresh Timer\n"); - return s32Error; - } priv->gbAutoRateAdjusted = false; priv->bInP2PlistenState = false; sema_init(&(priv->hSemScanReq), 1); - s32Error = wilc_init(net, &priv->hWILCWFIDrv); + s32Error = wilc_init(net, &priv->hif_drv); if (s32Error) - PRINT_ER("Error while initializing hostinterface\n"); + netdev_err(net, "Error while initializing hostinterface\n"); return s32Error; } @@ -2874,39 +2334,28 @@ int wilc_deinit_host_int(struct net_device *net) s32Error = wilc_deinit(vif); clear_shadow_scan(); - if (op_ifcs == 0) { - PRINT_D(CORECONFIG_DBG, "destroy during ip\n"); + if (op_ifcs == 0) del_timer_sync(&wilc_during_ip_timer); - } if (s32Error) - PRINT_ER("Error while deintializing host interface\n"); + netdev_err(net, "Error while deintializing host interface\n"); return s32Error; } void wilc_free_wiphy(struct net_device *net) { - PRINT_D(CFG80211_DBG, "Unregistering wiphy\n"); - - if (!net) { - PRINT_D(INIT_DBG, "net_device is NULL\n"); + if (!net) return; - } - if (!net->ieee80211_ptr) { - PRINT_D(INIT_DBG, "ieee80211_ptr is NULL\n"); + if (!net->ieee80211_ptr) return; - } - if (!net->ieee80211_ptr->wiphy) { - PRINT_D(INIT_DBG, "wiphy is NULL\n"); + if (!net->ieee80211_ptr->wiphy) return; - } 
wiphy_unregister(net->ieee80211_ptr->wiphy); - PRINT_D(INIT_DBG, "Freeing wiphy\n"); wiphy_free(net->ieee80211_ptr->wiphy); kfree(net->ieee80211_ptr); } diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h index ab53d9d59081..85a3810d7bb5 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h @@ -12,7 +12,6 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *dev); void wilc_free_wiphy(struct net_device *net); -int WILC_WFI_update_stats(struct wiphy *wiphy, u32 pktlen, u8 changed); int wilc_deinit_host_int(struct net_device *net); int wilc_init_host_int(struct net_device *net); void WILC_WFI_monitor_rx(u8 *buff, u32 size); diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h index 98ac8ed04a06..4123cffe3a6e 100644 --- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h +++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h @@ -35,8 +35,6 @@ #include <linux/skbuff.h> #include <linux/ieee80211.h> #include <net/cfg80211.h> -#include <linux/ieee80211.h> -#include <net/cfg80211.h> #include <net/ieee80211_radiotap.h> #include <linux/if_arp.h> #include <linux/in6.h> @@ -121,10 +119,9 @@ struct wilc_priv { spinlock_t lock; struct net_device *dev; struct napi_struct napi; - struct host_if_drv *hWILCWFIDrv; + struct host_if_drv *hif_drv; struct host_if_pmkid_attr pmkid_list; struct WILC_WFI_stats netstats; - u8 WILC_WFI_wep_default; u8 WILC_WFI_wep_key[4][WLAN_KEY_LEN_WEP104]; u8 WILC_WFI_wep_key_len[4]; /* The real interface that the monitor is on */ @@ -149,7 +146,7 @@ typedef struct { } struct_frame_reg; struct wilc_vif { - u8 u8IfIdx; + u8 idx; u8 iftype; int monitor_flag; int mac_opened; @@ -160,6 +157,7 @@ struct wilc_vif { u8 bssid[ETH_ALEN]; struct host_if_drv *hif_drv; struct net_device *ndev; + u8 mode; }; struct wilc { @@ -215,6 +213,9 @@ struct wilc { const struct firmware *firmware; struct device *dev; + bool suspend_event; + + struct rf_info dummy_statistics; }; struct WILC_WFI_mon_priv { @@ -225,17 +226,13 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif); void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset); void wilc_mac_indicate(struct wilc *wilc, int flag); -void wilc_rx_complete(struct wilc *wilc); -void wilc_dbg(u8 *buff); - int wilc_lock_timeout(struct wilc *wilc, void *, u32 timeout); void wilc_netdev_cleanup(struct wilc *wilc); int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio, const struct wilc_hif_func *ops); void wilc1000_wlan_deinit(struct net_device *dev); void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size); -u16 wilc_set_machw_change_vir_if(struct net_device *dev, bool value); int wilc_wlan_get_firmware(struct net_device *dev); -int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid); +int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode); #endif diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c index 83af51bb83e8..fd938fb43dd3 100644 --- a/drivers/staging/wilc1000/wilc_wlan.c +++ b/drivers/staging/wilc1000/wilc_wlan.c @@ -3,54 +3,24 @@ #include "wilc_wfi_netdevice.h" #include "wilc_wlan_cfg.h" -#ifdef WILC_OPTIMIZE_SLEEP_INT -static inline void chip_allow_sleep(struct wilc *wilc); -#endif -static inline void chip_wakeup(struct wilc *wilc); -static u32 dbgflag = N_INIT | N_ERR | N_INTR | N_TXQ | N_RXQ; - -/* FIXME: 
replace with dev_debug() */ -static void wilc_debug(u32 flag, char *fmt, ...) -{ - char buf[256]; - va_list args; - - if (flag & dbgflag) { - va_start(args, fmt); - vsprintf(buf, fmt, args); - va_end(args); - - wilc_dbg(buf); - } -} - static CHIP_PS_STATE_T chip_ps_state = CHIP_WAKEDUP; static inline void acquire_bus(struct wilc *wilc, BUS_ACQUIRE_T acquire) { mutex_lock(&wilc->hif_cs); - #ifndef WILC_OPTIMIZE_SLEEP_INT - if (chip_ps_state != CHIP_WAKEDUP) - #endif - { - if (acquire == ACQUIRE_AND_WAKEUP) - chip_wakeup(wilc); - } + if (acquire == ACQUIRE_AND_WAKEUP) + chip_wakeup(wilc); } static inline void release_bus(struct wilc *wilc, BUS_RELEASE_T release) { - #ifdef WILC_OPTIMIZE_SLEEP_INT if (release == RELEASE_ALLOW_SLEEP) chip_allow_sleep(wilc); - #endif mutex_unlock(&wilc->hif_cs); } -#ifdef TCP_ACK_FILTER -static void wilc_wlan_txq_remove(struct txq_entry_t *tqe) +static void wilc_wlan_txq_remove(struct wilc *wilc, struct txq_entry_t *tqe) { - if (tqe == wilc->txq_head) { wilc->txq_head = tqe->next; if (wilc->txq_head) @@ -65,7 +35,6 @@ static void wilc_wlan_txq_remove(struct txq_entry_t *tqe) } wilc->txq_entries -= 1; } -#endif static struct txq_entry_t * wilc_wlan_txq_remove_from_head(struct net_device *dev) @@ -117,18 +86,18 @@ static void wilc_wlan_txq_add_to_tail(struct net_device *dev, wilc->txq_tail = tqe; } wilc->txq_entries += 1; - PRINT_D(TX_DBG, "Number of entries in TxQ = %d\n", wilc->txq_entries); spin_unlock_irqrestore(&wilc->txq_spinlock, flags); - PRINT_D(TX_DBG, "Wake the txq_handling\n"); - up(&wilc->txq_event); } -static int wilc_wlan_txq_add_to_head(struct wilc *wilc, struct txq_entry_t *tqe) +static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif, + struct txq_entry_t *tqe) { unsigned long flags; + struct wilc *wilc = vif->wilc; + if (wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs, CFG_PKTS_TIMEOUT)) return -1; @@ -147,17 +116,14 @@ static int wilc_wlan_txq_add_to_head(struct wilc *wilc, struct txq_entry_t *tqe) wilc->txq_head = tqe; } wilc->txq_entries += 1; - PRINT_D(TX_DBG, "Number of entries in TxQ = %d\n", wilc->txq_entries); spin_unlock_irqrestore(&wilc->txq_spinlock, flags); up(&wilc->txq_add_to_head_cs); up(&wilc->txq_event); - PRINT_D(TX_DBG, "Wake up the txq_handler\n"); return 0; } -#ifdef TCP_ACK_FILTER struct ack_session_info; struct ack_session_info { u32 seq_num; @@ -173,7 +139,6 @@ struct pending_acks_info { struct txq_entry_t *txqe; }; - #define NOT_TCP_ACK (-1) #define MAX_TCP_SESSION 25 @@ -192,19 +157,20 @@ static inline int init_tcp_tracking(void) static inline int add_tcp_session(u32 src_prt, u32 dst_prt, u32 seq) { - ack_session_info[tcp_session].seq_num = seq; - ack_session_info[tcp_session].bigger_ack_num = 0; - ack_session_info[tcp_session].src_port = src_prt; - ack_session_info[tcp_session].dst_port = dst_prt; - tcp_session++; - - PRINT_D(TCP_ENH, "TCP Session %d to Ack %d\n", tcp_session, seq); + if (tcp_session < 2 * MAX_TCP_SESSION) { + ack_session_info[tcp_session].seq_num = seq; + ack_session_info[tcp_session].bigger_ack_num = 0; + ack_session_info[tcp_session].src_port = src_prt; + ack_session_info[tcp_session].dst_port = dst_prt; + tcp_session++; + } return 0; } static inline int update_tcp_session(u32 index, u32 ack) { - if (ack > ack_session_info[index].bigger_ack_num) + if (index < 2 * MAX_TCP_SESSION && + ack > ack_session_info[index].bigger_ack_num) ack_session_info[index].bigger_ack_num = ack; return 0; } @@ -212,7 +178,7 @@ static inline int update_tcp_session(u32 index, u32 ack) static inline int 
add_tcp_pending_ack(u32 ack, u32 session_index, struct txq_entry_t *txqe) { - if (pending_acks < MAX_PENDING_ACKS) { + if (pending_base + pending_acks < MAX_PENDING_ACKS) { pending_acks_info[pending_base + pending_acks].ack_num = ack; pending_acks_info[pending_base + pending_acks].txqe = txqe; pending_acks_info[pending_base + pending_acks].session_index = session_index; @@ -221,19 +187,9 @@ static inline int add_tcp_pending_ack(u32 ack, u32 session_index, } return 0; } -static inline int remove_TCP_related(struct wilc *wilc) -{ - unsigned long flags; - - spin_lock_irqsave(&wilc->txq_spinlock, flags); - - spin_unlock_irqrestore(&wilc->txq_spinlock, flags); - return 0; -} -static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe) +static inline void tcp_process(struct net_device *dev, struct txq_entry_t *tqe) { - int ret; u8 *eth_hdr_ptr; u8 *buffer = tqe->buffer; unsigned short h_proto; @@ -245,10 +201,11 @@ static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe) vif = netdev_priv(dev); wilc = vif->wilc; + spin_lock_irqsave(&wilc->txq_spinlock, flags); eth_hdr_ptr = &buffer[0]; h_proto = ntohs(*((unsigned short *)ð_hdr_ptr[12])); - if (h_proto == 0x0800) { + if (h_proto == ETH_P_IP) { u8 *ip_hdr_ptr; u8 protocol; @@ -278,7 +235,8 @@ static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe) (u32)tcp_hdr_ptr[11]; for (i = 0; i < tcp_session; i++) { - if (ack_session_info[i].seq_num == seq_no) { + if (i < 2 * MAX_TCP_SESSION && + ack_session_info[i].seq_num == seq_no) { update_tcp_session(i, ack_no); break; } @@ -288,15 +246,9 @@ static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe) add_tcp_pending_ack(ack_no, i, tqe); } - - } else { - ret = 0; } - } else { - ret = 0; } spin_unlock_irqrestore(&wilc->txq_spinlock, flags); - return ret; } static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev) @@ -311,14 +263,15 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev) spin_lock_irqsave(&wilc->txq_spinlock, wilc->txq_spinlock_flags); for (i = pending_base; i < (pending_base + pending_acks); i++) { + if (i >= MAX_PENDING_ACKS || + pending_acks_info[i].session_index >= 2 * MAX_TCP_SESSION) + break; if (pending_acks_info[i].ack_num < ack_session_info[pending_acks_info[i].session_index].bigger_ack_num) { struct txq_entry_t *tqe; - PRINT_D(TCP_ENH, "DROP ACK: %u\n", - pending_acks_info[i].ack_num); tqe = pending_acks_info[i].txqe; if (tqe) { - wilc_wlan_txq_remove(tqe); + wilc_wlan_txq_remove(wilc, tqe); tqe->status = 1; if (tqe->tx_complete_func) tqe->tx_complete_func(tqe->priv, @@ -345,50 +298,39 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev) return 1; } -#endif -static bool enabled = false; +static bool enabled; void wilc_enable_tcp_ack_filter(bool value) { enabled = value; } -#ifdef TCP_ACK_FILTER -static bool is_tcp_ack_filter_enabled(void) -{ - return enabled; -} -#endif - -static int wilc_wlan_txq_add_cfg_pkt(struct wilc *wilc, u8 *buffer, u32 buffer_size) +static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer, + u32 buffer_size) { struct txq_entry_t *tqe; + struct wilc *wilc = vif->wilc; - PRINT_D(TX_DBG, "Adding config packet ...\n"); + netdev_dbg(vif->ndev, "Adding config packet ...\n"); if (wilc->quit) { - PRINT_D(TX_DBG, "Return due to clear function\n"); + netdev_dbg(vif->ndev, "Return due to clear function\n"); up(&wilc->cfg_event); return 0; } tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); - if (!tqe) { - PRINT_ER("Failed to allocate 
memory\n"); + if (!tqe) return 0; - } tqe->type = WILC_CFG_PKT; tqe->buffer = buffer; tqe->buffer_size = buffer_size; tqe->tx_complete_func = NULL; tqe->priv = NULL; -#ifdef TCP_ACK_FILTER tqe->tcp_pending_ack_idx = NOT_TCP_ACK; -#endif - PRINT_D(TX_DBG, "Adding the config packet at the Queue tail\n"); - if (wilc_wlan_txq_add_to_head(wilc, tqe)) + if (wilc_wlan_txq_add_to_head(vif, tqe)) return 0; return 1; } @@ -415,12 +357,9 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer, tqe->tx_complete_func = func; tqe->priv = priv; - PRINT_D(TX_DBG, "Adding mgmt packet at the Queue tail\n"); -#ifdef TCP_ACK_FILTER tqe->tcp_pending_ack_idx = NOT_TCP_ACK; - if (is_tcp_ack_filter_enabled()) + if (enabled) tcp_process(dev, tqe); -#endif wilc_wlan_txq_add_to_tail(dev, tqe); return wilc->txq_entries; } @@ -446,10 +385,7 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer, tqe->buffer_size = buffer_size; tqe->tx_complete_func = func; tqe->priv = priv; -#ifdef TCP_ACK_FILTER tqe->tcp_pending_ack_idx = NOT_TCP_ACK; -#endif - PRINT_D(TX_DBG, "Adding Network packet at the Queue tail\n"); wilc_wlan_txq_add_to_tail(dev, tqe); return 1; } @@ -483,32 +419,26 @@ static struct txq_entry_t *wilc_wlan_txq_get_next(struct wilc *wilc, static int wilc_wlan_rxq_add(struct wilc *wilc, struct rxq_entry_t *rqe) { - if (wilc->quit) return 0; mutex_lock(&wilc->rxq_cs); if (!wilc->rxq_head) { - PRINT_D(RX_DBG, "Add to Queue head\n"); rqe->next = NULL; wilc->rxq_head = rqe; wilc->rxq_tail = rqe; } else { - PRINT_D(RX_DBG, "Add to Queue tail\n"); wilc->rxq_tail->next = rqe; rqe->next = NULL; wilc->rxq_tail = rqe; } wilc->rxq_entries += 1; - PRINT_D(RX_DBG, "Number of queue entries: %d\n", wilc->rxq_entries); mutex_unlock(&wilc->rxq_cs); return wilc->rxq_entries; } static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc) { - - PRINT_D(RX_DBG, "Getting rxQ element\n"); if (wilc->rxq_head) { struct rxq_entry_t *rqe; @@ -516,29 +446,26 @@ static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc) rqe = wilc->rxq_head; wilc->rxq_head = wilc->rxq_head->next; wilc->rxq_entries -= 1; - PRINT_D(RX_DBG, "RXQ entries decreased\n"); mutex_unlock(&wilc->rxq_cs); return rqe; } - PRINT_D(RX_DBG, "Nothing to get from Q\n"); return NULL; } -#ifdef WILC_OPTIMIZE_SLEEP_INT - -static inline void chip_allow_sleep(struct wilc *wilc) +void chip_allow_sleep(struct wilc *wilc) { u32 reg = 0; wilc->hif_func->hif_read_reg(wilc, 0xf0, ®); wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0)); + wilc->hif_func->hif_write_reg(wilc, 0xfa, 0); } +EXPORT_SYMBOL_GPL(chip_allow_sleep); -static inline void chip_wakeup(struct wilc *wilc) +void chip_wakeup(struct wilc *wilc) { - u32 reg, clk_status_reg, trials = 0; - u32 sleep_time; + u32 reg, clk_status_reg; if ((wilc->io_type & 0x1) == HIF_SPI) { do { @@ -548,13 +475,12 @@ static inline void chip_wakeup(struct wilc *wilc) do { usleep_range(2 * 1000, 2 * 1000); - if ((wilc_get_chipid(wilc, true) == 0)) - wilc_debug(N_ERR, "Couldn't read chip id. 
Wake up failed\n"); - - } while ((wilc_get_chipid(wilc, true) == 0) && ((++trials % 3) == 0)); - + wilc_get_chipid(wilc, true); + } while (wilc_get_chipid(wilc, true) == 0); } while (wilc_get_chipid(wilc, true) == 0); } else if ((wilc->io_type & 0x1) == HIF_SDIO) { + wilc->hif_func->hif_write_reg(wilc, 0xfa, 1); + udelay(200); wilc->hif_func->hif_read_reg(wilc, 0xf0, ®); do { wilc->hif_func->hif_write_reg(wilc, 0xf0, @@ -562,14 +488,11 @@ static inline void chip_wakeup(struct wilc *wilc) wilc->hif_func->hif_read_reg(wilc, 0xf1, &clk_status_reg); - while (((clk_status_reg & 0x1) == 0) && (((++trials) % 3) == 0)) { + while ((clk_status_reg & 0x1) == 0) { usleep_range(2 * 1000, 2 * 1000); wilc->hif_func->hif_read_reg(wilc, 0xf1, &clk_status_reg); - - if ((clk_status_reg & 0x1) == 0) - wilc_debug(N_ERR, "clocks still OFF. Wake up failed\n"); } if ((clk_status_reg & 0x1) == 0) { wilc->hif_func->hif_write_reg(wilc, 0xf0, @@ -579,11 +502,7 @@ static inline void chip_wakeup(struct wilc *wilc) } if (chip_ps_state == CHIP_SLEEPING_MANUAL) { - wilc->hif_func->hif_read_reg(wilc, 0x1C0C, ®); - reg &= ~BIT(0); - wilc->hif_func->hif_write_reg(wilc, 0x1C0C, reg); - - if (wilc_get_chipid(wilc, false) >= 0x1002b0) { + if (wilc_get_chipid(wilc, false) < 0x1002b0) { u32 val32; wilc->hif_func->hif_read_reg(wilc, 0x1e1c, &val32); @@ -597,71 +516,37 @@ static inline void chip_wakeup(struct wilc *wilc) } chip_ps_state = CHIP_WAKEDUP; } -#else -static inline void chip_wakeup(struct wilc *wilc) -{ - u32 reg, trials = 0; - - do { - if ((wilc->io_type & 0x1) == HIF_SPI) { - wilc->hif_func->hif_read_reg(wilc, 1, ®); - wilc->hif_func->hif_write_reg(wilc, 1, reg & ~BIT(1)); - wilc->hif_func->hif_write_reg(wilc, 1, reg | BIT(1)); - wilc->hif_func->hif_write_reg(wilc, 1, reg & ~BIT(1)); - } else if ((wilc->io_type & 0x1) == HIF_SDIO) { - wilc->hif_func->hif_read_reg(wilc, 0xf0, ®); - wilc->hif_func->hif_write_reg(wilc, 0xf0, - reg & ~BIT(0)); - wilc->hif_func->hif_write_reg(wilc, 0xf0, - reg | BIT(0)); - wilc->hif_func->hif_write_reg(wilc, 0xf0, - reg & ~BIT(0)); - } - - do { - mdelay(3); +EXPORT_SYMBOL_GPL(chip_wakeup); - if ((wilc_get_chipid(wilc, true) == 0)) - wilc_debug(N_ERR, "Couldn't read chip id. 
Wake up failed\n"); - - } while ((wilc_get_chipid(wilc, true) == 0) && ((++trials % 3) == 0)); - - } while (wilc_get_chipid(wilc, true) == 0); - - if (chip_ps_state == CHIP_SLEEPING_MANUAL) { - wilc->hif_func->hif_read_reg(wilc, 0x1C0C, ®); - reg &= ~BIT(0); - wilc->hif_func->hif_write_reg(wilc, 0x1C0C, reg); - - if (wilc_get_chipid(wilc, false) >= 0x1002b0) { - u32 val32; - - wilc->hif_func->hif_read_reg(wilc, 0x1e1c, &val32); - val32 |= BIT(6); - wilc->hif_func->hif_write_reg(wilc, 0x1e1c, val32); - - wilc->hif_func->hif_read_reg(wilc, 0x1e9c, &val32); - val32 |= BIT(6); - wilc->hif_func->hif_write_reg(wilc, 0x1e9c, val32); - } - } - chip_ps_state = CHIP_WAKEDUP; -} -#endif void wilc_chip_sleep_manually(struct wilc *wilc) { if (chip_ps_state != CHIP_WAKEDUP) return; acquire_bus(wilc, ACQUIRE_ONLY); -#ifdef WILC_OPTIMIZE_SLEEP_INT chip_allow_sleep(wilc); -#endif wilc->hif_func->hif_write_reg(wilc, 0x10a8, 1); chip_ps_state = CHIP_SLEEPING_MANUAL; release_bus(wilc, RELEASE_ONLY); } +EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually); + +void host_wakeup_notify(struct wilc *wilc) +{ + acquire_bus(wilc, ACQUIRE_ONLY); + wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1); + release_bus(wilc, RELEASE_ONLY); +} +EXPORT_SYMBOL_GPL(host_wakeup_notify); + +void host_sleep_notify(struct wilc *wilc) +{ + acquire_bus(wilc, ACQUIRE_ONLY); + wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1); + release_bus(wilc, RELEASE_ONLY); +} +EXPORT_SYMBOL_GPL(host_sleep_notify); int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) { @@ -690,10 +575,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs, CFG_PKTS_TIMEOUT); -#ifdef TCP_ACK_FILTER wilc_wlan_txq_filter_dup_tcp_ack(dev); -#endif - PRINT_D(TX_DBG, "Getting the head of the TxQ\n"); tqe = wilc_wlan_txq_get_first(wilc); i = 0; sum = 0; @@ -709,65 +591,48 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) vmm_sz = HOST_HDR_OFFSET; vmm_sz += tqe->buffer_size; - PRINT_D(TX_DBG, "VMM Size before alignment = %d\n", vmm_sz); + if (vmm_sz & 0x3) vmm_sz = (vmm_sz + 4) & ~0x3; if ((sum + vmm_sz) > LINUX_TX_SIZE) break; - PRINT_D(TX_DBG, "VMM Size AFTER alignment = %d\n", vmm_sz); vmm_table[i] = vmm_sz / 4; - PRINT_D(TX_DBG, "VMMTable entry size = %d\n", - vmm_table[i]); - - if (tqe->type == WILC_CFG_PKT) { + if (tqe->type == WILC_CFG_PKT) vmm_table[i] |= BIT(10); - PRINT_D(TX_DBG, "VMMTable entry changed for CFG packet = %d\n", vmm_table[i]); - } vmm_table[i] = cpu_to_le32(vmm_table[i]); i++; sum += vmm_sz; - PRINT_D(TX_DBG, "sum = %d\n", sum); tqe = wilc_wlan_txq_get_next(wilc, tqe); } else { break; } } while (1); - if (i == 0) { - PRINT_D(TX_DBG, "Nothing in TX-Q\n"); + if (i == 0) break; - } else { - PRINT_D(TX_DBG, "Mark the last entry in VMM table - number of previous entries = %d\n", i); - vmm_table[i] = 0x0; - } + vmm_table[i] = 0x0; + acquire_bus(wilc, ACQUIRE_AND_WAKEUP); counter = 0; do { - ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, - ®); - if (!ret) { - wilc_debug(N_ERR, "[wilc txq]: fail can't read reg vmm_tbl_entry..\n"); + ret = wilc->hif_func->hif_read_reg(wilc, + WILC_HOST_TX_CTRL, + ®); + if (!ret) break; - } if ((reg & 0x1) == 0) { - PRINT_D(TX_DBG, "Writing VMM table ... 
with Size = %d\n", ((i + 1) * 4)); break; } else { counter++; if (counter > 200) { counter = 0; - PRINT_D(TX_DBG, "Looping in tx ctrl , forcce quit\n"); ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0); break; } - PRINT_WRN(GENERIC_DBG, "[wilc txq]: warn, vmm table not clear yet, wait...\n"); - release_bus(wilc, RELEASE_ALLOW_SLEEP); - usleep_range(3000, 3000); - acquire_bus(wilc, ACQUIRE_AND_WAKEUP); } } while (!wilc->quit); @@ -777,32 +642,24 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) timeout = 200; do { ret = wilc->hif_func->hif_block_tx(wilc, WILC_VMM_TBL_RX_SHADOW_BASE, (u8 *)vmm_table, ((i + 1) * 4)); - if (!ret) { - wilc_debug(N_ERR, "ERR block TX of VMM table.\n"); + if (!ret) break; - } - ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, - 0x2); - if (!ret) { - wilc_debug(N_ERR, "[wilc txq]: fail can't write reg host_vmm_ctl..\n"); + ret = wilc->hif_func->hif_write_reg(wilc, + WILC_HOST_VMM_CTL, + 0x2); + if (!ret) break; - } do { ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, ®); - if (!ret) { - wilc_debug(N_ERR, "[wilc txq]: fail can't read reg host_vmm_ctl..\n"); + if (!ret) break; - } if ((reg >> 2) & 0x1) { entries = ((reg >> 3) & 0x3f); break; } else { release_bus(wilc, RELEASE_ALLOW_SLEEP); - usleep_range(3000, 3000); - acquire_bus(wilc, ACQUIRE_AND_WAKEUP); - PRINT_WRN(GENERIC_DBG, "Can't get VMM entery - reg = %2x\n", reg); } } while (--timeout); if (timeout <= 0) { @@ -814,19 +671,13 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) break; if (entries == 0) { - PRINT_WRN(GENERIC_DBG, "[wilc txq]: no more buffer in the chip (reg: %08x), retry later [[ %d, %x ]]\n", reg, i, vmm_table[i - 1]); - ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, ®); - if (!ret) { - wilc_debug(N_ERR, "[wilc txq]: fail can't read reg WILC_HOST_TX_CTRL..\n"); + if (!ret) break; - } reg &= ~BIT(0); ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, reg); - if (!ret) { - wilc_debug(N_ERR, "[wilc txq]: fail can't write reg WILC_HOST_TX_CTRL..\n"); + if (!ret) break; - } break; } else { break; @@ -866,7 +717,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) if (tqe->type == WILC_CFG_PKT) { buffer_offset = ETH_CONFIG_PKT_HDR_OFFSET; } else if (tqe->type == WILC_NET_PKT) { - char *bssid = ((struct tx_complete_data *)(tqe->priv))->pBssid; + char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid; buffer_offset = ETH_ETHERNET_HDR_OFFSET; memcpy(&txb[offset + 4], bssid, 6); @@ -882,10 +733,9 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) if (tqe->tx_complete_func) tqe->tx_complete_func(tqe->priv, tqe->status); - #ifdef TCP_ACK_FILTER - if (tqe->tcp_pending_ack_idx != NOT_TCP_ACK) + if (tqe->tcp_pending_ack_idx != NOT_TCP_ACK && + tqe->tcp_pending_ack_idx < MAX_PENDING_ACKS) pending_acks_info[tqe->tcp_pending_ack_idx].txqe = NULL; - #endif kfree(tqe); } else { break; @@ -895,16 +745,12 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) acquire_bus(wilc, ACQUIRE_AND_WAKEUP); ret = wilc->hif_func->hif_clear_int_ext(wilc, ENABLE_TX_VMM); - if (!ret) { - wilc_debug(N_ERR, "[wilc txq]: fail can't start tx VMM ...\n"); + if (!ret) goto _end_; - } ret = wilc->hif_func->hif_block_tx_ext(wilc, 0, txb, offset); - if (!ret) { - wilc_debug(N_ERR, "[wilc txq]: fail can't block tx ext...\n"); + if (!ret) goto _end_; - } _end_: @@ -915,14 +761,13 @@ _end_: up(&wilc->txq_add_to_head_cs); wilc->txq_exit = 1; - PRINT_D(TX_DBG, "THREAD: Exiting txq\n"); 
*txq_count = wilc->txq_entries; return ret; } static void wilc_wlan_handle_rxq(struct wilc *wilc) { - int offset = 0, size, has_packet = 0; + int offset = 0, size; u8 *buffer; struct rxq_entry_t *rqe; @@ -930,19 +775,15 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc) do { if (wilc->quit) { - PRINT_D(RX_DBG, "exit 1st do-while due to Clean_UP function\n"); up(&wilc->cfg_event); break; } rqe = wilc_wlan_rxq_remove(wilc); - if (!rqe) { - PRINT_D(RX_DBG, "nothing in the queue - exit 1st do-while\n"); + if (!rqe) break; - } + buffer = rqe->buffer; size = rqe->buffer_size; - PRINT_D(RX_DBG, "rxQ entery Size = %d - Address = %p\n", - size, buffer); offset = 0; do { @@ -950,21 +791,16 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc) u32 pkt_len, pkt_offset, tp_len; int is_cfg_packet; - PRINT_D(RX_DBG, "In the 2nd do-while\n"); memcpy(&header, &buffer[offset], 4); header = cpu_to_le32(header); - PRINT_D(RX_DBG, "Header = %04x - Offset = %d\n", - header, offset); is_cfg_packet = (header >> 31) & 0x1; pkt_offset = (header >> 22) & 0x1ff; tp_len = (header >> 11) & 0x7ff; pkt_len = header & 0x7ff; - if (pkt_len == 0 || tp_len == 0) { - wilc_debug(N_RXQ, "[wilc rxq]: data corrupt, packet len or tp_len is 0 [%d][%d]\n", pkt_len, tp_len); + if (pkt_len == 0 || tp_len == 0) break; - } #define IS_MANAGMEMENT 0x100 #define IS_MANAGMEMENT_CALLBACK 0x080 @@ -983,14 +819,12 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc) &buffer[offset], pkt_len, pkt_offset); - has_packet = 1; } } else { struct wilc_cfg_rsp rsp; wilc_wlan_cfg_indicate_rx(wilc, &buffer[pkt_offset + offset], pkt_len, &rsp); if (rsp.type == WILC_CFG_RSP) { - PRINT_D(RX_DBG, "wilc->cfg_seq_no = %d - rsp.seq_no = %d\n", wilc->cfg_seq_no, rsp.seq_no); if (wilc->cfg_seq_no == rsp.seq_no) up(&wilc->cfg_event); } else if (rsp.type == WILC_CFG_RSP_STATUS) { @@ -1006,14 +840,9 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc) break; } while (1); kfree(rqe); - - if (has_packet) - wilc_rx_complete(wilc); - } while (1); wilc->rxq_exit = 1; - PRINT_D(RX_DBG, "THREAD: Exiting RX thread\n"); } static void wilc_unknown_isr_ext(struct wilc *wilc) @@ -1032,18 +861,13 @@ static void wilc_pllupdate_isr_ext(struct wilc *wilc, u32 int_stats) else mdelay(WILC_PLL_TO_SPI); - while (!(ISWILC1000(wilc_get_chipid(wilc, true)) && --trials)) { - PRINT_D(TX_DBG, "PLL update retrying\n"); + while (!(ISWILC1000(wilc_get_chipid(wilc, true)) && --trials)) mdelay(1); - } } static void wilc_sleeptimer_isr_ext(struct wilc *wilc, u32 int_stats1) { wilc->hif_func->hif_clear_int_ext(wilc, SLEEP_INT_CLR); -#ifndef WILC_OPTIMIZE_SLEEP_INT - chip_ps_state = CHIP_SLEEPING_AUTO; -#endif } static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status) @@ -1055,14 +879,11 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status) int ret = 0; struct rxq_entry_t *rqe; - size = ((int_status & 0x7fff) << 2); + size = (int_status & 0x7fff) << 2; while (!size && retries < 10) { - u32 time = 0; - - wilc_debug(N_ERR, "RX Size equal zero ... 
Trying to read it again for %d time\n", time++); wilc->hif_func->hif_read_size(wilc, &size); - size = ((size & 0x7fff) << 2); + size = (size & 0x7fff) << 2; retries++; } @@ -1070,21 +891,17 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status) if (LINUX_RX_SIZE - offset < size) offset = 0; - if (wilc->rx_buffer) { + if (wilc->rx_buffer) buffer = &wilc->rx_buffer[offset]; - } else { - wilc_debug(N_ERR, "[wilc isr]: fail Rx Buffer is NULL...drop the packets (%d)\n", size); + else goto _end_; - } wilc->hif_func->hif_clear_int_ext(wilc, DATA_INT_CLR | ENABLE_RX_VMM); ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size); - if (!ret) { - wilc_debug(N_ERR, "[wilc isr]: fail block rx...\n"); + if (!ret) goto _end_; - } _end_: if (ret) { offset += size; @@ -1093,7 +910,6 @@ _end_: if (rqe) { rqe->buffer = buffer; rqe->buffer_size = size; - PRINT_D(RX_DBG, "rxq entery Size= %d - Address = %p\n", rqe->buffer_size, rqe->buffer); wilc_wlan_rxq_add(wilc, rqe); } } @@ -1111,23 +927,21 @@ void wilc_handle_isr(struct wilc *wilc) if (int_status & PLL_INT_EXT) wilc_pllupdate_isr_ext(wilc, int_status); - if (int_status & DATA_INT_EXT) { + if (int_status & DATA_INT_EXT) wilc_wlan_handle_isr_ext(wilc, int_status); - #ifndef WILC_OPTIMIZE_SLEEP_INT - chip_ps_state = CHIP_WAKEDUP; - #endif - } + if (int_status & SLEEP_INT_EXT) wilc_sleeptimer_isr_ext(wilc, int_status); - if (!(int_status & (ALL_INT_EXT))) { + if (!(int_status & (ALL_INT_EXT))) wilc_unknown_isr_ext(wilc); - } + release_bus(wilc, RELEASE_ALLOW_SLEEP); } EXPORT_SYMBOL_GPL(wilc_handle_isr); -int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_size) +int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, + u32 buffer_size) { u32 offset; u32 addr, size, size2, blksz; @@ -1139,12 +953,9 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_ dma_buffer = kmalloc(blksz, GFP_KERNEL); if (!dma_buffer) { ret = -EIO; - PRINT_ER("Can't allocate buffer for firmware download IO error\n "); goto _fail_1; } - PRINT_D(INIT_DBG, "Downloading firmware size = %d ...\n", buffer_size); - offset = 0; do { memcpy(&addr, &buffer[offset], 4); @@ -1160,8 +971,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_ size2 = blksz; memcpy(dma_buffer, &buffer[offset], size2); - ret = wilc->hif_func->hif_block_tx(wilc, addr, dma_buffer, - size2); + ret = wilc->hif_func->hif_block_tx(wilc, addr, + dma_buffer, size2); if (!ret) break; @@ -1173,10 +984,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_ if (!ret) { ret = -EIO; - PRINT_ER("Can't download firmware IO error\n "); goto _fail_; } - PRINT_D(INIT_DBG, "Offset = %d\n", offset); } while (offset < buffer_size); _fail_: @@ -1203,7 +1012,6 @@ int wilc_wlan_start(struct wilc *wilc) acquire_bus(wilc, ACQUIRE_ONLY); ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg); if (!ret) { - wilc_debug(N_ERR, "[wilc start]: fail write reg vmm_core_cfg...\n"); release_bus(wilc, RELEASE_ONLY); ret = -EIO; return ret; @@ -1226,7 +1034,7 @@ int wilc_wlan_start(struct wilc *wilc) #ifdef WILC_EXT_PA_INV_TX_RX reg |= WILC_HAVE_EXT_PA_INV_TX_RX; #endif - + reg |= WILC_HAVE_USE_IRQ_AS_HOST_WAKE; reg |= WILC_HAVE_LEGACY_RF_SETTINGS; #ifdef XTAL_24 reg |= WILC_HAVE_XTAL_24; @@ -1237,7 +1045,6 @@ int wilc_wlan_start(struct wilc *wilc) ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg); if (!ret) { - wilc_debug(N_ERR, "[wilc start]: fail write WILC_GP_REG_1 ...\n"); 
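As a side note on the RX path touched above, the following is a minimal, illustrative decode of the 32-bit header parsed in wilc_wlan_handle_rxq(). The struct and function names are invented for this sketch and are not part of the patch, but the shifts and masks match the existing code.

/* Bit 31 flags a cfg response, bits 30..22 carry the payload offset,
 * bits 21..11 the total transfer length, bits 10..0 the packet length.
 */
struct wilc_rx_hdr {
	u32 pkt_len;
	u32 tp_len;
	u32 pkt_offset;
	bool is_cfg_packet;
};

static inline struct wilc_rx_hdr wilc_decode_rx_hdr(u32 header)
{
	struct wilc_rx_hdr h;

	h.is_cfg_packet = (header >> 31) & 0x1;
	h.pkt_offset = (header >> 22) & 0x1ff;
	h.tp_len = (header >> 11) & 0x7ff;
	h.pkt_len = header & 0x7ff;

	return h;
}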
release_bus(wilc, RELEASE_ONLY); ret = -EIO; return ret; @@ -1247,7 +1054,6 @@ int wilc_wlan_start(struct wilc *wilc) ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid); if (!ret) { - wilc_debug(N_ERR, "[wilc start]: fail read reg 0x1000 ...\n"); release_bus(wilc, RELEASE_ONLY); ret = -EIO; return ret; @@ -1268,22 +1074,16 @@ int wilc_wlan_start(struct wilc *wilc) return (ret < 0) ? ret : 0; } -void wilc_wlan_global_reset(struct wilc *wilc) -{ - acquire_bus(wilc, ACQUIRE_AND_WAKEUP); - wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, 0x0); - release_bus(wilc, RELEASE_ONLY); -} int wilc_wlan_stop(struct wilc *wilc) { u32 reg = 0; int ret; u8 timeout = 10; + acquire_bus(wilc, ACQUIRE_AND_WAKEUP); ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, ®); if (!ret) { - PRINT_ER("Error while reading reg\n"); release_bus(wilc, RELEASE_ALLOW_SLEEP); return ret; } @@ -1291,40 +1091,32 @@ int wilc_wlan_stop(struct wilc *wilc) reg &= ~BIT(10); ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg); if (!ret) { - PRINT_ER("Error while writing reg\n"); release_bus(wilc, RELEASE_ALLOW_SLEEP); return ret; } do { - ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, ®); + ret = wilc->hif_func->hif_read_reg(wilc, + WILC_GLB_RESET_0, ®); if (!ret) { - PRINT_ER("Error while reading reg\n"); release_bus(wilc, RELEASE_ALLOW_SLEEP); return ret; } - PRINT_D(GENERIC_DBG, "Read RESET Reg %x : Retry%d\n", - reg, timeout); if ((reg & BIT(10))) { - PRINT_D(GENERIC_DBG, "Bit 10 not reset : Retry %d\n", - timeout); reg &= ~BIT(10); - ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, - reg); + ret = wilc->hif_func->hif_write_reg(wilc, + WILC_GLB_RESET_0, + reg); timeout--; } else { - PRINT_D(GENERIC_DBG, "Bit 10 reset after : Retry %d\n", - timeout); - ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, - ®); + ret = wilc->hif_func->hif_read_reg(wilc, + WILC_GLB_RESET_0, + ®); if (!ret) { - PRINT_ER("Error while reading reg\n"); release_bus(wilc, RELEASE_ALLOW_SLEEP); return ret; } - PRINT_D(GENERIC_DBG, "Read RESET Reg %x : Retry%d\n", - reg, timeout); break; } @@ -1379,23 +1171,22 @@ void wilc_wlan_cleanup(struct net_device *dev) acquire_bus(wilc, ACQUIRE_AND_WAKEUP); ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, ®); - if (!ret) { - PRINT_ER("Error while reading reg\n"); + if (!ret) release_bus(wilc, RELEASE_ALLOW_SLEEP); - } - PRINT_ER("Writing ABORT reg\n"); + ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0, (reg | ABORT_INT)); - if (!ret) { - PRINT_ER("Error while writing reg\n"); + if (!ret) release_bus(wilc, RELEASE_ALLOW_SLEEP); - } + release_bus(wilc, RELEASE_ALLOW_SLEEP); wilc->hif_func->hif_deinit(NULL); } -static int wilc_wlan_cfg_commit(struct wilc *wilc, int type, u32 drv_handler) +static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type, + u32 drv_handler) { + struct wilc *wilc = vif->wilc; struct wilc_cfg_frame *cfg = &wilc->cfg_frame; int total_len = wilc->cfg_frame_offset + 4 + DRIVER_HANDLER_SIZE; int seq_no = wilc->cfg_seq_no % 256; @@ -1414,17 +1205,18 @@ static int wilc_wlan_cfg_commit(struct wilc *wilc, int type, u32 drv_handler) cfg->wid_header[7] = (u8)(driver_handler >> 24); wilc->cfg_seq_no = seq_no; - if (!wilc_wlan_txq_add_cfg_pkt(wilc, &cfg->wid_header[0], total_len)) + if (!wilc_wlan_txq_add_cfg_pkt(vif, &cfg->wid_header[0], total_len)) return -1; return 0; } -int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer, +int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer, u32 
buffer_size, int commit, u32 drv_handler) { u32 offset; int ret_size; + struct wilc *wilc = vif->wilc; if (wilc->cfg_frame_in_use) return 0; @@ -1439,17 +1231,18 @@ int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer, wilc->cfg_frame_offset = offset; if (commit) { - PRINT_D(TX_DBG, "[WILC]PACKET Commit with sequence number %d\n", - wilc->cfg_seq_no); - PRINT_D(RX_DBG, "Processing cfg_set()\n"); + netdev_dbg(vif->ndev, + "[WILC]PACKET Commit with sequence number %d\n", + wilc->cfg_seq_no); + netdev_dbg(vif->ndev, "Processing cfg_set()\n"); wilc->cfg_frame_in_use = 1; - if (wilc_wlan_cfg_commit(wilc, WILC_CFG_SET, drv_handler)) + if (wilc_wlan_cfg_commit(vif, WILC_CFG_SET, drv_handler)) ret_size = 0; if (wilc_lock_timeout(wilc, &wilc->cfg_event, CFG_PKTS_TIMEOUT)) { - PRINT_D(TX_DBG, "Set Timed Out\n"); + netdev_dbg(vif->ndev, "Set Timed Out\n"); ret_size = 0; } wilc->cfg_frame_in_use = 0; @@ -1460,11 +1253,12 @@ int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer, return ret_size; } -int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit, +int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit, u32 drv_handler) { u32 offset; int ret_size; + struct wilc *wilc = vif->wilc; if (wilc->cfg_frame_in_use) return 0; @@ -1481,15 +1275,14 @@ int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit, if (commit) { wilc->cfg_frame_in_use = 1; - if (wilc_wlan_cfg_commit(wilc, WILC_CFG_QUERY, drv_handler)) + if (wilc_wlan_cfg_commit(vif, WILC_CFG_QUERY, drv_handler)) ret_size = 0; if (wilc_lock_timeout(wilc, &wilc->cfg_event, CFG_PKTS_TIMEOUT)) { - PRINT_D(TX_DBG, "Get Timed Out\n"); + netdev_dbg(vif->ndev, "Get Timed Out\n"); ret_size = 0; } - PRINT_D(GENERIC_DBG, "[WILC]Get Response received\n"); wilc->cfg_frame_in_use = 0; wilc->cfg_frame_offset = 0; wilc->cfg_seq_no += 1; @@ -1500,9 +1293,43 @@ int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit, int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size) { - int ret; + return wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size); +} - ret = wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size); +int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids, + u32 count, u32 drv) +{ + int i; + int ret = 0; + + if (mode == GET_CFG) { + for (i = 0; i < count; i++) { + if (!wilc_wlan_cfg_get(vif, !i, + wids[i].id, + (i == count - 1), + drv)) { + ret = -ETIMEDOUT; + break; + } + } + for (i = 0; i < count; i++) { + wids[i].size = wilc_wlan_cfg_get_val(wids[i].id, + wids[i].val, + wids[i].size); + } + } else if (mode == SET_CFG) { + for (i = 0; i < count; i++) { + if (!wilc_wlan_cfg_set(vif, !i, + wids[i].id, + wids[i].val, + wids[i].size, + (i == count - 1), + drv)) { + ret = -ETIMEDOUT; + break; + } + } + } return ret; } @@ -1524,18 +1351,18 @@ static u32 init_chip(struct net_device *dev) if ((chipid & 0xfff) != 0xa0) { ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, ®); if (!ret) { - wilc_debug(N_ERR, "[wilc start]: fail read reg 0x1118 ...\n"); + netdev_err(dev, "fail read reg 0x1118\n"); return ret; } reg |= BIT(0); ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg); if (!ret) { - wilc_debug(N_ERR, "[wilc start]: fail write reg 0x1118 ...\n"); + netdev_err(dev, "fail write reg 0x1118\n"); return ret; } ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71); if (!ret) { - wilc_debug(N_ERR, "[wilc start]: fail write reg 0xc0000 ...\n"); + netdev_err(dev, "fail write reg 0xc0000\n"); return ret; } } @@ -1545,36 
+1372,31 @@ static u32 init_chip(struct net_device *dev) return ret; } -u32 wilc_get_chipid(struct wilc *wilc, u8 update) +u32 wilc_get_chipid(struct wilc *wilc, bool update) { static u32 chipid; u32 tempchipid = 0; - u32 rfrevid; + u32 rfrevid = 0; - if (chipid == 0 || update != 0) { + if (chipid == 0 || update) { wilc->hif_func->hif_read_reg(wilc, 0x1000, &tempchipid); wilc->hif_func->hif_read_reg(wilc, 0x13f4, &rfrevid); if (!ISWILC1000(tempchipid)) { chipid = 0; - goto _fail_; + return chipid; } if (tempchipid == 0x1002a0) { - if (rfrevid == 0x1) { - } else { + if (rfrevid != 0x1) tempchipid = 0x1002a1; - } } else if (tempchipid == 0x1002b0) { - if (rfrevid == 3) { - } else if (rfrevid == 4) { + if (rfrevid == 0x4) tempchipid = 0x1002b1; - } else { + else if (rfrevid != 0x3) tempchipid = 0x1002b2; - } } chipid = tempchipid; } -_fail_: return chipid; } @@ -1586,34 +1408,31 @@ int wilc_wlan_init(struct net_device *dev) wilc = vif->wilc; - PRINT_D(INIT_DBG, "Initializing WILC_Wlan ...\n"); + wilc->quit = 0; - if (!wilc->hif_func->hif_init(wilc)) { + if (!wilc->hif_func->hif_init(wilc, false)) { ret = -EIO; goto _fail_; } - if (!wilc_wlan_cfg_init(wilc_debug)) { + if (!wilc_wlan_cfg_init()) { ret = -ENOBUFS; goto _fail_; } if (!wilc->tx_buffer) wilc->tx_buffer = kmalloc(LINUX_TX_SIZE, GFP_KERNEL); - PRINT_D(TX_DBG, "wilc->tx_buffer = %p\n", wilc->tx_buffer); if (!wilc->tx_buffer) { ret = -ENOBUFS; - PRINT_ER("Can't allocate Tx Buffer"); goto _fail_; } if (!wilc->rx_buffer) wilc->rx_buffer = kmalloc(LINUX_RX_SIZE, GFP_KERNEL); - PRINT_D(TX_DBG, "wilc->rx_buffer =%p\n", wilc->rx_buffer); + if (!wilc->rx_buffer) { ret = -ENOBUFS; - PRINT_ER("Can't allocate Rx Buffer"); goto _fail_; } @@ -1621,9 +1440,7 @@ int wilc_wlan_init(struct net_device *dev) ret = -EIO; goto _fail_; } -#ifdef TCP_ACK_FILTER init_tcp_tracking(); -#endif return 1; @@ -1636,35 +1453,3 @@ _fail_: return ret; } - -u16 wilc_set_machw_change_vir_if(struct net_device *dev, bool value) -{ - u16 ret; - u32 reg; - struct wilc_vif *vif; - struct wilc *wilc; - - vif = netdev_priv(dev); - wilc = vif->wilc; - - mutex_lock(&wilc->hif_cs); - ret = wilc->hif_func->hif_read_reg(wilc, WILC_CHANGING_VIR_IF, - ®); - if (!ret) - PRINT_ER("Error while Reading reg WILC_CHANGING_VIR_IF\n"); - - if (value) - reg |= BIT(31); - else - reg &= ~BIT(31); - - ret = wilc->hif_func->hif_write_reg(wilc, WILC_CHANGING_VIR_IF, - reg); - - if (!ret) - PRINT_ER("Error while writing reg WILC_CHANGING_VIR_IF\n"); - - mutex_unlock(&wilc->hif_cs); - - return ret; -} diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h index 2edd7445f4a3..bcd4bfa5accc 100644 --- a/drivers/staging/wilc1000/wilc_wlan.h +++ b/drivers/staging/wilc1000/wilc_wlan.h @@ -106,6 +106,7 @@ #define WILC_HAVE_LEGACY_RF_SETTINGS BIT(5) #define WILC_HAVE_XTAL_24 BIT(6) #define WILC_HAVE_DISABLE_WILC_UART BIT(7) +#define WILC_HAVE_USE_IRQ_AS_HOST_WAKE BIT(8) /******************************************** * @@ -127,6 +128,11 @@ #define WILC_PLL_TO_SPI 2 #define ABORT_INT BIT(31) +#define LINUX_RX_SIZE (96 * 1024) +#define LINUX_TX_SIZE (64 * 1024) + +#define MODALIAS "WILC_SPI" +#define GPIO_NUM 0x44 /*******************************************/ /* E0 and later Interrupt flags. 
*/ /*******************************************/ @@ -226,7 +232,7 @@ struct rxq_entry_t { ********************************************/ struct wilc; struct wilc_hif_func { - int (*hif_init)(struct wilc *); + int (*hif_init)(struct wilc *, bool resume); int (*hif_deinit)(struct wilc *); int (*hif_read_reg)(struct wilc *, u32, u32 *); int (*hif_write_reg)(struct wilc *, u32, u32); @@ -267,8 +273,10 @@ struct wilc_cfg_rsp { }; struct wilc; +struct wilc_vif; -int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_size); +int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, + u32 buffer_size); int wilc_wlan_start(struct wilc *); int wilc_wlan_stop(struct wilc *); int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer, @@ -276,9 +284,9 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer, int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count); void wilc_handle_isr(struct wilc *wilc); void wilc_wlan_cleanup(struct net_device *dev); -int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer, +int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer, u32 buffer_size, int commit, u32 drv_handler); -int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit, +int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit, u32 drv_handler); int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size); int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer, @@ -292,9 +300,12 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev); int wilc_mac_open(struct net_device *ndev); int wilc_mac_close(struct net_device *ndev); -int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *pBSSID); void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size); - +void host_wakeup_notify(struct wilc *wilc); +void host_sleep_notify(struct wilc *wilc); extern bool wilc_enable_ps; - +void chip_allow_sleep(struct wilc *wilc); +void chip_wakeup(struct wilc *wilc); +int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids, + u32 count, u32 drv); #endif diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c index b72c77bb35f1..b3425b9cec94 100644 --- a/drivers/staging/wilc1000/wilc_wlan_cfg.c +++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c @@ -18,10 +18,15 @@ * Global Data * ********************************************/ +enum cfg_cmd_type { + CFG_BYTE_CMD = 0, + CFG_HWORD_CMD = 1, + CFG_WORD_CMD = 2, + CFG_STR_CMD = 3, + CFG_BIN_CMD = 4 +}; -typedef struct { - wilc_debug_func dPrint; - +struct wilc_mac_cfg { int mac_status; u8 mac_address[7]; u8 ip_address[5]; @@ -31,7 +36,7 @@ typedef struct { u8 supp_rate[24]; u8 wep_key[28]; u8 i_psk[66]; - u8 hardwareProductVersion[33]; + u8 hw_product_version[33]; u8 phyversion[17]; u8 supp_username[21]; u8 supp_password[64]; @@ -40,11 +45,11 @@ typedef struct { u8 firmware_info[8]; u8 scan_result[256]; u8 scan_result1[256]; -} wilc_mac_cfg_t; +}; -static wilc_mac_cfg_t g_mac; +static struct wilc_mac_cfg g_mac; -static wilc_cfg_byte_t g_cfg_byte[] = { +static struct wilc_cfg_byte g_cfg_byte[] = { {WID_BSS_TYPE, 0}, {WID_CURRENT_TX_RATE, 0}, {WID_CURRENT_CHANNEL, 0}, @@ -87,7 +92,7 @@ static wilc_cfg_byte_t g_cfg_byte[] = { {WID_NIL, 0} }; -static wilc_cfg_hword_t g_cfg_hword[] = { +static struct wilc_cfg_hword g_cfg_hword[] = { {WID_LINK_LOSS_THRESHOLD, 0}, {WID_RTS_THRESHOLD, 0}, {WID_FRAG_THRESHOLD, 0}, @@ -108,7 +113,7 @@ static 
wilc_cfg_hword_t g_cfg_hword[] = { {WID_NIL, 0} }; -static wilc_cfg_word_t g_cfg_word[] = { +static struct wilc_cfg_word g_cfg_word[] = { {WID_FAILED_COUNT, 0}, {WID_RETRY_COUNT, 0}, {WID_MULTIPLE_RETRY_COUNT, 0}, @@ -131,25 +136,22 @@ static wilc_cfg_word_t g_cfg_word[] = { }; -static wilc_cfg_str_t g_cfg_str[] = { +static struct wilc_cfg_str g_cfg_str[] = { {WID_SSID, g_mac.ssid}, /* 33 + 1 bytes */ {WID_FIRMWARE_VERSION, g_mac.firmware_version}, {WID_OPERATIONAL_RATE_SET, g_mac.supp_rate}, {WID_BSSID, g_mac.bssid}, /* 6 bytes */ {WID_WEP_KEY_VALUE, g_mac.wep_key}, /* 27 bytes */ {WID_11I_PSK, g_mac.i_psk}, /* 65 bytes */ - /* {WID_11E_P_ACTION_REQ, g_mac.action_req}, */ - {WID_HARDWARE_VERSION, g_mac.hardwareProductVersion}, + {WID_HARDWARE_VERSION, g_mac.hw_product_version}, {WID_MAC_ADDR, g_mac.mac_address}, {WID_PHY_VERSION, g_mac.phyversion}, {WID_SUPP_USERNAME, g_mac.supp_username}, {WID_SUPP_PASSWORD, g_mac.supp_password}, {WID_SITE_SURVEY_RESULTS, g_mac.scan_result}, {WID_SITE_SURVEY_RESULTS, g_mac.scan_result1}, - /* {WID_RX_POWER_LEVEL, g_mac.channel_rssi}, */ {WID_ASSOC_REQ_INFO, g_mac.assoc_req}, {WID_ASSOC_RES_INFO, g_mac.assoc_rsp}, - /* {WID_11N_P_ACTION_REQ, g_mac.action_req}, */ {WID_FIRMWARE_INFO, g_mac.firmware_version}, {WID_IP_ADDRESS, g_mac.ip_address}, {WID_NIL, NULL} @@ -270,13 +272,12 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size) static void wilc_wlan_parse_response_frame(u8 *info, int size) { u32 wid, len = 0, i = 0; - static int seq; while (size > 0) { i = 0; wid = info[0] | (info[1] << 8); wid = cpu_to_le32(wid); - PRINT_INFO(GENERIC_DBG, "Processing response for %d seq %d\n", wid, seq++); + switch ((wid >> 12) & 0x7) { case WID_CHAR: do { @@ -329,10 +330,6 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size) if (wid == WID_SITE_SURVEY_RESULTS) { static int toggle; - PRINT_INFO(GENERIC_DBG, "Site survey results received[%d]\n", - size); - - PRINT_INFO(GENERIC_DBG, "Site survey results value[%d]toggle[%d]\n", size, toggle); i += toggle; toggle ^= 1; } @@ -354,14 +351,14 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size) static int wilc_wlan_parse_info_frame(u8 *info, int size) { - wilc_mac_cfg_t *pd = &g_mac; + struct wilc_mac_cfg *pd = &g_mac; u32 wid, len; int type = WILC_CFG_RSP_STATUS; wid = info[0] | (info[1] << 8); len = info[2]; - PRINT_INFO(GENERIC_DBG, "Status Len = %d Id= %d\n", len, wid); + if ((len == 1) && (wid == WID_STATUS)) { pd->mac_status = info[3]; type = WILC_CFG_RSP_STATUS; @@ -381,21 +378,31 @@ int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size) u8 type = (id >> 12) & 0xf; int ret = 0; - if (type == 0) { /* byte command */ + switch (type) { + case CFG_BYTE_CMD: if (size >= 1) ret = wilc_wlan_cfg_set_byte(frame, offset, id, *buf); - } else if (type == 1) { /* half word command */ + break; + + case CFG_HWORD_CMD: if (size >= 2) - ret = wilc_wlan_cfg_set_hword(frame, offset, id, *((u16 *)buf)); - } else if (type == 2) { /* word command */ + ret = wilc_wlan_cfg_set_hword(frame, offset, id, + *((u16 *)buf)); + break; + + case CFG_WORD_CMD: if (size >= 4) - ret = wilc_wlan_cfg_set_word(frame, offset, id, *((u32 *)buf)); - } else if (type == 3) { /* string command */ + ret = wilc_wlan_cfg_set_word(frame, offset, id, + *((u32 *)buf)); + break; + + case CFG_STR_CMD: ret = wilc_wlan_cfg_set_str(frame, offset, id, buf, size); - } else if (type == 4) { /* binary command */ + break; + + case CFG_BIN_CMD: ret = wilc_wlan_cfg_set_bin(frame, offset, id, buf, size); - 
} else { - g_mac.dPrint(N_ERR, "illegal id\n"); + break; } return ret; @@ -427,7 +434,7 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size) } i = 0; - if (type == 0) { /* byte command */ + if (type == CFG_BYTE_CMD) { do { if (g_cfg_byte[i].id == WID_NIL) break; @@ -439,7 +446,7 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size) } i++; } while (1); - } else if (type == 1) { /* half word command */ + } else if (type == CFG_HWORD_CMD) { do { if (g_cfg_hword[i].id == WID_NIL) break; @@ -451,7 +458,7 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size) } i++; } while (1); - } else if (type == 2) { /* word command */ + } else if (type == CFG_WORD_CMD) { do { if (g_cfg_word[i].id == WID_NIL) break; @@ -463,7 +470,7 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size) } i++; } while (1); - } else if (type == 3) { /* string command */ + } else if (type == CFG_STR_CMD) { do { if (g_cfg_str[i].id == WID_NIL) break; @@ -475,8 +482,6 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size) if (g_cfg_str[i].id == WID_SITE_SURVEY_RESULTS) { static int toggle; - PRINT_INFO(GENERIC_DBG, "Site survey results value[%d]\n", - size); i += toggle; toggle ^= 1; @@ -488,8 +493,6 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size) } i++; } while (1); - } else { - g_mac.dPrint(N_ERR, "[CFG]: illegal type (%08x)\n", wid); } return ret; @@ -522,7 +525,6 @@ int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size, rsp->type = wilc_wlan_parse_info_frame(frame, size); rsp->seq_no = msg_id; /*call host interface info parse as well*/ - PRINT_INFO(RX_DBG, "Info message received\n"); wilc_gnrl_async_info_received(wilc, frame - 4, size + 4); break; @@ -532,14 +534,10 @@ int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size, break; case 'S': - PRINT_INFO(RX_DBG, "Scan Notification Received\n"); wilc_scan_complete_received(wilc, frame - 4, size + 4); break; default: - PRINT_INFO(RX_DBG, "Receive unknown message type[%d-%d-%d-%d-%d-%d-%d-%d]\n", - frame[0], frame[1], frame[2], frame[3], frame[4], - frame[5], frame[6], frame[7]); rsp->type = 0; rsp->seq_no = msg_id; ret = 0; @@ -549,9 +547,8 @@ int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size, return ret; } -int wilc_wlan_cfg_init(wilc_debug_func func) +int wilc_wlan_cfg_init(void) { - memset((void *)&g_mac, 0, sizeof(wilc_mac_cfg_t)); - g_mac.dPrint = func; + memset((void *)&g_mac, 0, sizeof(struct wilc_mac_cfg)); return 1; } diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wilc_wlan_cfg.h index 5f74eb83562f..b8641a273547 100644 --- a/drivers/staging/wilc1000/wilc_wlan_cfg.h +++ b/drivers/staging/wilc1000/wilc_wlan_cfg.h @@ -10,25 +10,25 @@ #ifndef WILC_WLAN_CFG_H #define WILC_WLAN_CFG_H -typedef struct { +struct wilc_cfg_byte { u16 id; u16 val; -} wilc_cfg_byte_t; +}; -typedef struct { +struct wilc_cfg_hword { u16 id; u16 val; -} wilc_cfg_hword_t; +}; -typedef struct { +struct wilc_cfg_word { u32 id; u32 val; -} wilc_cfg_word_t; +}; -typedef struct { +struct wilc_cfg_str { u32 id; u8 *str; -} wilc_cfg_str_t; +}; struct wilc; int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size); @@ -36,6 +36,6 @@ int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id); int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size); int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size, struct wilc_cfg_rsp *rsp); -int wilc_wlan_cfg_init(wilc_debug_func 
func); +int wilc_wlan_cfg_init(void); #endif diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h index 618903caff54..83cf84dd63b5 100644 --- a/drivers/staging/wilc1000/wilc_wlan_if.h +++ b/drivers/staging/wilc1000/wilc_wlan_if.h @@ -11,7 +11,6 @@ #define WILC_WLAN_IF_H #include <linux/semaphore.h> -#include "linux_wlan_common.h" #include <linux/netdevice.h> /******************************************** @@ -51,26 +50,24 @@ * ********************************************/ -typedef struct { +struct sdio_cmd52 { u32 read_write: 1; u32 function: 3; u32 raw: 1; u32 address: 17; u32 data: 8; -} sdio_cmd52_t; +}; -typedef struct { - /* struct { */ +struct sdio_cmd53 { u32 read_write: 1; u32 function: 3; u32 block_mode: 1; u32 increment: 1; u32 address: 17; u32 count: 9; - /* } bit; */ u8 *buffer; u32 block_size; -} sdio_cmd53_t; +}; #define WILC_MAC_INDICATE_STATUS 0x1 #define WILC_MAC_STATUS_INIT -1 @@ -82,7 +79,7 @@ typedef struct { struct tx_complete_data { int size; void *buff; - u8 *pBssid; + u8 *bssid; struct sk_buff *skb; }; @@ -95,12 +92,10 @@ typedef void (*wilc_tx_complete_func_t)(void *, int); * Wlan Configuration ID * ********************************************/ - +#define WILC_MULTICAST_TABLE_SIZE 8 #define MAX_SSID_LEN 33 #define MAX_RATES_SUPPORTED 12 -#define INFINITE_SLEEP_TIME ((u32)0xFFFFFFFF) - typedef enum { SUPP_RATES_IE = 1, EXT_SUPP_RATES_IE = 50, @@ -300,6 +295,13 @@ enum wid_type { WID_TYPE_FORCE_32BIT = 0xFFFFFFFF }; +struct wid { + u16 id; + enum wid_type type; + s32 size; + s8 *val; +}; + typedef enum { WID_NIL = 0xffff, @@ -761,6 +763,7 @@ typedef enum { WID_DEL_BEACON = 0x00CA, WID_LOGTerminal_Switch = 0x00CD, + WID_TX_POWER = 0x00CE, /* EMAC Short WID list */ /* RTS Threshold */ /* @@ -832,7 +835,6 @@ typedef enum { /* Custom Integer WID list */ WID_GET_INACTIVE_TIME = 0x2084, - WID_SET_DRV_HANDLER = 0X2085, WID_SET_OPERATION_MODE = 0X2086, /* EMAC String WID list */ WID_SSID = 0x3000, @@ -865,6 +867,7 @@ typedef enum { WID_MODEL_NAME = 0x3027, /*Added for CAPI tool */ WID_MODEL_NUM = 0x3028, /*Added for CAPI tool */ WID_DEVICE_NAME = 0x3029, /*Added for CAPI tool */ + WID_SET_DRV_HANDLER = 0x3030, /* NMAC String WID list */ WID_11N_P_ACTION_REQ = 0x3080, @@ -911,8 +914,6 @@ typedef enum { struct wilc; int wilc_wlan_init(struct net_device *dev); -void wilc_bus_set_max_speed(void); -void wilc_bus_set_default_speed(void); -u32 wilc_get_chipid(struct wilc *wilc, u8 update); +u32 wilc_get_chipid(struct wilc *wilc, bool update); #endif diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 8c1e3f06a215..8bad018eda47 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -73,13 +73,13 @@ static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data) { struct p80211msg_dot11req_mibset msg; p80211item_uint32_t *mibitem = - (p80211item_uint32_t *) &msg.mibattribute.data; + (p80211item_uint32_t *)&msg.mibattribute.data; msg.msgcode = DIDmsg_dot11req_mibset; mibitem->did = did; mibitem->data = data; - return p80211req_dorequest(wlandev, (u8 *) &msg); + return p80211req_dorequest(wlandev, (u8 *)&msg); } static int prism2_domibset_pstr32(wlandevice_t *wlandev, @@ -87,14 +87,14 @@ static int prism2_domibset_pstr32(wlandevice_t *wlandev, { struct p80211msg_dot11req_mibset msg; p80211item_pstr32_t *mibitem = - (p80211item_pstr32_t *) &msg.mibattribute.data; + (p80211item_pstr32_t *)&msg.mibattribute.data; msg.msgcode = DIDmsg_dot11req_mibset; mibitem->did 
= did; mibitem->data.len = len; memcpy(mibitem->data.data, data, len); - return p80211req_dorequest(wlandev, (u8 *) &msg); + return p80211req_dorequest(wlandev, (u8 *)&msg); } /* The interface functions, called by the cfg80211 layer */ @@ -239,7 +239,9 @@ static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, int result = 0; /* There is no direct way in the hardware (AFAIK) of removing - a key, so we will cheat by setting the key to a bogus value */ + * a key, so we will cheat by setting the key to a bogus value + */ + /* send key to driver */ switch (key_index) { case 0: @@ -315,7 +317,7 @@ static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev, if (wlandev->mlmerequest == NULL) return -EOPNOTSUPP; - result = wlandev->mlmerequest(wlandev, (struct p80211msg *) &quality); + result = wlandev->mlmerequest(wlandev, (struct p80211msg *)&quality); if (result == 0) { sinfo->txrate.legacy = quality.txrate.data; @@ -387,7 +389,7 @@ static int prism2_scan(struct wiphy *wiphy, msg1.maxchanneltime.data = 250; msg1.minchanneltime.data = 200; - result = p80211req_dorequest(wlandev, (u8 *) &msg1); + result = p80211req_dorequest(wlandev, (u8 *)&msg1); if (result) { err = prism2_result2err(msg1.resultcode.data); goto exit; @@ -402,7 +404,7 @@ static int prism2_scan(struct wiphy *wiphy, msg2.msgcode = DIDmsg_dot11req_scan_results; msg2.bssindex.data = i; - result = p80211req_dorequest(wlandev, (u8 *) &msg2); + result = p80211req_dorequest(wlandev, (u8 *)&msg2); if ((result != 0) || (msg2.resultcode.data != P80211ENUM_resultcode_success)) { break; @@ -417,7 +419,7 @@ static int prism2_scan(struct wiphy *wiphy, bss = cfg80211_inform_bss(wiphy, ieee80211_get_channel(wiphy, freq), CFG80211_BSS_FTYPE_UNKNOWN, - (const u8 *) &(msg2.bssid.data.data), + (const u8 *)&(msg2.bssid.data.data), msg2.timestamp.data, msg2.capinfo.data, msg2.beaconperiod.data, ie_buf, @@ -558,12 +560,12 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev, (u8 *)sme->key); if (result) goto exit; - } /* Assume we should set privacy invoked and exclude unencrypted - We could possibly use sme->privacy here, but the assumption - seems reasonable anyway */ + * We could possible use sme->privacy here, but the assumption + * seems reasonable anyways + */ result = prism2_domibset_uint32(wlandev, DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked, P80211ENUM_truth_true); @@ -578,7 +580,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev, } else { /* Assume we should unset privacy invoked - and exclude unencrypted */ + * and exclude unencrypted + */ result = prism2_domibset_uint32(wlandev, DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked, P80211ENUM_truth_false); @@ -590,17 +593,17 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev, P80211ENUM_truth_false); if (result) goto exit; - } /* Now do the actual join. 
Note there is no way that I can - see to request a specific bssid */ + * see to request a specific bssid + */ msg_join.msgcode = DIDmsg_lnxreq_autojoin; memcpy(msg_join.ssid.data.data, sme->ssid, length); msg_join.ssid.data.len = length; - result = p80211req_dorequest(wlandev, (u8 *) &msg_join); + result = p80211req_dorequest(wlandev, (u8 *)&msg_join); exit: if (result) @@ -623,7 +626,7 @@ static int prism2_disconnect(struct wiphy *wiphy, struct net_device *dev, memcpy(msg_join.ssid.data.data, "---", 3); msg_join.ssid.data.len = 3; - result = p80211req_dorequest(wlandev, (u8 *) &msg_join); + result = p80211req_dorequest(wlandev, (u8 *)&msg_join); if (result) err = -EFAULT; @@ -679,12 +682,12 @@ static int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, int result; int err = 0; - mibitem = (p80211item_uint32_t *) &msg.mibattribute.data; + mibitem = (p80211item_uint32_t *)&msg.mibattribute.data; msg.msgcode = DIDmsg_dot11req_mibget; mibitem->did = DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel; - result = p80211req_dorequest(wlandev, (u8 *) &msg); + result = p80211req_dorequest(wlandev, (u8 *)&msg); if (result) { err = -EFAULT; diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h index 8dfe4381ddf7..cec6d0ba3b65 100644 --- a/drivers/staging/wlan-ng/hfa384x.h +++ b/drivers/staging/wlan-ng/hfa384x.h @@ -1360,7 +1360,6 @@ void hfa384x_destroy(hfa384x_t *hw); int hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis); -int hfa384x_drvr_commtallies(hfa384x_t *hw); int hfa384x_drvr_disable(hfa384x_t *hw, u16 macport); int hfa384x_drvr_enable(hfa384x_t *hw, u16 macport); int hfa384x_drvr_flashdl_enable(hfa384x_t *hw); @@ -1391,10 +1390,6 @@ static inline int hfa384x_drvr_setconfig16(hfa384x_t *hw, u16 rid, u16 val) } int -hfa384x_drvr_getconfig_async(hfa384x_t *hw, - u16 rid, ctlx_usercb_t usercb, void *usercb_data); - -int hfa384x_drvr_setconfig_async(hfa384x_t *hw, u16 rid, void *buf, diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 7551ac25d89d..21a92df85931 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -126,8 +126,6 @@ #include <linux/usb.h> #include <linux/byteorder/generic.h> -#define SUBMIT_URB(u, f) usb_submit_urb(u, f) - #include "p80211types.h" #include "p80211hdr.h" #include "p80211mgmt.h" @@ -145,11 +143,11 @@ enum cmd_mode { DOASYNC }; -#define THROTTLE_JIFFIES (HZ/8) +#define THROTTLE_JIFFIES (HZ / 8) #define URB_ASYNC_UNLINK 0 #define USB_QUEUE_BULK 0 -#define ROUNDUP64(a) (((a)+63)&~63) +#define ROUNDUP64(a) (((a) + 63) & ~63) #ifdef DEBUG_USB static void dbprint_urb(struct urb *urb); @@ -213,8 +211,6 @@ unlocked_usbctlx_cancel_async(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx); static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx); -static void hfa384x_cb_rrid(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx); - static int usbctlx_get_status(const hfa384x_usb_cmdresp_t *cmdresp, hfa384x_cmdresult_t *result); @@ -332,7 +328,7 @@ static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags) int result; skb = dev_alloc_skb(sizeof(hfa384x_usbin_t)); - if (skb == NULL) { + if (!skb) { result = -ENOMEM; goto done; } @@ -348,7 +344,7 @@ static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags) result = -ENOLINK; if (!hw->wlandev->hwremoved && !test_bit(WORK_RX_HALT, &hw->usb_flags)) { - result = SUBMIT_URB(&hw->rx_urb, memflags); + result = usb_submit_urb(&hw->rx_urb, memflags); /* Check whether we 
need to reset the RX pipe */ if (result == -EPIPE) { @@ -397,7 +393,7 @@ static int submit_tx_urb(hfa384x_t *hw, struct urb *tx_urb, gfp_t memflags) if (netif_running(netdev)) { if (!hw->wlandev->hwremoved && !test_bit(WORK_TX_HALT, &hw->usb_flags)) { - result = SUBMIT_URB(tx_urb, memflags); + result = usb_submit_urb(tx_urb, memflags); /* Test whether we need to reset the TX pipe */ if (result == -EPIPE) { @@ -816,43 +812,6 @@ static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx) } } -/*---------------------------------------------------------------- -* hfa384x_cb_rrid -* -* CTLX completion handler for async RRID type control exchanges. -* -* Note: If the handling is changed here, it should probably be -* changed in dorrid as well. -* -* Arguments: -* hw hw struct -* ctlx completed CTLX -* -* Returns: -* nothing -* -* Side effects: -* -* Call context: -* interrupt -----------------------------------------------------------------*/ -static void hfa384x_cb_rrid(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx) -{ - if (ctlx->usercb != NULL) { - hfa384x_rridresult_t rridresult; - - if (ctlx->state != CTLX_COMPLETE) { - memset(&rridresult, 0, sizeof(rridresult)); - rridresult.rid = le16_to_cpu(ctlx->outbuf.rridreq.rid); - } else { - usbctlx_get_rridresult(&ctlx->inbuf.rridresp, - &rridresult); - } - - ctlx->usercb(hw, &rridresult, ctlx->usercb_data); - } -} - static inline int hfa384x_docmd_wait(hfa384x_t *hw, hfa384x_metacmd_t *cmd) { return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL); @@ -1012,7 +971,6 @@ int hfa384x_cmd_initialize(hfa384x_t *hw) ----------------------------------------------------------------*/ int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport) { - int result = 0; hfa384x_metacmd_t cmd; cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_DISABLE) | @@ -1021,9 +979,7 @@ int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport) cmd.parm1 = 0; cmd.parm2 = 0; - result = hfa384x_docmd_wait(hw, &cmd); - - return result; + return hfa384x_docmd_wait(hw, &cmd); } /*---------------------------------------------------------------- @@ -1048,7 +1004,6 @@ int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport) ----------------------------------------------------------------*/ int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport) { - int result = 0; hfa384x_metacmd_t cmd; cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_ENABLE) | @@ -1057,9 +1012,7 @@ int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport) cmd.parm1 = 0; cmd.parm2 = 0; - result = hfa384x_docmd_wait(hw, &cmd); - - return result; + return hfa384x_docmd_wait(hw, &cmd); } /*---------------------------------------------------------------- @@ -1093,7 +1046,6 @@ int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport) ----------------------------------------------------------------*/ int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable) { - int result = 0; hfa384x_metacmd_t cmd; cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_MONITOR) | @@ -1102,9 +1054,7 @@ int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable) cmd.parm1 = 0; cmd.parm2 = 0; - result = hfa384x_docmd_wait(hw, &cmd); - - return result; + return hfa384x_docmd_wait(hw, &cmd); } /*---------------------------------------------------------------- @@ -1148,7 +1098,6 @@ int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable) int hfa384x_cmd_download(hfa384x_t *hw, u16 mode, u16 lowaddr, u16 highaddr, u16 codelen) { - int result = 0; hfa384x_metacmd_t cmd; pr_debug("mode=%d, lowaddr=0x%04x, highaddr=0x%04x, codelen=%d\n", @@ -1161,9 +1110,7 @@ int hfa384x_cmd_download(hfa384x_t *hw, 
u16 mode, u16 lowaddr, cmd.parm1 = highaddr; cmd.parm2 = codelen; - result = hfa384x_docmd_wait(hw, &cmd); - - return result; + return hfa384x_docmd_wait(hw, &cmd); } /*---------------------------------------------------------------- @@ -1351,7 +1298,7 @@ hfa384x_docmd(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx; ctlx = usbctlx_alloc(); - if (ctlx == NULL) { + if (!ctlx) { result = -ENOMEM; goto done; } @@ -1441,7 +1388,7 @@ hfa384x_dorrid(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx; ctlx = usbctlx_alloc(); - if (ctlx == NULL) { + if (!ctlx) { result = -ENOMEM; goto done; } @@ -1522,7 +1469,7 @@ hfa384x_dowrid(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx; ctlx = usbctlx_alloc(); - if (ctlx == NULL) { + if (!ctlx) { result = -ENOMEM; goto done; } @@ -1610,7 +1557,7 @@ hfa384x_dormem(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx; ctlx = usbctlx_alloc(); - if (ctlx == NULL) { + if (!ctlx) { result = -ENOMEM; goto done; } @@ -1703,7 +1650,7 @@ hfa384x_dowmem(hfa384x_t *hw, pr_debug("page=0x%04x offset=0x%04x len=%d\n", page, offset, len); ctlx = usbctlx_alloc(); - if (ctlx == NULL) { + if (!ctlx) { result = -ENOMEM; goto done; } @@ -1747,37 +1694,6 @@ done: } /*---------------------------------------------------------------- -* hfa384x_drvr_commtallies -* -* Send a commtallies inquiry to the MAC. Note that this is an async -* call that will result in an info frame arriving sometime later. -* -* Arguments: -* hw device structure -* -* Returns: -* zero success. -* -* Side effects: -* -* Call context: -* process -----------------------------------------------------------------*/ -int hfa384x_drvr_commtallies(hfa384x_t *hw) -{ - hfa384x_metacmd_t cmd; - - cmd.cmd = HFA384x_CMDCODE_INQ; - cmd.parm0 = HFA384x_IT_COMMTALLIES; - cmd.parm1 = 0; - cmd.parm2 = 0; - - hfa384x_docmd_async(hw, &cmd, NULL, NULL, NULL); - - return 0; -} - -/*---------------------------------------------------------------- * hfa384x_drvr_disable * * Issues the disable command to stop communications on one of @@ -2122,41 +2038,6 @@ int hfa384x_drvr_getconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len) } /*---------------------------------------------------------------- - * hfa384x_drvr_getconfig_async - * - * Performs the sequence necessary to perform an async read of - * of a config/info item. - * - * Arguments: - * hw device structure - * rid config/info record id (host order) - * buf host side record buffer. Upon return it will - * contain the body portion of the record (minus the - * RID and len). - * len buffer length (in bytes, should match record length) - * cbfn caller supplied callback, called when the command - * is done (successful or not). - * cbfndata pointer to some caller supplied data that will be - * passed in as an argument to the cbfn. - * - * Returns: - * nothing the cbfn gets a status argument identifying if - * any errors occur. - * Side effects: - * Queues an hfa384x_usbcmd_t for subsequent execution. - * - * Call context: - * Any - ----------------------------------------------------------------*/ -int -hfa384x_drvr_getconfig_async(hfa384x_t *hw, - u16 rid, ctlx_usercb_t usercb, void *usercb_data) -{ - return hfa384x_dorrid_async(hw, rid, NULL, 0, - hfa384x_cb_rrid, usercb, usercb_data); -} - -/*---------------------------------------------------------------- * hfa384x_drvr_setconfig_async * * Performs the sequence necessary to write a config/info item. 
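The hfa384x_usb.c hunks below replace open-coded list_for_each_safe() plus list_entry() with list_for_each_entry_safe() in the reaper and completion tasklets. The generic pattern is shown here on a made-up struct purely for illustration; it is not part of this patch.

#include <linux/list.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	int data;
};

/* Empty a list while freeing each node; the _safe variant caches the
 * next pointer, so list_del()/kfree() inside the loop is fine.
 */
static void foo_reap_all(struct list_head *head)
{
	struct foo *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, head, list) {
		list_del(&cur->list);
		kfree(cur);
	}
}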
@@ -2810,8 +2691,7 @@ void hfa384x_tx_timeout(wlandevice_t *wlandev) static void hfa384x_usbctlx_reaper_task(unsigned long data) { hfa384x_t *hw = (hfa384x_t *)data; - struct list_head *entry; - struct list_head *temp; + hfa384x_usbctlx_t *ctlx, *temp; unsigned long flags; spin_lock_irqsave(&hw->ctlxq.lock, flags); @@ -2819,10 +2699,7 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data) /* This list is guaranteed to be empty if someone * has unplugged the adapter. */ - list_for_each_safe(entry, temp, &hw->ctlxq.reapable) { - hfa384x_usbctlx_t *ctlx; - - ctlx = list_entry(entry, hfa384x_usbctlx_t, list); + list_for_each_entry_safe(ctlx, temp, &hw->ctlxq.reapable, list) { list_del(&ctlx->list); kfree(ctlx); } @@ -2847,8 +2724,7 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data) static void hfa384x_usbctlx_completion_task(unsigned long data) { hfa384x_t *hw = (hfa384x_t *)data; - struct list_head *entry; - struct list_head *temp; + hfa384x_usbctlx_t *ctlx, *temp; unsigned long flags; int reap = 0; @@ -2858,11 +2734,7 @@ static void hfa384x_usbctlx_completion_task(unsigned long data) /* This list is guaranteed to be empty if someone * has unplugged the adapter ... */ - list_for_each_safe(entry, temp, &hw->ctlxq.completing) { - hfa384x_usbctlx_t *ctlx; - - ctlx = list_entry(entry, hfa384x_usbctlx_t, list); - + list_for_each_entry_safe(ctlx, temp, &hw->ctlxq.completing, list) { /* Call the completion function that this * command was assigned, assuming it has one. */ @@ -3051,7 +2923,7 @@ static void hfa384x_usbctlxq_run(hfa384x_t *hw) hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK; /* Now submit the URB and update the CTLX's state */ - result = SUBMIT_URB(&hw->ctlx_urb, GFP_ATOMIC); + result = usb_submit_urb(&hw->ctlx_urb, GFP_ATOMIC); if (result == 0) { /* This CTLX is now running on the active queue */ head->state = CTLX_REQ_SUBMITTED; @@ -3574,7 +3446,7 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev, } skb = dev_alloc_skb(skblen); - if (skb == NULL) + if (!skb) return; /* only prepend the prism header if in the right mode */ @@ -3985,8 +3857,7 @@ static void hfa384x_usb_throttlefn(unsigned long data) pr_debug("flags=0x%lx\n", hw->usb_flags); if (!hw->wlandev->hwremoved && ((test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) && - !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags)) - | + !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags)) | (test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) && !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags)) )) { diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c index 1b02cdf9d1fa..0a8f3960d465 100644 --- a/drivers/staging/wlan-ng/p80211conv.c +++ b/drivers/staging/wlan-ng/p80211conv.c @@ -49,7 +49,8 @@ * * -------------------------------------------------------------------- * -*================================================================ */ +*================================================================ +*/ #include <linux/module.h> #include <linux/kernel.h> @@ -101,12 +102,12 @@ static u8 oui_8021h[] = { 0x00, 0x00, 0xf8 }; * * Call context: * May be called in interrupt or non-interrupt context -----------------------------------------------------------------*/ +*---------------------------------------------------------------- +*/ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep) { - __le16 fc; u16 proto; struct wlan_ethhdr e_hdr; @@ -148,11 +149,11 @@ int skb_ether_to_p80211(wlandevice_t 
*wlandev, u32 ethconv, /* tack on SNAP */ e_snap = - (struct wlan_snap *) skb_push(skb, + (struct wlan_snap *)skb_push(skb, sizeof(struct wlan_snap)); e_snap->type = htons(proto); - if (ethconv == WLAN_ETHCONV_8021h - && p80211_stt_findproto(proto)) { + if (ethconv == WLAN_ETHCONV_8021h && + p80211_stt_findproto(proto)) { memcpy(e_snap->oui, oui_8021h, WLAN_IEEE_OUI_LEN); } else { @@ -162,12 +163,11 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv, /* tack on llc */ e_llc = - (struct wlan_llc *) skb_push(skb, + (struct wlan_llc *)skb_push(skb, sizeof(struct wlan_llc)); e_llc->dsap = 0xAA; /* SNAP, see IEEE 802 */ e_llc->ssap = 0xAA; e_llc->ctl = 0x03; - } } @@ -202,8 +202,8 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv, p80211_wep->data = NULL; - if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) - && (wlandev->hostwep & HOSTWEP_ENCRYPT)) { + if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) && + (wlandev->hostwep & HOSTWEP_ENCRYPT)) { /* XXXX need to pick keynum other than default? */ p80211_wep->data = kmalloc(skb->len, GFP_ATOMIC); @@ -215,8 +215,8 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv, p80211_wep->iv, p80211_wep->icv); if (foo) { netdev_warn(wlandev->netdev, - "Host en-WEP failed, dropping frame (%d).\n", - foo); + "Host en-WEP failed, dropping frame (%d).\n", + foo); return 2; } fc |= cpu_to_le16(WLAN_SET_FC_ISWEP(1)); @@ -238,10 +238,10 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac, int i; /* Gather wireless spy statistics: for each packet, compare the - * source address with out list, and if match, get the stats... */ + * source address with out list, and if match, get the stats... + */ for (i = 0; i < wlandev->spy_number; i++) { - if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) { memcpy(wlandev->spy_address[i], mac, ETH_ALEN); wlandev->spy_stat[i].level = rxmeta->signal; @@ -273,7 +273,8 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac, * * Call context: * May be called in interrupt or non-interrupt context -----------------------------------------------------------------*/ +*---------------------------------------------------------------- +*/ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, struct sk_buff *skb) { @@ -293,19 +294,19 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN; payload_offset = WLAN_HDR_A3_LEN; - w_hdr = (union p80211_hdr *) skb->data; + w_hdr = (union p80211_hdr *)skb->data; /* setup some vars for convenience */ fc = le16_to_cpu(w_hdr->a3.fc); if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) { ether_addr_copy(daddr, w_hdr->a3.a1); ether_addr_copy(saddr, w_hdr->a3.a2); - } else if ((WLAN_GET_FC_TODS(fc) == 0) - && (WLAN_GET_FC_FROMDS(fc) == 1)) { + } else if ((WLAN_GET_FC_TODS(fc) == 0) && + (WLAN_GET_FC_FROMDS(fc) == 1)) { ether_addr_copy(daddr, w_hdr->a3.a1); ether_addr_copy(saddr, w_hdr->a3.a3); - } else if ((WLAN_GET_FC_TODS(fc) == 1) - && (WLAN_GET_FC_FROMDS(fc) == 0)) { + } else if ((WLAN_GET_FC_TODS(fc) == 1) && + (WLAN_GET_FC_FROMDS(fc) == 0)) { ether_addr_copy(daddr, w_hdr->a3.a3); ether_addr_copy(saddr, w_hdr->a3.a2); } else { @@ -320,18 +321,19 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, } /* perform de-wep if necessary.. 
*/ - if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) && WLAN_GET_FC_ISWEP(fc) - && (wlandev->hostwep & HOSTWEP_DECRYPT)) { + if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) && + WLAN_GET_FC_ISWEP(fc) && + (wlandev->hostwep & HOSTWEP_DECRYPT)) { if (payload_length <= 8) { netdev_err(netdev, "WEP frame too short (%u).\n", skb->len); return 1; } foo = wep_decrypt(wlandev, skb->data + payload_offset + 4, - payload_length - 8, -1, - skb->data + payload_offset, - skb->data + payload_offset + - payload_length - 4); + payload_length - 8, -1, + skb->data + payload_offset, + skb->data + payload_offset + + payload_length - 4); if (foo) { /* de-wep failed, drop skb. */ pr_debug("Host de-WEP failed, dropping frame (%d).\n", @@ -350,11 +352,11 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, wlandev->rx.decrypt++; } - e_hdr = (struct wlan_ethhdr *) (skb->data + payload_offset); + e_hdr = (struct wlan_ethhdr *)(skb->data + payload_offset); - e_llc = (struct wlan_llc *) (skb->data + payload_offset); + e_llc = (struct wlan_llc *)(skb->data + payload_offset); e_snap = - (struct wlan_snap *) (skb->data + payload_offset + + (struct wlan_snap *)(skb->data + payload_offset + sizeof(struct wlan_llc)); /* Test for the various encodings */ @@ -369,7 +371,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, /* A bogus length ethfrm has been encap'd. */ /* Is someone trying an oflow attack? */ netdev_err(netdev, "ENCAP frame too large (%d > %d)\n", - payload_length, netdev->mtu + ETH_HLEN); + payload_length, netdev->mtu + ETH_HLEN); return 1; } @@ -379,15 +381,15 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, skb_trim(skb, skb->len - WLAN_CRC_LEN); } else if ((payload_length >= sizeof(struct wlan_llc) + - sizeof(struct wlan_snap)) - && (e_llc->dsap == 0xaa) - && (e_llc->ssap == 0xaa) - && (e_llc->ctl == 0x03) - && - (((memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) == 0) - && (ethconv == WLAN_ETHCONV_8021h) - && (p80211_stt_findproto(le16_to_cpu(e_snap->type)))) - || (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) != + sizeof(struct wlan_snap)) && + (e_llc->dsap == 0xaa) && + (e_llc->ssap == 0xaa) && + (e_llc->ctl == 0x03) && + (((memcmp(e_snap->oui, oui_rfc1042, + WLAN_IEEE_OUI_LEN) == 0) && + (ethconv == WLAN_ETHCONV_8021h) && + (p80211_stt_findproto(le16_to_cpu(e_snap->type)))) || + (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) != 0))) { pr_debug("SNAP+RFC1042 len: %d\n", payload_length); /* it's a SNAP + RFC1042 frame && protocol is in STT */ @@ -398,7 +400,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, /* A bogus length ethfrm has been sent. */ /* Is someone trying an oflow attack? 
*/ netdev_err(netdev, "SNAP frame too large (%d > %d)\n", - payload_length, netdev->mtu); + payload_length, netdev->mtu); return 1; } @@ -415,13 +417,14 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, skb_trim(skb, skb->len - WLAN_CRC_LEN); } else if ((payload_length >= sizeof(struct wlan_llc) + - sizeof(struct wlan_snap)) - && (e_llc->dsap == 0xaa) - && (e_llc->ssap == 0xaa) - && (e_llc->ctl == 0x03)) { + sizeof(struct wlan_snap)) && + (e_llc->dsap == 0xaa) && + (e_llc->ssap == 0xaa) && + (e_llc->ctl == 0x03)) { pr_debug("802.1h/RFC1042 len: %d\n", payload_length); /* it's an 802.1h frame || (an RFC1042 && protocol not in STT) - build a DIXII + RFC894 */ + * build a DIXII + RFC894 + */ /* Test for an overlength frame */ if ((payload_length - sizeof(struct wlan_llc) - @@ -430,9 +433,9 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, /* A bogus length ethfrm has been sent. */ /* Is someone trying an oflow attack? */ netdev_err(netdev, "DIXII frame too large (%ld > %d)\n", - (long int)(payload_length - - sizeof(struct wlan_llc) - - sizeof(struct wlan_snap)), netdev->mtu); + (long int)(payload_length - + sizeof(struct wlan_llc) - + sizeof(struct wlan_snap)), netdev->mtu); return 1; } @@ -465,7 +468,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, /* A bogus length ethfrm has been sent. */ /* Is someone trying an oflow attack? */ netdev_err(netdev, "OTHER frame too large (%d > %d)\n", - payload_length, netdev->mtu); + payload_length, netdev->mtu); return 1; } @@ -480,7 +483,6 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, /* chop off the 802.11 CRC */ skb_trim(skb, skb->len - WLAN_CRC_LEN); - } /* @@ -521,14 +523,15 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv, * * Call context: * May be called in interrupt or non-interrupt context -----------------------------------------------------------------*/ +*---------------------------------------------------------------- +*/ int p80211_stt_findproto(u16 proto) { /* Always return found for now. This is the behavior used by the */ - /* Zoom Win95 driver when 802.1h mode is selected */ + /* Zoom Win95 driver when 802.1h mode is selected */ /* TODO: If necessary, add an actual search we'll probably - need this to match the CMAC's way of doing things. - Need to do some testing to confirm. + * need this to match the CMAC's way of doing things. + * Need to do some testing to confirm. 
*/ if (proto == ETH_P_AARP) /* APPLETALK */ @@ -551,24 +554,25 @@ int p80211_stt_findproto(u16 proto) * * Call context: * May be called in interrupt or non-interrupt context -----------------------------------------------------------------*/ +*---------------------------------------------------------------- +*/ void p80211skb_rxmeta_detach(struct sk_buff *skb) { struct p80211_rxmeta *rxmeta; struct p80211_frmmeta *frmmeta; /* Sanity checks */ - if (skb == NULL) { /* bad skb */ + if (!skb) { /* bad skb */ pr_debug("Called w/ null skb.\n"); return; } frmmeta = P80211SKB_FRMMETA(skb); - if (frmmeta == NULL) { /* no magic */ + if (!frmmeta) { /* no magic */ pr_debug("Called w/ bad frmmeta magic.\n"); return; } rxmeta = frmmeta->rx; - if (rxmeta == NULL) { /* bad meta ptr */ + if (!rxmeta) { /* bad meta ptr */ pr_debug("Called w/ bad rxmeta ptr.\n"); return; } @@ -595,7 +599,8 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb) * * Call context: * May be called in interrupt or non-interrupt context -----------------------------------------------------------------*/ +*---------------------------------------------------------------- +*/ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) { int result = 0; @@ -603,7 +608,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) struct p80211_frmmeta *frmmeta; /* If these already have metadata, we error out! */ - if (P80211SKB_RXMETA(skb) != NULL) { + if (P80211SKB_RXMETA(skb)) { netdev_err(wlandev->netdev, "%s: RXmeta already attached!\n", wlandev->name); result = 0; @@ -613,7 +618,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) /* Allocate the rxmeta */ rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC); - if (rxmeta == NULL) { + if (!rxmeta) { netdev_err(wlandev->netdev, "%s: Failed to allocate rxmeta.\n", wlandev->name); result = 1; @@ -626,7 +631,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb) /* Overlay a frmmeta_t onto skb->cb */ memset(skb->cb, 0, sizeof(struct p80211_frmmeta)); - frmmeta = (struct p80211_frmmeta *) (skb->cb); + frmmeta = (struct p80211_frmmeta *)(skb->cb); frmmeta->magic = P80211_FRMMETA_MAGIC; frmmeta->rx = rxmeta; exit: @@ -648,7 +653,8 @@ exit: * * Call context: * May be called in interrupt or non-interrupt context -----------------------------------------------------------------*/ +*---------------------------------------------------------------- +*/ void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb) { struct p80211_frmmeta *meta; diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index a9c1e0bafa62..88255ce2871b 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -328,7 +328,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, p80211_wep.data = NULL; - if (skb == NULL) + if (!skb) return NETDEV_TX_OK; if (wlandev->state != WLAN_DEVICE_OPEN) { @@ -388,7 +388,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb, goto failed; } } - if (wlandev->txframe == NULL) { + if (!wlandev->txframe) { result = 1; goto failed; } @@ -736,7 +736,7 @@ int wlan_setup(wlandevice_t *wlandev, struct device *physdev) /* Allocate and initialize the wiphy struct */ wiphy = wlan_create_wiphy(physdev, wlandev); - if (wiphy == NULL) { + if (!wiphy) { dev_err(physdev, "Failed to alloc wiphy.\n"); return 1; } @@ -744,7 +744,7 @@ int wlan_setup(wlandevice_t *wlandev, struct device *physdev) /* 
Allocate and initialize the struct device */ netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d", NET_NAME_UNKNOWN, ether_setup); - if (netdev == NULL) { + if (!netdev) { dev_err(physdev, "Failed to alloc netdev.\n"); wlan_free_wiphy(wiphy); result = 1; diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c index c363456d93a3..22c79703e328 100644 --- a/drivers/staging/wlan-ng/p80211wep.c +++ b/drivers/staging/wlan-ng/p80211wep.c @@ -140,8 +140,8 @@ int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen) } /* - 4-byte IV at start of buffer, 4-byte ICV at end of buffer. - if successful, buf start is payload begin, length -= 8; + * 4-byte IV at start of buffer, 4-byte ICV at end of buffer. + * if successful, buf start is payload begin, length -= 8; */ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override, u8 *iv, u8 *icv) @@ -188,7 +188,8 @@ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override, /* Apply the RC4 to the data, update the CRC32 */ crc = ~0; - i = j = 0; + i = 0; + j = 0; for (k = 0; k < len; k++) { i = (i + 1) & 0xff; j = (j + s[i]) & 0xff; @@ -260,7 +261,8 @@ int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum, /* Update CRC32 then apply RC4 to the data */ crc = ~0; - i = j = 0; + i = 0; + j = 0; for (k = 0; k < len; k++) { crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8); i = (i + 1) & 0xff; diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c index 8fc80df0b53e..8564d9eb918f 100644 --- a/drivers/staging/wlan-ng/prism2fw.c +++ b/drivers/staging/wlan-ng/prism2fw.c @@ -333,6 +333,10 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr, /* Make the image chunks */ result = mkimage(fchunk, &nfchunks); + if (result) { + netdev_err(wlandev->netdev, "Failed to make image chunk.\n"); + return 1; + } /* Do any plugging */ result = plugimage(fchunk, nfchunks, s3plug, ns3plug, &pda); @@ -538,7 +542,7 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt) /* Allocate buffer space for chunks */ for (i = 0; i < *ccnt; i++) { clist[i].data = kzalloc(clist[i].len, GFP_KERNEL); - if (clist[i].data == NULL) { + if (!clist[i].data) { pr_err("failed to allocate image space, exitting.\n"); return 1; } diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index 013a6240f193..d8ed9a05789c 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c @@ -375,7 +375,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp) int count; - req = (struct p80211msg_dot11req_scan_results *) msgp; + req = msgp; req->resultcode.status = P80211ENUM_msgitem_status_data_ok; diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h index 7a9f424607b7..e6472034da33 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.h +++ b/drivers/staging/wlan-ng/prism2mgmt.h @@ -87,7 +87,6 @@ int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp); * Prism2 data types ---------------------------------------------------------------*/ /* byte area conversion functions*/ -void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr); void prism2mgmt_bytearea2pstr(u8 *bytearea, p80211pstrd_t *pstr, int len); /* byte string conversion functions*/ diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c index cdda07d1c268..fe914b1f904b 100644 --- a/drivers/staging/wlan-ng/prism2mib.c +++ 
b/drivers/staging/wlan-ng/prism2mib.c @@ -379,7 +379,7 @@ static int prism2mib_bytearea2pstr(struct mibrec *mib, void *data) { int result; - p80211pstrd_t *pstr = (p80211pstrd_t *) data; + p80211pstrd_t *pstr = data; u8 bytebuf[MIB_TMP_MAXLEN]; if (isget) { @@ -388,7 +388,7 @@ static int prism2mib_bytearea2pstr(struct mibrec *mib, prism2mgmt_bytearea2pstr(bytebuf, pstr, mib->parm2); } else { memset(bytebuf, 0, mib->parm2); - prism2mgmt_pstr2bytearea(bytebuf, pstr); + memcpy(bytebuf, pstr->data, pstr->len); result = hfa384x_drvr_setconfig(hw, mib->parm1, bytebuf, mib->parm2); } @@ -428,7 +428,7 @@ static int prism2mib_uint32(struct mibrec *mib, struct p80211msg_dot11req_mibset *msg, void *data) { int result; - u32 *uint32 = (u32 *) data; + u32 *uint32 = data; u8 bytebuf[MIB_TMP_MAXLEN]; u16 *wordbuf = (u16 *) bytebuf; @@ -475,7 +475,7 @@ static int prism2mib_flag(struct mibrec *mib, struct p80211msg_dot11req_mibset *msg, void *data) { int result; - u32 *uint32 = (u32 *) data; + u32 *uint32 = data; u8 bytebuf[MIB_TMP_MAXLEN]; u16 *wordbuf = (u16 *) bytebuf; u32 flags; @@ -533,7 +533,7 @@ static int prism2mib_wepdefaultkey(struct mibrec *mib, void *data) { int result; - p80211pstrd_t *pstr = (p80211pstrd_t *) data; + p80211pstrd_t *pstr = data; u8 bytebuf[MIB_TMP_MAXLEN]; u16 len; @@ -543,7 +543,7 @@ static int prism2mib_wepdefaultkey(struct mibrec *mib, len = (pstr->len > 5) ? HFA384x_RID_CNFWEP128DEFAULTKEY_LEN : HFA384x_RID_CNFWEPDEFAULTKEY_LEN; memset(bytebuf, 0, len); - prism2mgmt_pstr2bytearea(bytebuf, pstr); + memcpy(bytebuf, pstr->data, pstr->len); result = hfa384x_drvr_setconfig(hw, mib->parm1, bytebuf, len); } @@ -660,7 +660,7 @@ static int prism2mib_fragmentationthreshold(struct mibrec *mib, struct p80211msg_dot11req_mibset *msg, void *data) { - u32 *uint32 = (u32 *) data; + u32 *uint32 = data; if (!isget) if ((*uint32) % 2) { @@ -705,7 +705,7 @@ static int prism2mib_priv(struct mibrec *mib, hfa384x_t *hw, struct p80211msg_dot11req_mibset *msg, void *data) { - p80211pstrd_t *pstr = (p80211pstrd_t *) data; + p80211pstrd_t *pstr = data; switch (mib->did) { case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{ @@ -759,26 +759,6 @@ void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr, } /*---------------------------------------------------------------- -* prism2mgmt_pstr2bytearea -* -* Convert the pstr data in the WLAN message structure into an hfa384x -* byte area format. 
-* -* Arguments: -* bytearea hfa384x byte area data type -* pstr wlan message data -* -* Returns: -* Nothing -* -----------------------------------------------------------------*/ - -void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr) -{ - memcpy(bytearea, pstr->data, pstr->len); -} - -/*---------------------------------------------------------------- * prism2mgmt_bytestr2pstr * * Convert the data in an hfa384x byte string format into a diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c index 131223afd918..64f90722b01b 100644 --- a/drivers/staging/wlan-ng/prism2sta.c +++ b/drivers/staging/wlan-ng/prism2sta.c @@ -242,7 +242,7 @@ static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb, union p80211_hdr *p80211_hdr, struct p80211_metawep *p80211_wep) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; /* If necessary, set the 802.11 WEP bit */ if ((wlandev->hostwep & (HOSTWEP_PRIVACYINVOKED | HOSTWEP_ENCRYPT)) == @@ -279,7 +279,7 @@ static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb, */ static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; int result = 0; @@ -409,7 +409,7 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg) */ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; u32 result; result = P80211ENUM_resultcode_implementation_failure; @@ -583,7 +583,7 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate) static int prism2sta_getcardinfo(wlandevice_t *wlandev) { int result = 0; - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; u16 temp; u8 snum[HFA384x_RID_NICSERIALNUMBER_LEN]; @@ -911,7 +911,7 @@ done: */ static int prism2sta_globalsetup(wlandevice_t *wlandev) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; /* Set the maximum frame size */ return hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFMAXDATALEN, @@ -921,7 +921,7 @@ static int prism2sta_globalsetup(wlandevice_t *wlandev) static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev) { int result = 0; - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; u16 promisc; @@ -985,7 +985,7 @@ static void prism2sta_inf_handover(wlandevice_t *wlandev, static void prism2sta_inf_tallies(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; u16 *src16; u32 *dst; u32 *src32; @@ -1032,7 +1032,7 @@ static void prism2sta_inf_scanresults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; int nbss; hfa384x_ScanResult_t *sr = &(inf->info.scanresult); int i; @@ -1087,7 +1087,7 @@ static void prism2sta_inf_scanresults(wlandevice_t *wlandev, static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; int nbss; nbss = (inf->framelen - 3) / 32; @@ -1128,7 +1128,7 @@ static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev, static void prism2sta_inf_chinforesults(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; unsigned int i, n; 
hw->channel_info.results.scanchannels = @@ -1441,7 +1441,7 @@ void prism2sta_processing_defer(struct work_struct *data) static void prism2sta_inf_linkstatus(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; hw->link_status_new = le16_to_cpu(inf->info.linkstatus.linkstatus); @@ -1469,7 +1469,7 @@ static void prism2sta_inf_linkstatus(wlandevice_t *wlandev, static void prism2sta_inf_assocstatus(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; hfa384x_AssocStatus_t rec; int i; @@ -1530,7 +1530,7 @@ static void prism2sta_inf_assocstatus(wlandevice_t *wlandev, static void prism2sta_inf_authreq(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; struct sk_buff *skb; skb = dev_alloc_skb(sizeof(*inf)); @@ -1545,7 +1545,7 @@ static void prism2sta_inf_authreq(wlandevice_t *wlandev, static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; hfa384x_authenticateStation_data_t rec; int i, added, result, cnt; @@ -1719,7 +1719,7 @@ static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev, static void prism2sta_inf_psusercnt(wlandevice_t *wlandev, hfa384x_InfFrame_t *inf) { - hfa384x_t *hw = (hfa384x_t *) wlandev->priv; + hfa384x_t *hw = wlandev->priv; hw->psusercount = le16_to_cpu(inf->info.psusercnt.usercnt); } @@ -1886,7 +1886,6 @@ static wlandevice_t *create_wlan(void) hw = kzalloc(sizeof(hfa384x_t), GFP_KERNEL); if (!wlandev || !hw) { - pr_err("%s: Memory allocation failure.\n", dev_info); kfree(wlandev); kfree(hw); return NULL; diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 8abf3f87a2d5..41358bbc6246 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -67,7 +67,7 @@ static int prism2sta_probe_usb(struct usb_interface *interface, dev = interface_to_usbdev(interface); wlandev = create_wlan(); - if (wlandev == NULL) { + if (!wlandev) { dev_err(&interface->dev, "Memory allocation failure.\n"); result = -EIO; goto failed; @@ -139,8 +139,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface) wlandev = (wlandevice_t *)usb_get_intfdata(interface); if (wlandev != NULL) { LIST_HEAD(cleanlist); - struct list_head *entry; - struct list_head *temp; + hfa384x_usbctlx_t *ctlx, *temp; unsigned long flags; hfa384x_t *hw = wlandev->priv; @@ -178,18 +177,15 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface) tasklet_kill(&hw->completion_bh); tasklet_kill(&hw->reaper_bh); - flush_scheduled_work(); + cancel_work_sync(&hw->link_bh); + cancel_work_sync(&hw->commsqual_bh); /* Now we complete any outstanding commands * and tell everyone who is waiting for their * responses that we have shut down. */ - list_for_each(entry, &cleanlist) { - hfa384x_usbctlx_t *ctlx; - - ctlx = list_entry(entry, hfa384x_usbctlx_t, list); + list_for_each_entry(ctlx, &cleanlist, list) complete(&ctlx->done); - } /* Give any outstanding synchronous commands * a chance to complete. All they need to do @@ -199,12 +195,8 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface) msleep(100); /* Now delete the CTLXs, because no-one else can now. 
*/ - list_for_each_safe(entry, temp, &cleanlist) { - hfa384x_usbctlx_t *ctlx; - - ctlx = list_entry(entry, hfa384x_usbctlx_t, list); + list_for_each_entry_safe(ctlx, temp, &cleanlist, list) kfree(ctlx); - } /* Unhook the wlandev */ unregister_wlandev(wlandev); diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c index 89f5b55ed546..7eadf922b21f 100644 --- a/drivers/staging/xgifb/XGI_main_26.c +++ b/drivers/staging/xgifb/XGI_main_26.c @@ -226,7 +226,6 @@ void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr) XGI_Pr->Part4Port = BaseAddr + SIS_CRT2_PORT_14; /* 301 palette address port registers */ XGI_Pr->Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2; - } /* ------------------ Internal helper routines ----------------- */ @@ -315,10 +314,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex) if (XGIbios_mode[myindex].bpp > 8) return -1; } - } goto check_memory; - } /* FIXME: for now, all is valid on XG27 */ @@ -518,7 +515,6 @@ check_memory: if (required_mem > xgifb_info->video_size) return -1; return myindex; - } static void XGIfb_search_crt2type(const char *name) @@ -655,26 +651,26 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info) switch (xgifb_info->display2) { case XGIFB_DISP_CRT: - cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE); + cr30 = SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE; cr31 |= SIS_DRIVER_MODE; break; case XGIFB_DISP_LCD: - cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE); + cr30 = SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE; cr31 |= SIS_DRIVER_MODE; break; case XGIFB_DISP_TV: if (xgifb_info->TV_type == TVMODE_HIVISION) - cr30 = (SIS_VB_OUTPUT_HIVISION - | SIS_SIMULTANEOUS_VIEW_ENABLE); + cr30 = SIS_VB_OUTPUT_HIVISION + | SIS_SIMULTANEOUS_VIEW_ENABLE; else if (xgifb_info->TV_plug == TVPLUG_SVIDEO) - cr30 = (SIS_VB_OUTPUT_SVIDEO - | SIS_SIMULTANEOUS_VIEW_ENABLE); + cr30 = SIS_VB_OUTPUT_SVIDEO + | SIS_SIMULTANEOUS_VIEW_ENABLE; else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE) - cr30 = (SIS_VB_OUTPUT_COMPOSITE - | SIS_SIMULTANEOUS_VIEW_ENABLE); + cr30 = SIS_VB_OUTPUT_COMPOSITE + | SIS_SIMULTANEOUS_VIEW_ENABLE; else if (xgifb_info->TV_plug == TVPLUG_SCART) - cr30 = (SIS_VB_OUTPUT_SCART - | SIS_SIMULTANEOUS_VIEW_ENABLE); + cr30 = SIS_VB_OUTPUT_SCART + | SIS_SIMULTANEOUS_VIEW_ENABLE; cr31 |= SIS_DRIVER_MODE; if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL) @@ -2064,8 +2060,6 @@ static struct pci_driver xgifb_driver = { .remove = xgifb_remove }; - - /*****************************************************/ /* MODULE */ /*****************************************************/ diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h index d9524a2e9ce4..94e2e3c7c264 100644 --- a/drivers/staging/xgifb/vb_def.h +++ b/drivers/staging/xgifb/vb_def.h @@ -228,7 +228,6 @@ #define RES1280x960x85 0x46 #define RES1280x960x120 0x47 - #define XG27_CR8F 0x0C #define XG27_SR36 0x30 #define XG27_SR40 0x04 diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c index 879a7e6751ac..26b539bc6faf 100644 --- a/drivers/staging/xgifb/vb_init.c +++ b/drivers/staging/xgifb/vb_init.c @@ -57,7 +57,8 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension, data = xgifb_reg_get(pVBInfo->P3d4, 0x48); /* HOTPLUG_SUPPORT */ /* for current XG20 & XG21, GPIOH is floating, driver will - * fix DDR temporarily */ + * fix DDR temporarily + */ /* DVI read GPIOH */ data &= 0x01; /* 1=DDRII, 0=DDR */ /* ~HOTPLUG_SUPPORT */ diff 
--git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c index c886dd2892a4..f97c77d88173 100644 --- a/drivers/staging/xgifb/vb_setmode.c +++ b/drivers/staging/xgifb/vb_setmode.c @@ -61,7 +61,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo) if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08)) pVBInfo->XGINew_CR97 = 0x80; } - } static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo) @@ -155,7 +154,6 @@ static void XGI_ClearExt1Regs(struct vb_device_info *pVBInfo) static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo) { - xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, 0x20); xgifb_reg_set(pVBInfo->P3c4, 0x2B, XGI_VCLKData[0].SR2B); xgifb_reg_set(pVBInfo->P3c4, 0x2C, XGI_VCLKData[0].SR2C); @@ -274,12 +272,12 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo, for (i = 0x01; i <= 0x04; i++) { data = pVBInfo->TimingH.data[i]; - xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 1), data); + xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 1), data); } for (i = 0x05; i <= 0x06; i++) { data = pVBInfo->TimingH.data[i]; - xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i + 6), data); + xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i + 6), data); } j = xgifb_reg_get(pVBInfo->P3c4, 0x0e); @@ -325,17 +323,17 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex, for (i = 0x00; i <= 0x01; i++) { data = pVBInfo->TimingV.data[i]; - xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 6), data); + xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 6), data); } for (i = 0x02; i <= 0x03; i++) { data = pVBInfo->TimingV.data[i]; - xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x0e), data); + xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 0x0e), data); } for (i = 0x04; i <= 0x05; i++) { data = pVBInfo->TimingV.data[i]; - xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x11), data); + xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 0x11), data); } j = xgifb_reg_get(pVBInfo->P3c4, 0x0a); @@ -433,7 +431,7 @@ static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex, Temp2 |= 0x40; /* Temp2 + 0x40 */ Temp2 &= 0xFF; - Tempax = (unsigned char) Temp2; /* Tempax: HRE[7:0] */ + Tempax = (unsigned char)Temp2; /* Tempax: HRE[7:0] */ Tempax <<= 2; /* Tempax[7:2]: HRE[5:0] */ Tempdx >>= 6; /* Tempdx[7:6]->[1:0] HRS[9:8] */ Tempax |= Tempdx; /* HRE[5:0]HRS[9:8] */ @@ -483,11 +481,11 @@ static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex, Temp2 |= 0x20; /* VRE + 0x20 */ Temp2 &= 0xFF; - Tempax = (unsigned char) Temp2; /* Tempax: VRE[7:0] */ + Tempax = (unsigned char)Temp2; /* Tempax: VRE[7:0] */ Tempax <<= 2; /* Tempax[7:0]; VRE[5:0]00 */ Temp1 &= 0x600; /* Temp1[10:9]: VRS[10:9] */ Temp1 >>= 9; /* Temp1[1:0]: VRS[10:9] */ - Tempbx = (unsigned char) Temp1; + Tempbx = (unsigned char)Temp1; Tempax |= Tempbx; /* Tempax[7:0]: VRE[5:0]VRS[10:9] */ Tempax &= 0x7F; /* SR3F D[7:2]->VRE D[1:0]->VRS */ @@ -592,7 +590,6 @@ static void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo) xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80); /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: 24bits */ xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80); - } static void xgifb_set_lcd(int chip_id, @@ -716,10 +713,10 @@ static void XGI_SetCRT1DE(unsigned short ModeIdIndex, data = xgifb_reg_get(pVBInfo->P3d4, 0x11); data &= 0x7F; xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */ - xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short) (tempcx & 0xff)); + xgifb_reg_set(pVBInfo->P3d4, 0x01, 
(unsigned short)(tempcx & 0xff)); xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c, - (unsigned short) ((tempcx & 0x0ff00) >> 10)); - xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short) (tempbx & 0xff)); + (unsigned short)((tempcx & 0x0ff00) >> 10)); + xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff)); tempax = 0; tempbx >>= 8; @@ -930,7 +927,6 @@ static void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo) xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp); /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: dual 12bits */ xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80); - } static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension, @@ -990,7 +986,6 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension, xgifb_reg_and_or(pVBInfo->P3c4, 0x07, 0xFC, data2); if (HwDeviceExtension->jChipType >= XG27) xgifb_reg_and_or(pVBInfo->P3c4, 0x40, 0xFC, data2 & 0x03); - } static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension, @@ -1072,7 +1067,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension, data = 0x6c; xgifb_reg_set(pVBInfo->P3d4, 0x52, data); } - } static void XGI_WriteDAC(unsigned short dl, @@ -1905,8 +1899,8 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex, push <<= 8; tempax = temp << 8; tempbx = tempbx | tempax; - temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA - | SetInSlaveMode | DisableCRT2Display); + temp = SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA + | SetInSlaveMode | DisableCRT2Display; temp = 0xFFFF ^ temp; tempbx &= temp; @@ -2887,7 +2881,7 @@ static void XGI_SetGroup1(unsigned short ModeIdIndex, xgifb_reg_set(pVBInfo->Part1Port, 0x0C, temp); temp = tempcx & 0x00FF; xgifb_reg_set(pVBInfo->Part1Port, 0x0D, temp); - tempcx = (pVBInfo->VGAVT - 1); + tempcx = pVBInfo->VGAVT - 1; temp = tempcx & 0x00FF; xgifb_reg_set(pVBInfo->Part1Port, 0x0E, temp); @@ -2925,7 +2919,7 @@ static void XGI_SetGroup1(unsigned short ModeIdIndex, temp = tempbx & 0x00FF; xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp); temp = ((tempbx & 0xFF00) >> 8) << 4; - temp = ((tempcx & 0x000F) | (temp)); + temp = (tempcx & 0x000F) | (temp); xgifb_reg_set(pVBInfo->Part1Port, 0x11, temp); tempax = 0; @@ -4080,7 +4074,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex, tempcx |= 0x04000; if (tempeax <= tempebx) { - tempcx = (tempcx & (~0x4000)); + tempcx = tempcx & (~0x4000); tempeax = pVBInfo->VGAVDE; } else { tempeax -= tempebx; @@ -4130,7 +4124,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex, temp = (tempax & 0xFF00) >> 8; temp = (temp & 0x0003) << 4; xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp); - temp = (tempax & 0x00FF); + temp = tempax & 0x00FF; xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp); if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) { @@ -4932,7 +4926,7 @@ static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo) tempcl -= ModeVGA; if (tempcl >= 0) { /* BT Color */ - tempah = (0x008 >> tempcl); + tempah = 0x008 >> tempcl; if (tempah == 0) tempah = 1; tempah |= 0x040; @@ -5073,7 +5067,6 @@ reg_and_or: } } - void XGI_UnLockCRT2(struct vb_device_info *pVBInfo) { xgifb_reg_and_or(pVBInfo->Part1Port, 0x2f, 0xFF, 0x01); diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h index 0d27594554ca..2fd1a5935e1d 100644 --- a/drivers/staging/xgifb/vb_struct.h +++ b/drivers/staging/xgifb/vb_struct.h @@ -65,7 +65,6 @@ struct XGI330_TVDataTablStruct { struct SiS_TVData const *DATAPTR; }; - struct 
XGI_TimingHStruct { unsigned char data[8]; }; @@ -117,7 +116,6 @@ struct XGI_CRT1TableStruct { unsigned char CR[16]; }; - struct XGI301C_Tap4TimingStruct { unsigned short DE; unsigned char Reg[64]; /* C0-FF */ diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h index f17e5b9bd333..45f2c992cd44 100644 --- a/drivers/staging/xgifb/vb_table.h +++ b/drivers/staging/xgifb/vb_table.h @@ -1140,7 +1140,6 @@ static const struct SiS_LVDSData XGI_LVDS1024x768Data_1[] = { {1344, 806, 1344, 806} /* 06 (512x384,1024x768) */ }; - static const struct SiS_LVDSData XGI_LVDS1024x768Data_2[] = { {1344, 806, 1344, 806}, {1344, 806, 1344, 806}, @@ -1228,7 +1227,6 @@ static const struct SiS_LVDSData XGI_LVDS1024x768Data_1x75[] = { {1312, 800, 1312, 800}, /* 06 (512x384,1024x768) */ }; - static const struct SiS_LVDSData XGI_LVDS1024x768Data_2x75[] = { {1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,640x200,640x400) */ {1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */ @@ -2314,7 +2312,6 @@ static const unsigned char TVAntiFlickList[] = {/* NTSCAntiFlicker */ 0x00 /* ; 1 new anti-flicker ? */ }; - static const unsigned char TVEdgeList[] = { 0x00, /* ; 0 NTSC No Edge enhance */ 0x04, /* ; 1 NTSC Adaptive Edge enhance */ diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h index 61fa10fd470f..de80e5c108dc 100644 --- a/drivers/staging/xgifb/vgatypes.h +++ b/drivers/staging/xgifb/vgatypes.h @@ -27,14 +27,16 @@ struct xgi_hw_device_info { /* of Linear VGA memory */ unsigned long ulVideoMemorySize; /* size, in bytes, of the - memory on the board */ + * memory on the board + */ unsigned char jChipType; /* Used to Identify Graphics Chip */ /* defined in the data structure type */ /* "XGI_CHIP_TYPE" */ unsigned char jChipRevision; /* Used to Identify Graphics - Chip Revision */ + * Chip Revision + */ unsigned char ujVBChipID; /* the ID of video bridge */ /* defined in the data structure type */ @@ -46,4 +48,3 @@ struct xgi_hw_device_info { /* Additional IOCTL for communication xgifb <> X driver */ /* If changing this, xgifb.h must also be changed (for xgifb) */ #endif - diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index fa05e04c5531..d8414502edb4 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -97,6 +97,12 @@ enum fid_type { FILEID_FAT_WITH_PARENT = 0x72, /* + * 128 bit child FID (struct lu_fid) + * 128 bit parent FID (struct lu_fid) + */ + FILEID_LUSTRE = 0x97, + + /* * Filesystems must not use 0xff file ID. */ FILEID_INVALID = 0xff, diff --git a/include/linux/fence.h b/include/linux/fence.h index bb522011383b..605bd88246a6 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -79,6 +79,8 @@ struct fence { unsigned long flags; ktime_t timestamp; int status; + struct list_head child_list; + struct list_head active_list; }; enum fence_flag_bits { diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 2fe939c73cd2..6670c3d25c58 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -119,6 +119,8 @@ struct st_sensor_bdu { * @addr: address of the register. * @mask_int1: mask to enable/disable IRQ on INT1 pin. * @mask_int2: mask to enable/disable IRQ on INT2 pin. + * @addr_ihl: address to enable/disable active low on the INT lines. + * @mask_ihl: mask to enable/disable active low on the INT lines. * struct ig1 - represents the Interrupt Generator 1 of sensors. * @en_addr: address of the enable ig1 register. 
* @en_mask: mask to write the on/off value for enable. @@ -127,6 +129,8 @@ struct st_sensor_data_ready_irq { u8 addr; u8 mask_int1; u8 mask_int2; + u8 addr_ihl; + u8 mask_ihl; struct { u8 en_addr; u8 en_mask; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index b5894118755f..b2b16772c651 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -180,18 +180,18 @@ struct iio_event_spec { * @address: Driver specific identifier. * @scan_index: Monotonic index to give ordering in scans when read * from a buffer. - * @scan_type: Sign: 's' or 'u' to specify signed or unsigned + * @scan_type: sign: 's' or 'u' to specify signed or unsigned * realbits: Number of valid bits of data - * storage_bits: Realbits + padding + * storagebits: Realbits + padding * shift: Shift right by this before masking out * realbits. - * endianness: little or big endian * repeat: Number of times real/storage bits * repeats. When the repeat element is * more than 1, then the type element in * sysfs will show a repeat value. * Otherwise, the number of repetitions is * omitted. + * endianness: little or big endian * @info_mask_separate: What information is to be exported that is specific to * this channel. * @info_mask_shared_by_type: What information is to be exported that is shared @@ -448,7 +448,7 @@ struct iio_buffer_setup_ops { * @buffer: [DRIVER] any buffer present * @buffer_list: [INTERN] list of all buffers currently attached * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux - * @mlock: [INTERN] lock used to prevent simultaneous device state + * @mlock: [DRIVER] lock used to prevent simultaneous device state * changes * @available_scan_masks: [DRIVER] optional array of allowed bitmasks * @masklength: [INTERN] the length of the mask established from diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index ed48594e96d2..2aed04396210 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -75,6 +75,7 @@ enum irq_domain_bus_token { DOMAIN_BUS_PLATFORM_MSI, DOMAIN_BUS_NEXUS, DOMAIN_BUS_IPI, + DOMAIN_BUS_FSL_MC_MSI, }; /** diff --git a/include/linux/msi.h b/include/linux/msi.h index a2a0068a8387..8b425c66305a 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -33,6 +33,14 @@ struct platform_msi_desc { }; /** + * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data + * @msi_index: The index of the MSI descriptor + */ +struct fsl_mc_msi_desc { + u16 msi_index; +}; + +/** * struct msi_desc - Descriptor structure for MSI based interrupts * @list: List head for management * @irq: The base interrupt number @@ -87,6 +95,7 @@ struct msi_desc { * tree wide cleanup. */ struct platform_msi_desc platform; + struct fsl_mc_msi_desc fsl_mc; }; }; diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h new file mode 100644 index 000000000000..7bd8ed7d978e --- /dev/null +++ b/include/linux/platform_data/ad5761.h @@ -0,0 +1,44 @@ +/* + * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter + * + * Copyright 2016 Qtechnology A/S + * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com> + * + * Licensed under the GPL-2. + */ +#ifndef __LINUX_PLATFORM_DATA_AD5761_H__ +#define __LINUX_PLATFORM_DATA_AD5761_H__ + +/** + * enum ad5761_voltage_range - Voltage range the AD5761 is configured for. 
+ * @AD5761_VOLTAGE_RANGE_M10V_10V: -10V to 10V + * @AD5761_VOLTAGE_RANGE_0V_10V: 0V to 10V + * @AD5761_VOLTAGE_RANGE_M5V_5V: -5V to 5V + * @AD5761_VOLTAGE_RANGE_0V_5V: 0V to 5V + * @AD5761_VOLTAGE_RANGE_M2V5_7V5: -2.5V to 7.5V + * @AD5761_VOLTAGE_RANGE_M3V_3V: -3V to 3V + * @AD5761_VOLTAGE_RANGE_0V_16V: 0V to 16V + * @AD5761_VOLTAGE_RANGE_0V_20V: 0V to 20V + */ + +enum ad5761_voltage_range { + AD5761_VOLTAGE_RANGE_M10V_10V, + AD5761_VOLTAGE_RANGE_0V_10V, + AD5761_VOLTAGE_RANGE_M5V_5V, + AD5761_VOLTAGE_RANGE_0V_5V, + AD5761_VOLTAGE_RANGE_M2V5_7V5, + AD5761_VOLTAGE_RANGE_M3V_3V, + AD5761_VOLTAGE_RANGE_0V_16V, + AD5761_VOLTAGE_RANGE_0V_20V, +}; + +/** + * struct ad5761_platform_data - AD5761 DAC driver platform data + * @voltage_range: Voltage range the AD5761 is configured for + */ + +struct ad5761_platform_data { + enum ad5761_voltage_range voltage_range; +}; + +#endif diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index 7c63bd67c36e..c077617f3304 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -37,6 +37,7 @@ enum iio_chan_type { IIO_VELOCITY, IIO_CONCENTRATION, IIO_RESISTANCE, + IIO_PH, }; enum iio_modifier { |
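The new include/linux/platform_data/ad5761.h above only declares the configuration interface (enum ad5761_voltage_range and struct ad5761_platform_data); the patch itself does not show how a board hands that data to the driver. The following is a minimal, hypothetical board-code sketch: the SPI bus number, chip select, clock rate and the "ad5761" modalias are illustrative assumptions, not taken from this diff.

/*
 * Hypothetical board-support sketch (not part of the patch above):
 * supply ad5761_platform_data to the AD5761 IIO DAC driver through
 * ordinary SPI board info registration.
 */
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/ad5761.h>

static const struct ad5761_platform_data ad5761_pdata = {
	/* Output span the DAC should be configured for */
	.voltage_range = AD5761_VOLTAGE_RANGE_M10V_10V,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		/* Assumes the driver's SPI id table lists "ad5761" */
		.modalias	= "ad5761",
		.max_speed_hz	= 1000000,	/* illustrative */
		.bus_num	= 0,		/* illustrative */
		.chip_select	= 0,		/* illustrative */
		.platform_data	= &ad5761_pdata,
	},
};

static int __init board_ad5761_init(void)
{
	/* Usually called from the board's init code; an initcall is
	 * used here only to keep the sketch self-contained.
	 */
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}
arch_initcall(board_ad5761_init);

On a device-tree system the same setting would instead come from the driver's DT/ACPI handling (if any); the platform-data path shown here is just the board-file route the new header is written for.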