diff -ENwbur a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
--- a/arch/arm64/boot/dts/Makefile	2018-05-06 08:47:35.345263577 +0200
+++ b/arch/arm64/boot/dts/Makefile	2018-05-06 08:49:48.242657438 +0200
@@ -14,6 +14,7 @@
 dts-dirs += hisilicon
 dts-dirs += marvell
 dts-dirs += mediatek
+dts-dirs += nexell
 dts-dirs += nvidia
 dts-dirs += qcom
 dts-dirs += realtek
diff -ENwbur a/arch/arm64/boot/dts/nexell/Makefile b/arch/arm64/boot/dts/nexell/Makefile
--- a/arch/arm64/boot/dts/nexell/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ b/arch/arm64/boot/dts/nexell/Makefile	2018-05-06 08:49:48.254657924 +0200
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_S5P6818) += s5p6818-nanopi-m3.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff -ENwbur a/arch/arm64/boot/dts/nexell/s5p6818-nanopi-m3.dts b/arch/arm64/boot/dts/nexell/s5p6818-nanopi-m3.dts
--- a/arch/arm64/boot/dts/nexell/s5p6818-nanopi-m3.dts	1970-01-01 01:00:00.000000000 +0100
+++ b/arch/arm64/boot/dts/nexell/s5p6818-nanopi-m3.dts	2018-05-06 08:49:48.254657924 +0200
@@ -0,0 +1,750 @@
+/*
+ * Copyright (C) 2016 Nexell Co., Ltd.
+ * Author: Youngbok, Park
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+#include
+#include
+#include
+#include "s5p6818.dtsi"
+
+#define PMIC_PDATA_INIT(_id, _rname, _minuv,		\
+		_maxuv, _always_on, _boot_on,		\
+		_init_uv, _init_enable, _slp_slots)	\
+	regulator-name = _rname;			\
+	regulator-min-microvolt = <_minuv>;		\
+	regulator-max-microvolt = <_maxuv>;		\
+	nx,id = <_id>;					\
+	nx,always_on = <_always_on>;			\
+	nx,boot_on = <_boot_on>;			\
+	nx,init_enable = <_init_enable>;		\
+	nx,init_uV = <_init_uv>;			\
+	nx,sleep_slots = <_slp_slots>;
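+
+/* For reference, the PMIC_PDATA_INIT() macro above expands into plain
+ * regulator properties. For example, the axp22_aldo1 entry below,
+ * PMIC_PDATA_INIT(1, "axp228_3p3_alive", 700000, 3300000, 1, 1,
+ * 3300000, 1, 0xF), becomes:
+ *
+ *	regulator-name = "axp228_3p3_alive";
+ *	regulator-min-microvolt = <700000>;
+ *	regulator-max-microvolt = <3300000>;
+ *	nx,id = <1>;
+ *	nx,always_on = <1>;
+ *	nx,boot_on = <1>;
+ *	nx,init_enable = <1>;
+ *	nx,init_uV = <3300000>;
+ *	nx,sleep_slots = <0xF>;
+ */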
+
+/ {
+	memory {
+		/* Note: the Samsung Artik U-Boot hard-codes the memory
+		 * information to the values of CONFIG_SYS_SDRAM_BASE and
+		 * CONFIG_SYS_SDRAM_SIZE from the U-Boot configuration, so the
+		 * values specified below are meaningless.
+		 */
+		device_type = "memory";
+		reg = <0x40000000 0x40000000>;
+	};
+
+	aliases {
+		ethernet0 = &gmac0;
+	};
+
+	nx-v4l2 {
+		status = "okay";
+	};
+
+	soc {
+		#include "s5p6818-pinctrl.dtsi"
+
+		clocks {
+			uart0:uart@c00a9000 { clock-frequency = <147500000>; };
+			uart1:uart@c00a8000 { clock-frequency = <147500000>; };
+			uart2:uart@c00aa000 { clock-frequency = <147500000>; };
+			uart3:uart@c00ab000 { clock-frequency = <147500000>; };
+			uart4:uart@c006e000 { clock-frequency = <147500000>; };
+			uart5:uart@c0084000 { clock-frequency = <147500000>; };
+			pwm0:pwm0@c00ba000 { clock-frequency = <100000000>; };
+			i2c0:i2c@c00ae000 { clock-frequency = <200000000>; };
+			i2c1:i2c@c00af000 { clock-frequency = <200000000>; };
+			i2c2:i2c@c00b0000 { clock-frequency = <200000000>; };
+			vip1:vip@c00c2000 { src-force = <4>; };
+		};
+
+		serial0:serial@c00a1000 {
+			status = "okay";
+		};
+
+		serial1:serial@c00a0000 {
+			status = "okay";
+			pinctrl-names = "default";
+			pinctrl-0 = <&serial1_pin &serial1_flow>;
+		};
+
+		amba {
+			pl08xdma0:pl08xdma@c0000000 {
+				use_isr;
+
+				ch12 {
+					slave_wait_flush_dma;
+				};
+
+				ch13 {
+					slave_wait_flush_dma;
+				};
+
+				ch14 {
+					slave_wait_flush_dma;
+				};
+
+				ch15 {
+					slave_wait_flush_dma;
+				};
+			};
+
+			pl08xdma1:pl08xdma@c0001000 {
+				use_isr;
+
+				ch0 {
+					slave_wait_flush_dma;
+				};
+
+				ch1 {
+					slave_wait_flush_dma;
+				};
+			};
+		};
+
+		dw_mmc_0:dw_mmc@c0062000 {		// mappings from kernel 3.x:
+			bus-width = <4>;		// MMC_CAP_4_BIT_DATA
+			cap-sd-highspeed;		// DW_MCI_QUIRK_HIGHSPEED
+			cap-mmc-highspeed;		// also DW_MCI_QUIRK_HIGHSPEED
+			clock-frequency = <100000000>;	// bus_hz: 100 * 1000 * 1000
+			card-detect-delay = <200>;	// detect_delay_ms
+			disable-wp;			// write protect: -> get_ro; feature not available for micro SD
+			cd-gpios = <&alive_0 1 GPIO_ACTIVE_LOW>;	// card detect: CFG_SDMMC0_DETECT_IO == PAD_GPIO_ALV + 1
+			nexell,drive_dly = <0x0>;	// DW_MMC_DRIVE_DELAY(0)
+			nexell,drive_shift = <0x02>;	// DW_MMC_DRIVE_PHASE(2)
+			nexell,sample_dly = <0x00>;	// DW_MMC_SAMPLE_DELAY(0)
+			nexell,sample_shift = <0x01>;	// DW_MMC_SAMPLE_PHASE(1)
+			status = "okay";
+		};
+
+		dw_mmc_1:dw_mmc@c0068000 {
+			bus-width = <4>;
+			cap-sd-highspeed;
+			clock-frequency = <100000000>;
+			card-detect-delay = <200>;
+			non-removable;
+			keep-power-in-suspend;
+			nexell,drive_dly = <0x0>;
+			nexell,drive_shift = <0x02>;
+			nexell,sample_dly = <0x00>;
+			nexell,sample_shift = <0x01>;
+			mmc-pwrseq = <&wifi_powerseq>;
+			status = "okay";
+
+			/* wifi definition for brcmfmac.ko module */
+			brcmf: brcmf@1 {
+				compatible = "brcm,bcm4329-fmac";
+				reg = <1>;
+				interrupt-parent = <&gpio_c>;
+				interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+				brcm,powersave-default-off;
+			};
+		};
+
+		dw_mmc_2:dw_mmc@c0069000 {
+			bus-width = <4>;		// MMC_CAP_4_BIT_DATA
+			cap-sd-highspeed;		// DW_MCI_QUIRK_HIGHSPEED
+			cap-mmc-highspeed;		// also DW_MCI_QUIRK_HIGHSPEED
+			sd-uhs-ddr50;			// MMC_CAP_UHS_DDR50
+			cap-mmc-hw-reset;		// MMC_CAP_HW_RESET
+			clock-frequency = <200000000>;	// bus_hz: 200 * 1000 * 1000
+			card-detect-delay = <200>;	// detect_delay_ms
+			non-removable;			// MMC_CAP_NONREMOVABLE
+			broken-cd;
+			cd-gpios = <&gpio_c 24 GPIO_ACTIVE_LOW>;	// card detect: CFG_SDMMC2_DETECT_IO == PAD_GPIO_C + 24
+			nexell,drive_dly = <0x0>;	// DW_MMC_DRIVE_DELAY(0)
+			nexell,drive_shift = <0x03>;	// DW_MMC_DRIVE_PHASE(3)
+			nexell,sample_dly = <0x00>;	// DW_MMC_SAMPLE_DELAY(0)
+			nexell,sample_shift = <0x02>;	// DW_MMC_SAMPLE_PHASE(2)
+			status = "okay";
+		};
+
+		/* FIXME: bluetooth reset is piggybacked here although its data
+		 * flow goes through serial1 */
+		wifi_powerseq:
wifi_powerseq { + compatible = "mmc-pwrseq-simple"; + reset-gpios = + <&gpio_b 24 GPIO_ACTIVE_LOW /* wifi */ + &gpio_b 8 GPIO_ACTIVE_LOW>; /* bluetooth */ + post-power-on-delay-ms = <50>; + }; + + i2c3_gpio:i2c@0 { + compatible = "i2c-gpio"; + gpios = <&gpio_e 31 0 /* sda */ + &gpio_e 30 0 /* scl */ + >; + i2c-gpio,delay-us = <10>;/* ~100 kHz */ + i2c-gpio,ch =<3>; + }; + + i2c3_gpio:i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + + axp228@34 { + compatible = "x-powers,axp228"; + reg = <0x34>; + interrupt-parent = <&alive_0>; // CFG_GPIO_PMIC_INTR + interrupts = <0x4 IRQ_TYPE_EDGE_FALLING>; + nx,id = <0>; + /* vdd_arm-supply = <&VCC1P1_ARM_PMIC>; */ + /* vdd_core-supply = <&VCC1P0_CORE_PMIC>; */ + regulators { + VCC_LDO1: + axp22_rtcldo{PMIC_PDATA_INIT( 0, + "axp228_rtcldo", + 3000000, 3000000, 0, 0, 3300000, + 0, 0xF) }; + VCC_LDO2: + axp22_aldo1{PMIC_PDATA_INIT( 1, + "axp228_3p3_alive", + 700000, 3300000, 1, 1, 3300000, + 1, 0xF) }; + VCC_LDO3: + axp22_aldo2{PMIC_PDATA_INIT( 2, + "axp228_1p8_alive", + 700000, 3300000, 1, 1, 1800000, + 1, 0xF) }; + VCC_LDO4: + axp22_aldo3{PMIC_PDATA_INIT( 3, + "axp228_1p0_alive", + 700000, 3300000, 1, 1, 1000000, + 1, 0xF) }; + VCC_LDO5: + axp22_dldo1{PMIC_PDATA_INIT( 4, + "axp228_wide", + 700000, 3300000, 1, 1, 3300000, + 1, 0xF) }; + VCC_LDO6: + axp22_dldo2{PMIC_PDATA_INIT( 5, + "axp228_1p8_cam", + 700000, 3300000, 0, 0, 1800000, + 0, 0xF) }; + VCC_LDO7: + axp22_dldo3{PMIC_PDATA_INIT( 6, + "axp228_dldo3", + 700000, 3300000, 0, 0, 700000, + 0, 0xF) }; + VCC_LDO8: + axp22_dldo4{PMIC_PDATA_INIT( 7, + "axp228_dldo4", + 700000, 3300000, 0, 0, 700000, + 0, 0xF) }; + VCC_LDO9: + axp22_eldo1{PMIC_PDATA_INIT( 8, + "axp228_1p8_sys", + 700000, 3300000, 1, 1, 1800000, + 1, 0xF) }; + VCC_LDO10: + axp22_eldo2{PMIC_PDATA_INIT( 9, + "axp228_3p3_wifi", + 700000, 3300000, 1, 1, 3300000, + 1, 0xF) }; + VCC_LDO11: + axp22_eldo3{PMIC_PDATA_INIT(10, + "axp228_eldo3", + 700000, 3300000, 0, 0, 700000, + 0, 0xF) }; + VCC_LDO12: + axp22_dc5ldo{PMIC_PDATA_INIT(11, + "axp228_1p2_cvbs", + 700000, 1400000, 0, 0, 1200000, + 0, 0xF) }; + VCC_DCDC1: + axp22_dcdc1{PMIC_PDATA_INIT(12, + "axp228_3p3_sys", + 1600000, 3400000, 1, 1, 3300000, + 1, 0xF) }; + VCC1P1_ARM_PMIC: + axp22_dcdc2{PMIC_PDATA_INIT(13, + "axp228_1p1_arm", + 600000, 1540000, 1, 1, 1200000, + 1, 0xF) }; + VCC1P0_CORE_PMIC: + axp22_dcdc3{PMIC_PDATA_INIT(14, + "axp228_1p0_core", + 600000, 1860000, 1, 1, 1200000, + 1, 0xF) }; + VCC_DCDC4: + axp22_dcdc4{PMIC_PDATA_INIT(15, + "axp228_1p5_sys", + 600000, 1540000, 1, 1, 1500000, + 1, 0xF) }; + VCC_DCDC5: + axp22_dcdc5{PMIC_PDATA_INIT(16, + "axp228_1p5_ddr", + 1000000, 2550000, 1, 1, 1500000, + 1, 0xF) }; + VCC_LDOIO0: + axp22_ldoio0{PMIC_PDATA_INIT(17, + "axp228_ldoio0", + 700000, 3300000, 0, 0, 1800000, + 0, 0xF) }; + VCC_LDOIO1: + axp22_ldoio1{PMIC_PDATA_INIT(18, + "axp228_ldoio1", + 700000, 3300000, 0, 0, 1000000, + 0, 0xF) }; + }; + }; + }; + + pinctrl@C0010000 { + touchpanel_irq: touchpanel-irq { + nexell,pins = "gpioc-16"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + }; + + nexell_usbphy: nexell-usbphy@c0012000 { + status = "okay"; + }; + + ehci@c0030000 { + status = "okay"; + port@0 { + status = "okay"; + }; + }; + + ohci@c0020000 { + status = "okay"; + port@0 { + status = "okay"; + }; + }; + + dwc2otg@c0040000 { + gpios = <&gpio_d 21 0>; + status = "okay"; + }; + + gmac0:ethernet@c0060000 { + pinctrl-names = "default"; + pinctrl-0 = <&gmac_pins>; + + status = "okay"; + #address-cells = <0x1>; + #size-cells = <0x0>; + + 
snps,phy-addr = <7>; + snps,reset-gpio = <&gpio_e 22 0>; + snps,reset-active-low; + snps,reset-delays-us = <0 10000 30000>; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethernet_phy: ethernet-phy@3 { + reg = <3>; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + }; + + i2c_0:i2c@c00a4000 { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + es8316_codec: es8316@11 { + #sound-dai-cells = <0>; + compatible = "everest,es8316"; + reg = <0x11>; + }; + }; + + i2c_1:i2c@c00a5000 { + status = "okay"; + }; + + i2c_2:i2c@c00a6000 { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + /* Note: touch sensors are registered by onewire */ + /*touchscreen@38 { + compatible = "edt,edt-ft5506"; + reg = <0x38>; + interrupt-parent = <&gpio_c>; + interrupts = <16 IRQ_TYPE_EDGE_FALLING>; + pinctrl-names = "default"; + pinctrl-0 = <&touchpanel_irq>; + touchscreen-size-x = <1280>; + touchscreen-size-y = <800>; + touchscreen-max-pressure = <255>; + };*/ + + /*touchscreen@46 { + compatible = "ite,it7260"; + reg = <0x46>; + interrupt-parent = <&gpio_c>; + interrupts = <16 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&touchpanel_irq>; + };*/ + }; + + pwm:pwm@c0018000 { + // block pwm3_pin - conflicts with spi0_miso (on spi0_bus) drawn on 2.54mm header + pinctrl-0 = <&pwm0_pin &pwm1_pin &pwm2_pin>; + samsung,pwm-outputs = <0>, <1>, <2>; + status = "okay"; + }; + + vip_1:vip@c0064000 { + status = "okay"; + }; + + clipper_1:clipper1@c0064000 { + status = "okay"; + pwms = <&pwm 1 41 0>; /* 1000000000/41 */ + interface_type = ; + pinctrl-names = "default"; + pinctrl-0 = <&vid1_data_clk &vid1_sync> ; + port = <0>; + external_sync = <0>; + data_order = ; + interlace = <0>; + regulator_names = "axp22_dldo2"; + regulator_voltages = <1800000>; + + gpios = <&gpio_c 4 0 + &gpio_c 5 0 + &gpio_c 6 0>; + + sensor { + type = ; + i2c_name = "SP2518"; + i2c_adapter = <0>; + addr = <0x30>; + }; + + power { + enable_seq = < + NX_ACTION_START NX_ACTION_TYPE_GPIO 2 1 0 NX_ACTION_END + NX_ACTION_START NX_ACTION_TYPE_PMIC 0 0 NX_ACTION_END + NX_ACTION_START NX_ACTION_TYPE_PMIC 1 0 NX_ACTION_END + NX_ACTION_START NX_ACTION_TYPE_GPIO 0 0 0 1 0 NX_ACTION_END + NX_ACTION_START NX_ACTION_TYPE_CLOCK 1 10 NX_ACTION_END + NX_ACTION_START NX_ACTION_TYPE_GPIO 0 0 0 NX_ACTION_END + NX_ACTION_START NX_ACTION_TYPE_GPIO 1 1 0 0 1 NX_ACTION_END + NX_ACTION_START NX_ACTION_TYPE_GPIO 1 1 100 NX_ACTION_END + >; + }; + }; + + dp_drm: display_drm { + status = "okay"; + ports { + port@0 { + reg = <0>; + back_color = < 0x0 >; + color_key = < 0x0 >; + /* Port 0 has two RGB planes and one video plane. These planes + * are arranged in z-order: RGB plane 0 is below plane 1, + * video plane may be set at any position in z-order. + * + * Possible names for RGB planes: "primary", "rgb", "cursor" + * Possible name for video plane: "video" + * Two RGB plane names and one video plane name may be specified in + * "plane-names" property. + * RGB plane "primary" will be used as root window. + * RGB plane "cursor" will be used for cursor. + * RGB plane "rgb" and video plane are overlay planes, normally + * not used by X-windows. + * + * Order of plane names specifies z-order of planes, top to bottom. + */ + plane-names = "cursor", "video", "primary"; + }; + port@1 { + reg = <1>; + back_color = < 0x0 >; + color_key = < 0x0 >; + /* Port 1 has one RGB plane and one video plane only. 
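+				 * Plane names are again listed in z-order, top
+				 * to bottom, so the video plane here is
+				 * stacked above the primary RGB plane.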
*/
+				plane-names = "video", "primary";
+			};
+		};
+	};
+
+		dp_drm_hdmi: display_drm_hdmi {
+			ddc-i2c-bus = <&i2c_1>;
+			q_range = <1>;
+			status = "okay";
+		};
+
+		dp_drm_rgb: display_drm_rgb {
+			remote-endpoint = <&rgb_panel>;
+			status = "okay";
+
+			dp_control {
+				clk_src_lv0 = <0>;
+				clk_div_lv0 = <16>;
+				clk_src_lv1 = <7>;
+				clk_div_lv1 = <1>;
+				out_format = <3>;
+				invert_field = <0>;
+				swap_rb = <0>;
+				yc_order = <0>;
+				delay_mask = < ((1<<0) | (1<<1) | (1<<2) | (1<<3)) >;
+				d_rgb_pvd = <0>;
+				d_hsync_cp1 = <0>;
+				d_vsync_fram = <0>;
+				d_de_cp2 = <7>;
+				vs_start_offset = <863>;
+				ev_start_offset = <863>;
+				vs_end_offset = <0>;
+				ev_end_offset = <0>;
+			};
+		};
+
+		dp_drm_lvds: display_drm_lvds {
+			status = "okay";
+			remote-endpoint = <&lvds_panel>;
+			dp_control {
+				clk_src_lv0 = <0>;
+				clk_div_lv0 = <16>;
+				clk_src_lv1 = <7>;
+				clk_div_lv1 = <1>;
+				out_format = <3>;
+			};
+		};
+
+		rtc@c0010c00 {
+			status = "okay";
+		};
+
+		tmuctrl_0: tmuctrl@c0096000 {
+			status = "okay";
+		};
+
+		i2s_0:i2s@c0055000 {
+			#sound-dai-cells = <1>;
+			sample-rate = <48000>;
+			frame-bit = <32>;
+			status = "okay";
+		};
+
+		spdif_tx: spdiftx@c0059000 {
+			#sound-dai-cells = <1>;
+			status = "okay";
+		};
+
+		adc:adc@c0053000 {
+			status = "okay";
+		};
+
+		video-codec@c0080000 {
+			status = "okay";
+			sram = <0 0>;
+		};
+
+		scaler@c0066000 {
+			status = "okay";
+		};
+
+		nano-videodev {
+			compatible = "nexell,nano-videodev";
+			reg = <0xc0102000 0x100>;
+			reg-names = "mlc.0";
+			status = "okay";
+		};
+	}; /*** soc ***/
+
+	panel_lvds {
+		compatible = "nanopi,nano-panel";
+		lvds;
+		status = "okay";
+
+		port {
+			lvds_panel: endpoint {
+			};
+		};
+	};
+
+	panel_rgb {
+		compatible = "nanopi,nano-panel";
+		status = "okay";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&dp_rgb_vclk &dp_rgb_vsync &dp_rgb_hsync
+			     &dp_rgb_de &dp_rgb_R &dp_rgb_G &dp_rgb_B>;
+
+		port {
+			rgb_panel: endpoint {
+			};
+		};
+	};
+
+	spdif_out: spdif-out {
+		#sound-dai-cells = <0>;
+		compatible = "linux,spdif-dit";
+		status = "okay";
+	};
+
+	/* Audio jack output configured for use with the Nexell driver.
+	 * Not used.
+	 */
+	es8316_sound: es8316@i2s0 {
+		compatible = "nexell,nexell-es8316";
+		ch = <0>;
+		sample-rate = <48000>;
+		format = "S16";
+		hpin-support = <0>;
+		hpin-gpio = <&gpio_b 27 0>;
+		hpin-level = <1>;
+		status = "disabled";
+	};
+
+	/* HDMI output configured for use with the Nexell driver. Also not
+	 * used.
+	 *
+	 * Note that es8316_sound and spdif_sound cannot be enabled together
+	 * because the nexell-pcm device is used by both.
+	 */
+	spdif_sound {
+		compatible = "nexell,spdif-transceiver";
+		sample_rate = <48000>;
+		format = "S16";
+		status = "disabled";
+	};
+
+	jack_sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,mclk-fs = <256>;
+		simple-audio-card,name = "Jack";
+		simple-audio-card,widgets =
+			"Headphone", "Headphones",
+			"Microphone", "Microphone";
+		simple-audio-card,routing =
+			"Headphones", "HPOL",
+			"Headphones", "HPOR",
+			"MIC1", "Microphone";
+		status = "okay";
+
+		simple-audio-card,dai-link@0 {
+			format = "i2s";
+			cpu {
+				sound-dai = <&i2s_0 0>;
+			};
+
+			codec {
+				sound-dai = <&es8316_codec>;
+			};
+		};
+	};
+
+	hdmi_sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,mclk-fs = <256>;
+		simple-audio-card,name = "HDMI";
+		simple-audio-card,widgets =
+			"Headphone", "TV Out";
+		simple-audio-card,routing =
+			"TV Out", "spdif-out";
+		status = "okay";
+
+		simple-audio-card,dai-link@0 {
+			cpu {
+				sound-dai = <&spdif_tx 0>;
+			};
+
+			codec {
+				sound-dai = <&spdif_out>;
+			};
+		};
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			polling-delay-passive = <100>;	/* milliseconds */
+			polling-delay = <1000>;		/* milliseconds */
+
+			thermal-sensors = <&tmuctrl_0>;
+
+			trips {
+				cpu_alert0: cpu-alert0 {
+					temperature = <85000>;	/* millicelsius */
+					hysteresis = <2000>;	/* millicelsius */
+					type = "passive";
+				};
+				cpu_crit: cpu-crit {
+					temperature = <100000>;	/* millicelsius */
+					hysteresis = <2000>;	/* millicelsius */
+					type = "critical";
+				};
+			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert0>;
+					cooling-device =
+						<&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		blue {
+			label = "blue";
+			gpios = <&gpio_b 12 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "mmc1";
+		};
+	};
+
+	wifi_bcm4329 {	/* wifi definition for bcmdhd.ko module */
+		compatible = "nanopi,bcm4329";
+		interrupt-parent = <&gpio_c>;
+		interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	nanopi-thermistor {
+		compatible = "friendlyarm,nanopi-thermistor";
+		status = "okay";
+
+		io-channels = <&adc 2>;
+		io-channel-names = "nanopi-thermistor";
+	};
+
+	nanopi-onewire {
+		interrupt-parent = <&gic>;
+		compatible = "friendlyarm,onewire";
+
+		channel-gpio = <&gpio_c 15 0>;
+		reg = ;
+		interrupts = <0 IRQ_TIMER3 0>;
+		irq-timer = <3>;
+	};
+
+	onewire-touch {
+		compatible = "friendlyarm,onewire-touch";
+		interrupt-parent = <&gpio_c>;
+		interrupts = <16 IRQ_TYPE_NONE>;
+		i2c-bus = <&i2c_2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&touchpanel_irq>;
+	};
+};
+
diff -ENwbur a/arch/arm64/boot/dts/nexell/s5p6818.dtsi b/arch/arm64/boot/dts/nexell/s5p6818.dtsi
--- a/arch/arm64/boot/dts/nexell/s5p6818.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ b/arch/arm64/boot/dts/nexell/s5p6818.dtsi	2018-05-06 08:49:48.254657924 +0200
@@ -0,0 +1,959 @@
+/*
+ * Copyright (C) 2016 Nexell Co., Ltd.
+ * Author: Youngbok, Park
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include +#include +#include +#include +#include + +/ { + model = "nexell soc"; + compatible = "nexell,s5p6818"; + #address-cells = <0x1>; + #size-cells = <0x1>; + + aliases { + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + serial4 = &serial4; + serial5 = &serial5; + i2s0 = &i2s_0; + i2s1 = &i2s_1; + i2s2 = &i2s_2; + spi0 = &spi_0; + spi1 = &spi_1; + spi2 = &spi_2; + i2c0 = &i2c_0; + i2c1 = &i2c_1; + i2c2 = &i2c_2; + + pinctrl0 = &pinctrl_0; + }; + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + + cpus { + #address-cells = <0x2>; + #size-cells = <0x0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x0>; + enable-method = "psci"; + cpu-release-addr = < 0x1 0xc0010230 >; + #cooling-cells = <2>; + cpu-idle-states =<&CPU_SLEEP>; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x1>; + enable-method = "psci"; + cpu-release-addr = < 0x1 0xc0010230 >; + cpu-idle-states =<&CPU_SLEEP>; + }; + + cpu2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x2>; + enable-method = "psci"; + cpu-release-addr = < 0x1 0xc0010230 >; + cpu-idle-states =<&CPU_SLEEP>; + }; + + cpu3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x3>; + enable-method = "psci"; + cpu-release-addr = < 0x1 0xc0010230 >; + cpu-idle-states =<&CPU_SLEEP>; + }; + + cpu4: cpu@4 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x100>; + enable-method = "psci"; + cpu-release-addr = < 0x1 0xc0010230 >; + cpu-idle-states =<&CPU_SLEEP>; + }; + + cpu5: cpu@5 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x101>; + enable-method = "psci"; + cpu-release-addr = < 0x1 0xc0010230 >; + cpu-idle-states =<&CPU_SLEEP>; + }; + + cpu6: cpu@6 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x102>; + enable-method = "psci"; + cpu-release-addr = < 0x1 0xc0010230 >; + cpu-idle-states =<&CPU_SLEEP>; + }; + + cpu7: cpu@7 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0 0x103>; + enable-method = "psci"; + cpu-release-addr = < 0x1 0xc0010230 >; + cpu-idle-states =<&CPU_SLEEP>; + }; + + cpu-map { + cluster0 { + core0 { + cpu = <&cpu0>; + }; + core1 { + cpu = <&cpu1>; + }; + core2 { + cpu = <&cpu2>; + }; + core3 { + cpu = <&cpu3>; + }; + }; + cluster1 { + core0 { + cpu = <&cpu4>; + }; + core1 { + cpu = <&cpu5>; + }; + core2 { + cpu = <&cpu6>; + }; + core3 { + cpu = <&cpu7>; + }; + }; + }; + + idle-states { + CPU_SLEEP: sleep { + compatible = "nexell,idle-state"; + arm,psci-suspend-param = <0x0000000>; + entry-latency-us = <150>; + exit-latency-us = <200>; + min-residency-us = <2000>; + }; + }; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupt-parent = <&gic>; + interrupts = <0 IRQ_P0_PMUIRQ0 0>, + <0 IRQ_P0_PMUIRQ1 0>, + <0 IRQ_P0_PMUIRQ2 0>, + <0 IRQ_P0_PMUIRQ3 0>, + <0 IRQ_P1_PMUIRQ0 0>, + <0 IRQ_P1_PMUIRQ1 0>, + <0 IRQ_P1_PMUIRQ2 0>, + <0 IRQ_P1_PMUIRQ3 0>; + interrupt-affinity = <&cpu0>, + <&cpu1>, + <&cpu2>, + <&cpu3>, + <&cpu4>, + <&cpu5>, + <&cpu6>, + <&cpu7>; + }; + + refclk:oscillator { + compatible = "nexell,s5pxx18,pll"; + reg = <0xc0010000 0x1000>; + ref-freuecny = <24000000>; + }; + + nx-v4l2 { + compatible = "nexell,nx-v4l2"; + status = "disabled"; + }; + + nx-devfreq { + compatible = "nexell,s5pxx18-devfreq"; + pll = <0>; + supply_name = "vdd_arm_regulator"; + vdd_arm_regulator-supply = <&VCC_DCDC1>; + status = "disabled"; + }; + + soc { + 
compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0xc0000000 0x300000>; + interrupt-parent = <&gic>; + ranges; + + #include "s5p6818-soc.dtsi" + + gic:interrupt-controller@c0009000 { + compatible = "arm,gic-400"; + interrupt-controller; + #interrupt-cells = <3>; + reg = <0xC0009000 0x1000>, <0xC000a000 0x100>; + }; + + timer@c0017000 { + compatible = "nexell,s5p6818-timer"; + reg = ; + interrupts = <0 IRQ_TIMER1 0>; + clksource = <0>; + clkevent = <1>; + clocks = <&timer0>, <&timer1>, <&pclk>; + clock-names = "timer0", "timer1", "pclk"; + }; + + tieoff@c0011000 { + compatible = "nexell,tieoff"; + reg = ; + }; + + dynamic-freq@bb000 { + compatible = "nexell,s5pxx18-cpufreq"; + reg = <0xc00bb000 0x30000>; + }; + + serial0:serial@c00a1000 { + compatible = "nexell,s5p6818-uart"; + reg = ; + interrupts = <0 IRQ_UART0 0>; + clock-names = "uart", "clk_uart_baud0"; + clocks = <&uart0>, <&uart0>; + resets = <&nexell_reset RESET_ID_UART0>; + reset-names = "uart-reset"; + soc,tieoff = , + , + ; + pinctrl-names = "default"; + pinctrl-0 = <&serial0_pin>; + status = "disabled"; + }; + + serial1:serial@c00a0000 { + compatible = "nexell,s5p6818-uart"; + reg = ; + interrupts = <0 IRQ_UART1 0>; + clock-names = "uart", "clk_uart_baud0"; + clocks = <&uart1>, <&uart1>; + resets = <&nexell_reset RESET_ID_UART1>; + reset-names = "uart-reset"; + soc,tieoff = , + , + ; + pinctrl-names = "default"; + pinctrl-0 = <&serial1_pin>; + status = "disabled"; + }; + + serial2:serial@c00a2000 { + compatible = "nexell,s5p6818-uart"; + reg = ; + interrupts = <0 IRQ_UART2 0>; + clock-names = "uart", "clk_uart_baud0"; + clocks = <&uart2>, <&uart2>; + resets = <&nexell_reset RESET_ID_UART2>; + reset-names = "uart-reset"; + soc,tieoff = , + , + ; + pinctrl-names = "default"; + pinctrl-0 = <&serial2_pin>; + status = "disabled"; + }; + + serial3:serial@c00a3000 { + compatible = "nexell,s5p6818-uart"; + reg = ; + interrupts = <0 IRQ_UART3 0>; + clock-names = "uart", "clk_uart_baud0"; + clocks = <&uart3>, <&uart3>; + resets = <&nexell_reset RESET_ID_UART3>; + reset-names = "uart-reset"; + soc,tieoff = , + , + ; + pinctrl-names = "default"; + pinctrl-0 = <&serial3_pin>; + status = "disabled"; + }; + + serial4:serial@c006d000 { + compatible = "nexell,s5p6818-uart"; + reg = ; + interrupts = <0 IRQ_UART4 0>; + clock-names = "uart", "clk_uart_baud0"; + clocks = <&uart4>, <&uart4>; + resets = <&nexell_reset RESET_ID_UART4>; + reset-names = "uart-reset"; + soc,tieoff = , + , + ; + pinctrl-names = "default"; + pinctrl-0 = <&serial4_pin>; + status = "disabled"; + }; + + serial5:serial@c006f000 { + compatible = "nexell,s5p6818-uart"; + reg = ; + interrupts = <0 IRQ_UART5 0>; + clock-names = "uart", "clk_uart_baud0"; + clocks = <&uart5>, <&uart5>; + resets = <&nexell_reset RESET_ID_UART5>; + reset-names = "uart-reset"; + soc,tieoff = , + , + ; + pinctrl-names = "default"; + pinctrl-0 = <&serial5_pin>; + status = "disabled"; + }; + + nexell_reset:reset@c0012000 { + #reset-cells = <1>; + compatible = "nexell,s5pxx18-reset"; + reg = <0xC0012000 0x3>; + status = "okay"; + }; + + pwm:pwm@c0018000 { + compatible = "nexell,s5p6818-pwm"; + reg = ; + reset-names = "pwm-reset"; + resets = <&nexell_reset RESET_ID_PWM>; + clock-names = "timers", "pwm-tclk0", "pwm-tclk1"; + clocks = <&pclk>, <&pwm0>, <&pwm2>; + #pwm-cells = <3>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm0_pin &pwm1_pin &pwm2_pin &pwm3_pin>; + samsung,pwm-outputs = <0>, <1>, <2>, <3>; + status = "disabled"; + }; + + i2c_0:i2c@c00a4000 { + compatible = 
"nexell,s5p6818-i2c"; + reg = ; + interrupts = <0 IRQ_I2C0 0>; + clock-names = "i2c"; + clocks = <&i2c0>; + samsung,i2c-sda-delay = <100>; + samsung,i2c-max-bus-freq = <100000>; + samsung,i2c-slave-addr = <0x66>; + resets = <&nexell_reset RESET_ID_I2C0>; + reset-names = "i2c-reset"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pin>; + status = "disabled"; + }; + + i2c_1:i2c@c00a5000 { + compatible = "nexell,s5p6818-i2c"; + reg = ; + interrupts = <0 IRQ_I2C1 0>; + clock-names = "i2c"; + clocks = <&i2c1>; + samsung,i2c-sda-delay = <100>; + samsung,i2c-max-bus-freq = <100000>; + samsung,i2c-slave-addr = <0x66>; + resets = <&nexell_reset RESET_ID_I2C1>; + reset-names = "i2c-reset"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pin>; + status = "disabled"; + }; + + i2c_2:i2c@c00a6000 { + compatible = "nexell,s5p6818-i2c"; + reg = ; + interrupts = <0 IRQ_I2C2 0>; + clock-names = "i2c"; + clocks = <&i2c2>; + samsung,i2c-sda-delay = <100>; + samsung,i2c-max-bus-freq = <100000>; + samsung,i2c-slave-addr = <0x66>; + resets = <&nexell_reset RESET_ID_I2C2>; + reset-names = "i2c-reset"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pin>; + status = "disabled"; + }; + + dw_mmc_2:dw_mmc@c0069000 { + compatible = "nexell,s5p6818-dw-mshc"; + interrupts = <0 IRQ_SDMMC2 0>; + #address-cells = <1>; + #size-cells = <0>; + reg = ; + resets = <&nexell_reset RESET_ID_SDMMC2>; + reset-names = "dw_mmc-reset"; + clock-names = "biu","ciu"; + clocks = <&sdhc2>, <&sdhc2>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc2_cclk &sdmmc2_cmd &sdmmc2_bus4>; + fifo-depth = <0x20>; + status = "disabled"; + }; + + dw_mmc_1:dw_mmc@c0068000 { + compatible = "nexell,s5p6818-dw-mshc"; + interrupts = <0 IRQ_SDMMC1 0>; + #address-cells = <1>; + #size-cells = <0>; + reg = ; + resets = <&nexell_reset RESET_ID_SDMMC1>; + reset-names = "dw_mmc-reset"; + clock-names = "biu","ciu"; + clocks = <&sdhc1>, <&sdhc1>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc1_cclk &sdmmc1_cmd &sdmmc1_bus4>; + fifo-depth = <0x20>; + status = "disabled"; + }; + + dw_mmc_0:dw_mmc@c0062000 { + compatible = "nexell,s5p6818-dw-mshc"; + interrupts = <0 IRQ_SDMMC0 0>; + #address-cells = <1>; + #size-cells = <0>; + reg = ; + resets = <&nexell_reset RESET_ID_SDMMC0>; + reset-names = "dw_mmc-reset"; + clock-names = "biu", "ciu"; + clocks = <&sdhc0>, <&sdhc0>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc0_cclk &sdmmc0_cmd &sdmmc0_bus4>; + fifo-depth = <0x20>; + status = "disabled"; + }; + + i2s_0:i2s@c0055000 { + compatible = "nexell,nexell-i2s"; + reg = ; + dmas = <&pl08xdma0 12 0>, <&pl08xdma0 13 0>; + dma-names = "tx", "rx"; + clocks = <&i2s0>; + clock-names = "i2s0"; + resets = <&nexell_reset RESET_ID_I2S0>; + reset-names = "i2s-reset"; + pinctrl-names = "default"; + pinctrl-0 = <&i2s0_bus>; + master-mode = <1>; + mclk-in = <0>; + trans-mode = <0>; + frame-bit = <32>; + sample-rate = <48000>; + pre-supply-mclk = <1>; + status = "disabled"; + }; + + i2s_1:i2s@c0056000 { + compatible = "nexell,nexell-i2s"; + reg = ; + dmas = <&pl08xdma0 14 0>, <&pl08xdma0 15 0>; + dma-names = "tx", "rx"; + clocks = <&i2s1>; + clock-names = "i2s1"; + resets = <&nexell_reset RESET_ID_I2S1>; + reset-names = "i2s-reset"; + pinctrl-names = "default"; + pinctrl-0 = <&i2s1_bus>; + master-mode = <1>; + mclk-in = <0>; + trans-mode = <0>; + frame-bit = <32>; + sample-rate = <48000>; + pre-supply-mclk = <1>; + status = "disabled"; + }; + + i2s_2:i2s@c0057000 { + compatible = "nexell,nexell-i2s"; + reg = ; + dmas = <&pl08xdma1 0 0>, <&pl08xdma1 1 0>; + dma-names = 
"tx", "rx"; + clocks = <&i2s2>; + clock-names = "i2s2"; + resets = <&nexell_reset RESET_ID_I2S2>; + reset-names = "i2s-reset"; + pinctrl-names = "default"; + pinctrl-0 = <&i2s2_bus>; + master-mode = <1>; + mclk-in = <0>; + trans-mode = <0>; + frame-bit = <32>; + sample-rate = <48000>; + pre-supply-mclk = <1>; + status = "disabled"; + }; + + nexell_usbphy: nexell-usbphy@c0012000 { + compatible = "nexell,nexell-usb2-phy"; + reg = ; + clocks = <&usbhost>; + clock-names = "phy"; + #phy-cells = <1>; + status = "disabled"; + }; + + ehci@c0030000 { + compatible = "nexell,nexell-ehci"; + reg = ; + interrupts = <0 IRQ_USB20HOST 0>; + clocks = <&usbhost>; + clock-names = "usbhost"; + resets = <&nexell_reset RESET_ID_USB20HOST>; + reset-names = "usbhost-reset"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + phys = <&nexell_usbphy 1>; + status = "disabled"; + }; + port@1 { + reg = <1>; + phys = <&nexell_usbphy 2>; + status = "disabled"; + }; + }; + + ohci@c0020000 { + compatible = "nexell,nexell-ohci"; + reg = ; + interrupts = <0 IRQ_USB20HOST 0>; + clocks = <&usbhost>; + clock-names = "usbhost"; + resets = <&nexell_reset RESET_ID_USB20HOST>; + reset-names = "usbhost-reset"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + phys = <&nexell_usbphy 1>; + status = "disabled"; + }; + }; + + dwc2otg@c0040000 { + compatible = "nexell,nexell-dwc2otg"; + reg = ; + interrupts = <0 IRQ_USB20OTG 0>; + clocks = <&otg>; + clock-names = "otg"; + resets = <&nexell_reset RESET_ID_USB20OTG>; + reset-names = "usbotg-reset"; + phys = <&nexell_usbphy 0>; + phy-names = "usb2-phy"; + dr_mode = "otg"; + g-use-dma = <1>; + g-rx-fifo-size = <1064>; + g-np-tx-fifo-size = <512>; + g-tx-fifo-size = <512 512 512 512 512 256 256 256 256 + 256 64 64 64 64 64>; + status = "disabled"; + }; + + gmac0:ethernet@c0060000 { + compatible = "nexell,s5p6818-gmac"; + clocks = <&pclk>, <&gmac>; + clock-names = "stmmaceth", "nexell_gmac_tx"; + resets = <&nexell_reset RESET_ID_DWC_GMAC>; + reset-names = "stmmaceth"; + reg = ; + interrupt-parent = <&gic>; + interrupts = <0 IRQ_GMAC 0>; + interrupt-names = "macirq"; + mac-address = [000000000000]; /* Filled in by U-Boot */ + phy-mode = "rgmii"; + snps,multicast-filter-bins = <256>; + status = "disable"; + }; + + adc:adc@c0053000 { + compatible = "nexell,s5p6818-adc"; + reg = ; + interrupts = <0 IRQ_ADC 0>; + resets = <&nexell_reset RESET_ID_ADC>; + reset-names = "adc-reset"; + clocks = <&pclk>; + clock-names = "adc"; + sample_rate = <200000>; + #io-channel-cells = <1>; + status = "disabled"; + }; + + spi_0:spi@c005b000 { + compatible = "nexell,s5p6818-spi"; + reg = ; + interrupts = <0 IRQ_SSP0 0>; + dmas = <&pl08xdma0 6 0>, <&pl08xdma0 7 0>; + dma-names = "tx", "rx"; + resets = <&nexell_reset RESET_ID_SSP0_P>, + <&nexell_reset RESET_ID_SSP0>; + reset-names = "pre-reset","spi-reset"; + #address-cells = <1>; + #size-cells = <0>; + samsung,spi-src-clk = <0>; + num-cs = <1>; + clocks = <&spi0>, <&spi0>; + clock-names = "spi", "spi_busclk0"; + pinctrl-names = "default"; + pinctrl-0 = <&spi0_bus>; + status ="disable"; + }; + + spi_1:spi@c005c000 { + compatible = "nexell,s5p6818-spi"; + reg = ; + interrupts = <0 IRQ_SSP1 0>; + dmas = <&pl08xdma0 8 0>, <&pl08xdma0 9 0>; + dma-names = "tx", "rx"; + resets = <&nexell_reset RESET_ID_SSP1_P>, + <&nexell_reset RESET_ID_SSP1>; + reset-names = "pre-reset","spi-reset"; + #address-cells = <1>; + #size-cells = <0>; + samsung,spi-src-clk = <0>; + num-cs = <1>; + clocks = 
<&spi1>, <&spi1>; + clock-names = "spi", "spi_busclk0"; + pinctrl-names = "default"; + pinctrl-0 = <&spi1_bus>; + status ="disable"; + }; + + spi_2:spi@c005f000 { + compatible = "nexell,s5p6818-spi"; + reg = ; + interrupts = <0 IRQ_SSP2 0>; + dmas = <&pl08xdma0 10 0>, <&pl08xdma0 11 0>; + dma-names = "tx", "rx"; + resets = <&nexell_reset RESET_ID_SSP2_P>, + <&nexell_reset RESET_ID_SSP2>; + reset-names = "pre-reset","spi-reset"; + #address-cells = <1>; + #size-cells = <0>; + samsung,spi-src-clk = <0>; + num-cs = <1>; + clocks = <&spi2>, <&spi2>; + clock-names = "spi", "spi_busclk0"; + pinctrl-names = "default"; + pinctrl-0 = <&spi2_bus>; + status ="disable"; + }; + + watchdog@c0019000 { + compatible = "nexell,nexell-wdt"; + reg = ; + interrupts = <0 IRQ_WDT 0>; + resets = <&nexell_reset RESET_ID_WDT>, + <&nexell_reset RESET_ID_WDT_POR>; + reset-names = "wdt-reset","wdt-por-reset"; + clocks = <&pclk>; + clock-names = "watchdog"; + status = "disabled"; + }; + + spdif_tx: spdiftx@c0059000 { + compatible = "nexell,nexell-spdif-tx"; + reg = ; + interrupts = <0 IRQ_SPDIFTX 0>; + dmas = <&pl08xdma1 6 0>; + dma-names = "tx"; + resets = <&nexell_reset RESET_ID_SPDIFTX>; + reset-names = "spdiftx-reset"; + clocks = <&spdiftx>; + clock-names = "spdif-tx"; + pcm-bit = <16>; + sample_rate = <48000>; + status = "disabled"; + }; + + tmuctrl_0: tmuctrl@c0096000 { + compatible = "nexell,s5p6818-tmu"; + reg = ; + interrupts = <0 IRQ_TMU0 0>; + clocks = <&pclk>; + clock-names = "tmu_apbif"; + #include "s5p6818-tmu-sensor-conf.dtsi" + soc,tieoff = ; + status = "disabled"; + }; + + tmuctrl_1: tmuctrl@c0097000 { + compatible = "nexell,s5p6818-tmu"; + reg = ; + interrupts = <0 IRQ_TMU1 0>; + clocks = <&pclk>; + clock-names = "tmu_apbif"; + #include "s5p6818-tmu-sensor-conf.dtsi" + soc,tieoff = ; + status = "disabled"; + }; + + mipi_csi:mipi_csi@c00d0000 { + compatible = "nexell,mipi_csi"; + reg = ; + clock-names = "mipi"; + clocks = <&mipi>; + reset-names = "mipi-reset", "mipi_csi-reset", "mipi_phy_s-reset"; + resets = <&nexell_reset RESET_ID_MIPI>, + <&nexell_reset RESET_ID_MIPI_CSI>, + <&nexell_reset RESET_ID_MIPI_PHY_S>; + soc,tieoff = ; + data_lane = <2>; + swap_clocklane = <0>; + swap_datalane = <0>; + pllval = <750>; + status = "disabled"; + }; + + vip_0:vip@c0063000 { + compatible = "nexell,vip"; + reg = ; + interrupts = <0 IRQ_VIP0 0>; + clock-names = "vip0"; + clocks = <&vip0>; + reset-names = "vip0-reset"; + resets = <&nexell_reset RESET_ID_VIP0>; + module = <0>; + status = "disabled"; + }; + + vip_1:vip@c0064000 { + compatible = "nexell,vip"; + reg = ; + interrupts = <0 IRQ_VIP1 0>; + clock-names = "vip1"; + clocks = <&vip1>; + reset-names = "vip1-reset"; + resets = <&nexell_reset RESET_ID_VIP1>; + module = <1>; + status = "disabled"; + }; + + vip_2:vip@c0099000 { + compatible = "nexell,vip"; + reg = ; + interrupts = <0 IRQ_VIP2 0>; + clock-names = "vip2"; + clocks = <&vip2>; + reset-names = "vip2-reset"; + resets = <&nexell_reset RESET_ID_VIP2>; + module = <2>; + status = "disabled"; + }; + + clipper_0:clipper0@c0063000 { + compatible = "nexell,nx-clipper"; + module = <0>; + status = "disabled"; + }; + + clipper_1:clipper1@c0064000 { + compatible = "nexell,nx-clipper"; + module = <1>; + status = "disabled"; + }; + + clipper_2:clipper2@c0064000 { + compatible = "nexell,nx-clipper"; + module = <2>; + status = "disabled"; + }; + + decimator_0:decimator0@c0063000 { + compatible = "nexell,nx-decimator"; + module = <0>; + status = "disabled"; + }; + + dp_drm: display_drm { + compatible = 
"nexell,s5pxx18-drm"; + reg = <0xc0102800 0x100>, <0xc0102c00 0x100>, + <0xc0102000 0x100>, <0xc0102400 0x100>; + reg-names = "dpc.0", "dpc.1", "mlc.0", "mlc.1"; + + interrupts = < 0 IRQ_DPC_P 0 >, <0 IRQ_DPC_S 0>; + interrupts-names = "dpc.0", "dpc.1"; + + resets = <&nexell_reset RESET_ID_DISPLAY>, + <&nexell_reset RESET_ID_DISP_TOP>; + reset-names = "rsc-display", "rsc-display-top"; + + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { }; + port@1 { }; + }; + }; + + dp_drm_rgb: display_drm_rgb { + compatible = "nexell,s5pxx18-drm-rgb"; + reg = <0xc0101000 0x100>; + resets = <&nexell_reset RESET_ID_DISP_TOP>; + reset-names = "rsc-display-top"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + dp_drm_lvds: display_drm_lvds { + compatible = "nexell,s5pxx18-drm-lvds"; + reg = <0xc0101000 0x100>; + resets = <&nexell_reset RESET_ID_DISP_TOP>; + reset-names = "rsc-display-top"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + dp-resource { + reg_base = <0xc010a000 0x100>; + clk_base = <0xc0108000 3>; + resets = <&nexell_reset RESET_ID_LVDS>; + reset-names = "rsc-lvds-phy"; + }; + }; + + dp_drm_mipi: display_drm_mipi { + compatible = "nexell,s5pxx18-drm-mipi"; + reg = <0xc0101000 0x100>; + resets = <&nexell_reset RESET_ID_DISP_TOP>; + reset-names = "rsc-display-top"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + dp-resource { + reg_base = <0xc00d0000 0x100>; + clk_base = <0xc0105000 2>; + + resets = <&nexell_reset RESET_ID_MIPI>, + <&nexell_reset RESET_ID_MIPI_DSI>, + <&nexell_reset RESET_ID_MIPI_PHY_S>, + <&nexell_reset RESET_ID_MIPI_PHY_M>; + reset-names = "rsc-mipi", "rsc-mipi-dsi", + "rsc-mipi-phy-s", "rsc-mipi-phy-m"; + soc,tieoff = , + ; + }; + }; + + dp_drm_hdmi: display_drm_hdmi{ + compatible = "nexell,s5pxx18-drm-hdmi"; + reg = <0xc0101000 0x100>; + interrupts = < 0 IRQ_HDMI 0 >; + resets = <&nexell_reset RESET_ID_DISP_TOP>; + reset-names = "rsc-display-top"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + dp-resource { + reg_base = <0xc0000000 0x300000>; + clk_base = <0xc0109000 4>, <0xc0105000 2>; + resets = <&nexell_reset RESET_ID_HDMI_VIDEO>, + <&nexell_reset RESET_ID_HDMI_SPDIF>, + <&nexell_reset RESET_ID_HDMI_TMDS>, + <&nexell_reset RESET_ID_HDMI>, + <&nexell_reset RESET_ID_HDMI_PHY>; + reset-names = "rsc-hdmi-video", "rsc-hdmi-spdif", + "rsc-hdmi-tmds", "rsc-hdmi", "rsc-hdmi-phy"; + soc,tieoff = ; + }; + }; + + video-codec@c0080000 { + compatible = "nexell, nx-vpu"; + reg = <0xc0080000 0x4000>; + interrupts = <0 IRQ_CODA960_HOST 0>, + <0 IRQ_CODA960_JPG 0>; + resets = <&nexell_reset RESET_ID_CODA_A>, + <&nexell_reset RESET_ID_CODA_P>, + <&nexell_reset RESET_ID_CODA_C>; + reset-names = "vpu-a-reset", "vpu-p-reset", + "vpu-c-reset"; + clocks = <&pclk>, <&bclk>; + status = "disabled"; + }; + + rtc@c0010c00 { + compatible = "nexell,nx-rtc"; + reg = , <0xc0010200 0x100>; + interrupts = <0 IRQ_RTC 0>; + status = "disabled"; + }; + + scaler@c0066000 { + compatible = "nexell,scaler"; + reg = ; + interrupts = <0 IRQ_SCALER 0>; + clock-names = "scaler"; + clocks = <&scaler>; + reset-names = "scaler-reset"; + resets = <&nexell_reset RESET_ID_SCALER>; + status = "disabled"; + }; + + gpu@c0070000 { + compatible = "arm,mali-400", "arm,mali-utgard"; + reg = ; + interrupts = <0 IRQ_VR 0>, <0 IRQ_VR 0>, <0 IRQ_VR 0>, + <0 IRQ_VR 0>, <0 IRQ_VR 0>, <0 IRQ_VR 0>, + <0 IRQ_VR 0>, <0 IRQ_VR 0>, <0 IRQ_VR 0>, + <0 IRQ_VR 0>, <0 IRQ_VR 0>; + interrupt-names = 
"IRQGP", "IRQGPMMU", "IRQPP0", + "IRQPPMMU0", "IRQPP1", "IRQPPMMU1", + "IRQPP2", "IRQPPMMU2", "IRQPP3", + "IRQPPMMU3", "IRQPMU"; + pmu_domain_config = <0x1 0x4 0x8 0x10 0x20 0x0 0x0 0x0 + 0x0 0x2 0x0 0x0>; + pmu_switch_delay = <0xff>; + clocks = <&vr>; + clock-names = "clk_mali"; + resets = <&nexell_reset RESET_ID_VR>; + reset-names = "vr-reset"; + }; + }; /*** soc ***/ +}; diff -ENwbur a/arch/arm64/boot/dts/nexell/s5p6818-pinctrl.dtsi b/arch/arm64/boot/dts/nexell/s5p6818-pinctrl.dtsi --- a/arch/arm64/boot/dts/nexell/s5p6818-pinctrl.dtsi 1970-01-01 01:00:00.000000000 +0100 +++ b/arch/arm64/boot/dts/nexell/s5p6818-pinctrl.dtsi 2018-05-06 08:49:48.254657924 +0200 @@ -0,0 +1,519 @@ +/* + * Nexell's s5p6818 SoC pin-mux and pin-config device tree source + * + * Copyright (C) 2016 Nexell Co., Ltd. + * http://www.nexell.co.kr + * + * Nexell's s5p6818 SoC pin-mux and pin-config options are listed as + * device tree nodes in this file. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +pinctrl@C0010000 { + gpio_a: gpioa { + gpio-controller; + #gpio-cells = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio_b: gpiob { + gpio-controller; + #gpio-cells = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio_c: gpioc { + gpio-controller; + #gpio-cells = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio_d: gpiod { + gpio-controller; + #gpio-cells = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio_e: gpioe { + gpio-controller; + #gpio-cells = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + alive_0: alive { + gpio-controller; + #gpio-cells = <2>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + /* Function mapping from kernel 3.x + * pin-function + * PAD_FUNC_ALT0 0 + * PAD_FUNC_ALT1 1 + * PAD_FUNC_ALT2 2 + * PAD_FUNC_ALT3 3 + * + * no control (PINCFG_TYPE_DIR) + * PAD_MODE_ALT 0 + * PAD_MODE_IN 0 + * PAD_MODE_OUT 1 + * + * no control (PINCFG_TYPE_DAT) + * PAD_LEVEL_LOW 0 + * PAD_LEVEL_HIGH 1 + * + * pin-pull + * PAD_PULL_DN 0 + * PAD_PULL_UP 1 + * PAD_PULL_OFF 2 + * + * pin-strength + * PAD_STRENGTH_0 0 + * PAD_STRENGTH_1 1 + * PAD_STRENGTH_2 2 + * PAD_STRENGTH_3 3 + */ + + /* NAND */ + nand_default: nand-default { + nand_cle: nand-cle { + nexell,pins = "gpiob-11"; + nexell,pin-function = <0>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + nand_ale: nand-ale { + nexell,pins = "gpiob-12"; + nexell,pin-function = <0>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + nand_bus8: nand-bus-width8 { + nexell,pins = "gpiob-13", "gpiob-15", "gpiob-17", + "gpiob-19", "gpiob-20", "gpiob-21", + "gpiob-22", "gpiob-23"; + nexell,pin-function = <0>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + nand_rnb: nand-rnb { + nexell,pins = "gpiob-14"; + nexell,pin-function = <0>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + nand_noe: nand-noe { + nexell,pins = "gpiob-16"; + nexell,pin-function = <0>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + nand_nwe: nand-nwe { + nexell,pins = "gpiob-18"; + nexell,pin-function = <0>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + }; + + + /* GMAC */ + gmac_pins: gmac_pins { + gmac_txd: gmac-txd { + nexell,pins = "gpioe-7", "gpioe-8", "gpioe-9", + "gpioe-10"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <3>; 
+ }; + + gmac_rxd: gmac-rxd { + nexell,pins = "gpioe-14", "gpioe-15", "gpioe-16", + "gpioe-17"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <3>; + }; + + gmac_txen: gmac-txen { + nexell,pins = "gpioe-11"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <3>; + }; + + gmac_mdc: gmac-mdc { + nexell,pins = "gpioe-20"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <3>; + }; + + gmac_mdio: gmac-mdio { + nexell,pins = "gpioe-21"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <3>; + }; + + gmac_rxclk: gmac-rxclk { + nexell,pins = "gpioe-18"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <3>; + }; + + gmac_txclk: gmac-txclk { + nexell,pins = "gpioe-24"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <2>; + }; + }; + + /* MMC0 */ + sdmmc0_cclk: sdmmc0-cclk { + nexell,pins = "gpioa-29"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <2>; + }; + + sdmmc0_cmd: sdmmc0-cmd { + nexell,pins = "gpioa-31"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <1>; + }; + + sdmmc0_bus4: sdmmc0-bus-width4 { + nexell,pins = "gpiob-1", "gpiob-3", "gpiob-5", "gpiob-7"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <1>; + }; + /* MMC1 */ + sdmmc1_cclk: sdmmc1-cclk { + nexell,pins = "gpiod-22"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <2>; + }; + + sdmmc1_cmd: sdmmc1-cmd { + nexell,pins = "gpiod-23"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <1>; + }; + + sdmmc1_bus4: sdmmc1-bus-width4 { + nexell,pins = "gpiod-24", "gpiod-25", "gpiod-26", "gpiod-27"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <1>; + }; + + /* MMC2 */ + sdmmc2_cclk: sdmmc2-cclk { + nexell,pins = "gpioc-18"; + nexell,pin-function = <2>; + nexell,pin-pull = <2>; + nexell,pin-strength = <2>; + }; + + sdmmc2_cmd: sdmmc2-cmd { + nexell,pins = "gpioc-19"; + nexell,pin-function = <2>; + nexell,pin-pull = <2>; + nexell,pin-strength = <1>; + }; + + sdmmc2_bus4: sdmmc2-bus-width4 { + nexell,pins = "gpioc-20", "gpioc-21", "gpioc-22", "gpioc-23"; + nexell,pin-function = <2>; + nexell,pin-pull = <2>; + nexell,pin-strength = <1>; + }; + + sdmmc2_bus8: sdmmc2-bus-width8 { + nexell,pins = "gpioe-21", "gpioe-22", "gpioe-23", "gpioe-24"; + nexell,pin-function = <2>; + nexell,pin-pull = <2>; + nexell,pin-strength = <1>; + }; + + /* serial */ + serial0_pin:serial0 { + nexell,pins = "gpiod-14", "gpiod-18"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + serial1_pin:serial1 { + nexell,pins = "gpiod-15", "gpiod-19"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + serial1_flow:serial1_flow { + nexell,pins = "gpioc-5", "gpioc-6"; + nexell,pin-function = <2>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + serial2_pin:serial2 { + nexell,pins = "gpiod-16", "gpiod-20"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + serial3_pin:serial3 { + nexell,pins = "gpiod-17", "gpiod-21"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + serial4_pin:serial4 { + nexell,pins = "gpiob-28", "gpiob-29"; + nexell,pin-function = <3>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + serial5_pin:serial5 { + nexell,pins 
= "gpiob-30", "gpiob-31"; + nexell,pin-function = <3>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + i2c0_pin:i2c0 { + nexell,pins = "gpiod-2", "gpiod-3"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + i2c1_pin:i2c1 { + nexell,pins = "gpiod-4", "gpiod-5"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + i2c2_pin:i2c2 { + nexell,pins = "gpiod-6", "gpiod-7"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + pwm0_pin:pwm0 { + nexell,pins = "gpiod-1"; + nexell,pin-function = <1>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + + pwm1_pin:pwm1 { + nexell,pins = "gpioc-13"; + nexell,pin-function = <2>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + + pwm2_pin:pwm2 { + nexell,pins = "gpioc-14"; + nexell,pin-function = <2>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + + pwm3_pin:pwm3 { + nexell,pins = "gpiod-0"; + nexell,pin-function = <2>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + + i2s0_bus:i2s0 { + nexell,pins = "gpiod-9", "gpiod-10", "gpiod-11", + "gpiod-12", "gpiod-13"; + nexell,pin-function = <1>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + + i2s1_bus:i2s1 { + nexell,pins = "gpioa-28", "gpioa-30", "gpiob-0", + "gpiob-6", "gpiob-9"; + nexell,pin-function = <3>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + + i2s2_bus:i2s2 { + i2s2_mclk: i2s2-mclk { + nexell,pins = "gpioa-28"; + nexell,pin-function = <2>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + + i2s2_other: i2s2-other { + nexell,pins = "gpiob-2", "gpiob-4", + "gpiob-8", "gpiob-10"; + nexell,pin-function = <3>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + }; + + spi0_bus:spi0 { + nexell,pins = "gpioc-29", "gpioc-30", "gpioc-31", "gpiod-0"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + spi1_bus:spi1 { + nexell,pins = "gpioe-14", "gpioe-15", "gpioe-18", "gpioe-19"; + nexell,pin-function = <2>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + spi2_bus:spi2 { + nexell,pins = "gpioc-9", "gpioc-10", "gpioc-11", "gpioc-12"; + nexell,pin-function = <2>; + nexell,pin-pull = <1>; + nexell,pin-strength = <0>; + }; + + spdiftx_pin:spdiftx { + nexell,pins = "gpioc-25"; + nexell,pin-function = <2>; + nexell,pin-pull = <0>; + nexell,pin-strength = <0>; + }; + + vid0_data_clk: vid0-data-clk { + nexell,pins = "gpiod-28", "gpiod-29", "gpiod-30", "gpiod-31", "gpioe-0", "gpioe-1", "gpioe-2", "gpioe-3", "gpioe-4"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + vid0_sync: vid0-sync { + nexell,pins = "gpioe-5", "gpioe-6"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + vid1_data_clk: vid1-data-clk { + nexell,pins = "gpioa-30", "gpiob-0", "gpiob-2", "gpiob-4", "gpiob-6", "gpiob-8", "gpiob-9", "gpiob-10", "gpioa-28"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + vid1_sync: vid1-sync { + nexell,pins = "gpioe-13", "gpioe-7"; + nexell,pin-function = <2>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + vid2_data_clk: vid2-data-clk { + nexell,pins = "gpioc-17", "gpioc-18", "gpioc-19", "gpioc-20", "gpioc-21", "gpioc-22", "gpioc-23", "gpioc-24", "gpioc-14"; + nexell,pin-function = <3>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + vid2_sync: vid2-sync { + nexell,pins = 
"gpioc-15", "gpioc-16"; + nexell,pin-function = <3>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + dp_rgb_vclk: dp-rgb-vclk { + nexell,pins = "gpioa-0"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + dp_rgb_vsync: dp-rgb-vsync { + nexell,pins = "gpioa-25"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + dp_rgb_hsync: dp-rgb-hsync { + nexell,pins = "gpioa-26"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + dp_rgb_de: dp-rgb-de { + nexell,pins = "gpioa-27"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + dp_rgb_B: dp-rgb-B { + nexell,pins = "gpioa-1", "gpioa-2", "gpioa-3", "gpioa-4", + "gpioa-5", "gpioa-6", "gpioa-7", "gpioa-8"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + dp_rgb_G: dp-rgb-G { + nexell,pins = "gpioa-9", "gpioa-10", "gpioa-11", "gpioa-12", + "gpioa-13", "gpioa-14", "gpioa-15", "gpioa-16"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; + + dp_rgb_R: dp-rgb-R { + nexell,pins = "gpioa-17", "gpioa-18", "gpioa-19", "gpioa-20", + "gpioa-21", "gpioa-22", "gpioa-23", "gpioa-24"; + nexell,pin-function = <1>; + nexell,pin-pull = <2>; + nexell,pin-strength = <0>; + }; +}; diff -ENwbur a/arch/arm64/boot/dts/nexell/s5p6818-soc.dtsi b/arch/arm64/boot/dts/nexell/s5p6818-soc.dtsi --- a/arch/arm64/boot/dts/nexell/s5p6818-soc.dtsi 1970-01-01 01:00:00.000000000 +0100 +++ b/arch/arm64/boot/dts/nexell/s5p6818-soc.dtsi 2018-05-06 08:49:48.254657924 +0200 @@ -0,0 +1,667 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +pinctrl_0: pinctrl@C0010000 { + compatible = "nexell,s5p6818-pinctrl"; + reg = ; + #address-cells = <1>; + #size-cells = <1>; + interrupts = <0 IRQ_GPIOA 0>, + <0 IRQ_GPIOB 0>, + <0 IRQ_GPIOC 0>, + <0 IRQ_GPIOD 0>, + <0 IRQ_GPIOE 0>, + <0 IRQ_ALIVE 0>; +}; + +clocks { + compatible = "nexell,s5pxx18,clocks"; + reg = <0xc00bb000 0x30000>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + pll0: pll0 { + #clock-cells = <0>; + clock-names = "sys-pll0"; + clock-output-names = "pll0"; + }; + + pll1: pll1 { + #clock-cells = <0>; + clock-names = "sys-pll1"; + clock-output-names = "pll1"; + }; + + pll2: pll2 { + #clock-cells = <0>; + clock-names = "sys-pll2"; + clock-output-names = "pll2"; + }; + + pll3: pll3 { + #clock-cells = <0>; + clock-names = "sys-pll3"; + clock-output-names = "pll3"; + }; + + bclk: bclk { + #clock-cells = <0>; + clock-names = "sys-bbclk"; + clock-output-names = "bclk"; + }; + + pclk: pclk { + #clock-cells = <0>; + clock-names = "sys-bpclk"; + clock-output-names = "pclk"; + }; + + apb_pclk: apb_pclk { + #clock-cells = <0>; + clock-names = "sys-bbclk"; + clock-output-names = "apb_pclk"; + }; + + timer0:timer@c00b9000 { + #clock-cells = <0>; + clock-output-names = "timer0"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + timer1:timer@c00bb000 { + #clock-cells = <0>; + clock-output-names = "timer1"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + timer2:timer@c00bc000 { + #clock-cells = <0>; + clock-output-names = "timer2"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + timer3:timer@c00bd000 { + #clock-cells = <0>; + clock-output-names = "timer3"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + uart0:uart@c00a9000 { + #clock-cells = <0>; + clock-output-names = "uart0"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + uart1:uart@c00a8000 { + #clock-cells = <0>; + clock-output-names = "uart1"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + uart2:uart@c00aa000 { + #clock-cells = <0>; + clock-output-names = "uart2"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + uart3:uart@c00ab000 { + #clock-cells = <0>; + clock-output-names = "uart3"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + uart4:uart@c006e000 { + #clock-cells = <0>; + clock-output-names = "uart4"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + uart5:uart@c0084000 { + #clock-cells = <0>; + clock-output-names = "uart5"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + pwm0:pwm0@c00ba000 { + #clock-cells = <0>; + clock-output-names = "pwm0"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + pwm1:pwm1@c00ba000 { + #clock-cells = <0>; + clock-output-names = "pwm1"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + pwm2:pwm2@c00be000 { + #clock-cells = <0>; + clock-output-names = "pwm2"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + pwm3:pwm3@c00be000 { + #clock-cells = <0>; + clock-output-names = "pwm3"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + i2c0:i2c@c00ae000 { + #clock-cells = <0>; + clock-output-names = "i2c0"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + i2c1:i2c@c00af000 { + #clock-cells = <0>; + clock-output-names = "i2c1"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + i2c2:i2c@c00b0000 { + #clock-cells = <0>; + clock-output-names = "i2c2"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input 
= ; + }; + + i2s0:i2s@c00b2000 { + #clock-cells = <0>; + clock-output-names = "i2s0"; + cell-id = ; + reg = ; + clk-step = <2>; + clk-input = ; + clk-input1 = ; + }; + + i2s1:i2s@c00b3000 { + #clock-cells = <0>; + clock-output-names = "i2s1"; + cell-id = ; + reg = ; + clk-step = <2>; + clk-input = ; + clk-input1 = ; + }; + + i2s2:i2s@c00b4000 { + #clock-cells = <0>; + clock-output-names = "i2s2"; + cell-id = ; + reg = ; + clk-step = <2>; + clk-input = ; + clk-input1 = ; + }; + + sdhc0:sdhc@c00c5000 { + #clock-cells = <0>; + clock-output-names = "sdhc0"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + sdhc1:sdhc@c00cc000 { + #clock-cells = <0>; + clock-output-names = "sdhc1"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + sdhc2:sdhc@c00cd000 { + #clock-cells = <0>; + clock-output-names = "sdhc2"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + spi0:spi@c00ac000 { + #clock-cells = <0>; + clock-output-names = "spi0"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + spi1:spi@c00ad000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "spi1"; + }; + + spi2:spi@c00a7000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "spi2"; + }; + + vip0:vip@c00c1000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "vip0"; + }; + + vip1:vip@c00c2000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "vip1"; + }; + + vip2:vip@c0099000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "vip2"; + }; + + mipi:mipi@c00ca000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "mipi"; + }; + + gmac:gmac@c00c8000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <2>; + clk-input = ; + clk-input1 = ; + clock-output-names = "gmac"; + src-force = <4>; + }; + + spdiftx:spdiftx@c00b8000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "spdif-tx"; + }; + + mpegtsi:mpegtsi@c00b7000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "mpeg-tsi"; + }; + + vr:vr@c00c3000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "vr"; + }; + + deinterlace:deinterlace@c00b5000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "deinterlace"; + }; + + ppm:pppm@c00c4000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "ppm"; + }; + + vpu:vpu@c00c7000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "vpu"; + }; + + crypto:crypto@c00c6000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + clock-output-names = "crypto"; + }; + + scaler:scaler@c00b6000 { + #clock-cells = <0>; + clock-output-names = "scaler"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + pdm:pdm@c00cb000 { + #clock-cells = <0>; + clock-output-names = "pdm"; + cell-id = ; + reg = ; + clk-step = <1>; + clk-input = ; + }; + + usbhost:usbhost@c006b000 { + #clock-cells = <0>; + cell-id = ; + reg = ; + clk-step = <2>; + clk-input = ; + clk-input1 = ; + clock-output-names = "usbhost"; + clock-frequency 
+	};
+
+	otg:otg@6c00b000 {
+		#clock-cells = <0>;
+		clock-output-names = "otg";
+		cell-id = ;
+		reg = ;
+		clk-step = <2>;
+		clk-input = ;
+		clk-input1 = ;
+	};
+};
+
+amba {
+	compatible = "arm,amba-bus";
+	reg = <0xC0000000 0x2000>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&gic>;
+	ranges;
+
+	pl08xdma0:pl08xdma@c0000000 {
+		compatible = "arm,pl080", "arm,primecell";
+		arm,primecell-periphid = <0x00041080>;
+		reg = ;
+		interrupts = <0 IRQ_DMA0 0>;
+		#dma-cells = <2>;
+		dma-channels = <8>;
+		dma-requests = <16>;
+		lli-bus-interface-ahb1;
+		mem-bus-interface-ahb1;
+		memcpy-burst-size = <256>;
+		memcpy-bus-width = <32>;
+
+		/* slave channels */
+		ch0 {
+			slave_bus_id = PL08X_DMA_NAME_UART1_TX;
+			slave_periph_buses = ;
+		};
+
+		ch1 {
+			slave_bus_id = PL08X_DMA_NAME_UART1_RX;
+			slave_periph_buses = ;
+		};
+
+		ch2 {
+			slave_bus_id = PL08X_DMA_NAME_UART0_TX;
+			slave_periph_buses = ;
+		};
+
+		ch3 {
+			slave_bus_id = PL08X_DMA_NAME_UART0_RX;
+			slave_periph_buses = ;
+		};
+
+		ch4 {
+			slave_bus_id = PL08X_DMA_NAME_UART2_TX;
+			slave_periph_buses = ;
+		};
+
+		ch5 {
+			slave_bus_id = PL08X_DMA_NAME_UART2_RX;
+			slave_periph_buses = ;
+		};
+
+		ch6 {
+			slave_bus_id = PL08X_DMA_NAME_SSP0_TX;
+			slave_periph_buses = ;
+		};
+
+		ch7 {
+			slave_bus_id = PL08X_DMA_NAME_SSP0_RX;
+			slave_periph_buses = ;
+		};
+
+		ch8 {
+			slave_bus_id = PL08X_DMA_NAME_SSP1_TX;
+			slave_periph_buses = ;
+		};
+
+		ch9 {
+			slave_bus_id = PL08X_DMA_NAME_SSP1_RX;
+			slave_periph_buses = ;
+		};
+
+		ch10 {
+			slave_bus_id = PL08X_DMA_NAME_SSP2_TX;
+			slave_periph_buses = ;
+		};
+
+		ch11 {
+			slave_bus_id = PL08X_DMA_NAME_SSP2_RX;
+			slave_periph_buses = ;
+		};
+
+		ch12 {
+			slave_bus_id = PL08X_DMA_NAME_I2S0_TX;
+			slave_periph_buses = ;
+		};
+
+		ch13 {
+			slave_bus_id = PL08X_DMA_NAME_I2S0_RX;
+			slave_periph_buses = ;
+		};
+
+		ch14 {
+			slave_bus_id = PL08X_DMA_NAME_I2S1_TX;
+			slave_periph_buses = ;
+		};
+
+		ch15 {
+			slave_bus_id = PL08X_DMA_NAME_I2S1_RX;
+			slave_periph_buses = ;
+		};
+	};
+
+	pl08xdma1:pl08xdma@c0001000 {
+		compatible = "arm,pl080", "arm,primecell";
+		arm,primecell-periphid = <0x00041080>;
+		reg = ;
+		interrupts = <0 IRQ_DMA1 0>;
+		#dma-cells = <2>;
+		dma-channels = <8>;
+		dma-requests = <16>;
+		lli-bus-interface-ahb1;
+		mem-bus-interface-ahb1;
+		memcpy-burst-size = <256>;
+		memcpy-bus-width = <32>;
+
+		/* slave channels */
+		ch0 {
+			slave_bus_id = PL08X_DMA_NAME_I2S2_TX;
+			slave_periph_buses = ;
+		};
+
+		ch1 {
+			slave_bus_id = PL08X_DMA_NAME_I2S2_RX;
+			slave_periph_buses = ;
+		};
+
+		ch2 {
+			slave_bus_id = PL08X_DMA_NAME_AC97_PCMOUT;
+			slave_periph_buses = ;
+		};
+
+		ch3 {
+			slave_bus_id = PL08X_DMA_NAME_AC97_PCMIN;
+			slave_periph_buses = ;
+		};
+
+		ch4 {
+			slave_bus_id = PL08X_DMA_NAME_AC97_MICIN;
+			slave_periph_buses = ;
+		};
+
+		ch5 {
+			slave_bus_id = PL08X_DMA_NAME_SPDIFRX;
+			slave_periph_buses = ;
+		};
+
+		ch6 {
+			slave_bus_id = PL08X_DMA_NAME_SPDIFTX;
+			slave_periph_buses = ;
+		};
+
+		ch7 {
+			slave_bus_id = PL08X_DMA_NAME_MPEGTSI0;
+			slave_periph_buses = ;
+		};
+
+		ch8 {
+			slave_bus_id = PL08X_DMA_NAME_MPEGTSI1;
+			slave_periph_buses = ;
+		};
+
+		ch9 {
+			slave_bus_id = PL08X_DMA_NAME_MPEGTSI2;
+			slave_periph_buses = ;
+		};
+
+		ch10 {
+			slave_bus_id = PL08X_DMA_NAME_MPEGTSI3;
+			slave_periph_buses = ;
+		};
+
+		ch11 {
+			slave_bus_id = PL08X_DMA_NAME_CRYPTO_BR;
+			slave_periph_buses = ;
+		};
+
+		ch12 {
+			slave_bus_id = PL08X_DMA_NAME_CRYPTO_BW;
+			slave_periph_buses = ;
+		};
+
+		ch13 {
+			slave_bus_id = PL08X_DMA_NAME_CRYPTO_HR;
+			slave_periph_buses = ;
+		};
+
+		ch14 {
+			slave_bus_id = PL08X_DMA_NAME_PDM;
+			slave_periph_buses = ;
+		};
+	};
+};
+
diff -ENwbur a/arch/arm64/boot/dts/nexell/s5p6818-tmu-sensor-conf.dtsi b/arch/arm64/boot/dts/nexell/s5p6818-tmu-sensor-conf.dtsi
--- a/arch/arm64/boot/dts/nexell/s5p6818-tmu-sensor-conf.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ b/arch/arm64/boot/dts/nexell/s5p6818-tmu-sensor-conf.dtsi	2018-05-06 08:49:48.254657924 +0200
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 Nexell Co., Ltd.
+ * Author: Youngbok, Park
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <5>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <0x5d2d>;
+samsung,tmu_min_efuse_value = <16>;
+samsung,tmu_max_efuse_value = <76>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <25>;
+samsung,tmu_cal_type = ;
diff -ENwbur a/arch/arm64/configs/nanopim3_defconfig b/arch/arm64/configs/nanopim3_defconfig
--- a/arch/arm64/configs/nanopim3_defconfig	1970-01-01 01:00:00.000000000 +0100
+++ b/arch/arm64/configs/nanopim3_defconfig	2018-05-06 08:49:48.262658250 +0200
@@ -0,0 +1,4592 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm64 4.14.39 Kernel Configuration +# +CONFIG_ARM64=y +CONFIG_64BIT=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=24 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_NO_IOPORT_MAP=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ZONE_DMA=y +CONFIG_HAVE_GENERIC_GUP=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_SMP=y +CONFIG_SWIOTLB=y +CONFIG_IOMMU_HELPER=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_CROSS_COMPILE="aarch64-linux-gnu-" +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="S5P6818" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_FHANDLE=y +CONFIG_USELIB=y +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUDIT_TREE=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_HANDLE_DOMAIN_IRQ=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +# CONFIG_TASKSTATS is not set + +# +# RCU Subsystem +# +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_BUILD_BIN2C=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=16 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +# CONFIG_BLK_CGROUP is not set +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +CONFIG_RT_GROUP_SCHED=y +# CONFIG_CGROUP_PIDS is not set +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +# CONFIG_CPUSETS is not set +# CONFIG_CGROUP_DEVICE is 
not set +CONFIG_CGROUP_CPUACCT=y +# CONFIG_CGROUP_PERF is not set +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +# CONFIG_USER_NS is not set +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +# CONFIG_RELAY is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_UID16=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_POSIX_TIMERS=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_ABSOLUTE_PERCPU is not set +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_BPF_SYSCALL=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_ADVISE_SYSCALLS=y +# CONFIG_USERFAULTFD is not set +CONFIG_MEMBARRIER=y +CONFIG_EMBEDDED=y +CONFIG_HAVE_PERF_EVENTS=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SYSTEM_DATA_VERIFICATION is not set +# CONFIG_PROFILING is not set +CONFIG_TRACEPOINTS=y +# CONFIG_KPROBES is not set +# CONFIG_JUMP_LABEL is not set +# CONFIG_UPROBES is not set +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_HAVE_CC_STACKPROTECTOR=y +# CONFIG_CC_STACKPROTECTOR is not set +CONFIG_CC_STACKPROTECTOR_NONE=y +# CONFIG_CC_STACKPROTECTOR_REGULAR is not set +# CONFIG_CC_STACKPROTECTOR_STRONG is not set +CONFIG_THIN_ARCHIVES=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +# CONFIG_HAVE_ARCH_HASH is not set +# CONFIG_ISA_BUS_API is not set +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +# CONFIG_CPU_NO_EFFICIENT_FFS is not set 
+CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +# CONFIG_REFCOUNT_FULL is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +# CONFIG_BLK_DEV_INTEGRITY is not set +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_EFI_PARTITION=y +CONFIG_BLOCK_COMPAT=y + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_FREEZER=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_BCM2835 is not set +# CONFIG_ARCH_BCM_IPROC is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BRCMSTB is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_LAYERSCAPE is not set +# CONFIG_ARCH_LG1K is not set +# CONFIG_ARCH_HISI is not set +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_QCOM is not set +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_ROCKCHIP is not set +# CONFIG_ARCH_SEATTLE is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_STRATIX10 is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_S5P6818=y +# CONFIG_ARCH_THUNDER is not set +# CONFIG_ARCH_THUNDER2 is not set +# CONFIG_ARCH_UNIPHIER is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_VULCAN is not set +# CONFIG_ARCH_XGENE is not set +# CONFIG_ARCH_ZX is not set +# CONFIG_ARCH_ZYNQMP is not set + +# +# Bus support +# +# CONFIG_PCI is not set +# CONFIG_PCI_DOMAINS is not set +# CONFIG_PCI_DOMAINS_GENERIC is not set +# CONFIG_PCI_SYSCALL is not set + +# +# DesignWare PCI Core Support +# + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +# CONFIG_ARM64_ERRATUM_826319 is not set +# CONFIG_ARM64_ERRATUM_827319 is not set +# CONFIG_ARM64_ERRATUM_824069 is not set +# CONFIG_ARM64_ERRATUM_819472 is not set +# CONFIG_ARM64_ERRATUM_832075 is not set +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y 
+CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +CONFIG_ARM64_VA_BITS_39=y +# CONFIG_ARM64_VA_BITS_48 is not set +CONFIG_ARM64_VA_BITS=39 +CONFIG_ARM64_WORKAROUND_CCI400_DVMV7=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_SCHED_MC=y +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=8 +CONFIG_HOTPLUG_CPU=y +# CONFIG_NUMA is not set +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_NO_BOOTMEM=y +CONFIG_MEMORY_ISOLATION=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +CONFIG_MIGRATION=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_BOUNCE=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_MEMORY_FAILURE is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_ARCH_WANTS_THP_SWAP is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +CONFIG_CMA_DEBUGFS=y +CONFIG_CMA_AREAS=3 +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_FRAME_VECTOR=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_SECCOMP is not set +# CONFIG_PARAVIRT is not set +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_XEN is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_HARDEN_BRANCH_PREDICTOR=y +# CONFIG_ARMV8_DEPRECATED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +# CONFIG_ARM64_LSE_ATOMICS is not set +# CONFIG_ARM64_VHE is not set + +# +# ARMv8.2 architectural features +# +CONFIG_ARM64_UAO=y +# CONFIG_ARM64_PMEM is not set +CONFIG_ARM64_MODULE_CMODEL_LARGE=y +# CONFIG_RANDOMIZE_BASE is not set + +# +# Boot options +# +CONFIG_CMDLINE="console=ttySAC0,115200n8 root=/dev/ram0 rw initrd=0x49000000,16M ramdisk=16384" +# CONFIG_CMDLINE_FORCE is not set +# CONFIG_EFI is not set + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +CONFIG_BINFMT_MISC=y +CONFIG_COREDUMP=y +CONFIG_COMPAT=y +CONFIG_SYSVIPC_COMPAT=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +CONFIG_PM_AUTOSLEEP=y 
+CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=100 +CONFIG_PM_WAKELOCKS_GC=y +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_DT_IDLE_STATES=y + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_CPUIDLE is not set +CONFIG_ARM_NEXELL_CPUIDLE=y +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +# CONFIG_CPU_FREQ_STAT is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set +# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set +CONFIG_ARM_NEXELL_CPUFREQ=y +# CONFIG_ARM_NEXELL_CPUFREQ_DEBUG is not set +# CONFIG_ARM_NEXELL_CPUFREQ_VOLTAGE_DEBUG is not set +# CONFIG_ARM_DYNAMIC_CLUSTER_HOTPLUG is not set +# CONFIG_NEXELL_CPUFREQ_PLL_0 is not set +CONFIG_NEXELL_CPUFREQ_PLL_1=y +CONFIG_NEXELL_CPUFREQ_PLLDEV=1 +# CONFIG_QORIQ_CPUFREQ is not set +CONFIG_NET=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_NET_INGRESS=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_DIAG is not set +CONFIG_UNIX=y +# CONFIG_UNIX_DIAG is not set +# CONFIG_TLS is not set +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_SUB_POLICY is not set +CONFIG_XFRM_MIGRATE=y +# CONFIG_XFRM_STATISTICS is not set +CONFIG_XFRM_IPCOMP=y +CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +# CONFIG_IP_FIB_TRIE_STATS is not set +CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE_DEMUX is not set +# CONFIG_NET_IP_TUNNEL is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_NET_IPVTI is not set +# CONFIG_NET_UDP_TUNNEL is not set +# CONFIG_NET_FOU is not set +# CONFIG_INET_AH is not set +CONFIG_INET_ESP=y +# CONFIG_INET_ESP_OFFLOAD is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_INET_UDP_DIAG is not set +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +# CONFIG_INET6_ESP_OFFLOAD is not 
set +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +# CONFIG_IPV6_VTI is not set +# CONFIG_IPV6_SIT is not set +# CONFIG_IPV6_TUNNEL is not set +# CONFIG_IPV6_FOU is not set +# CONFIG_IPV6_FOU_TUNNEL is not set +CONFIG_IPV6_MULTIPLE_TABLES=y +# CONFIG_IPV6_SUBTREES is not set +# CONFIG_IPV6_MROUTE is not set +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +CONFIG_NETLABEL=y +CONFIG_NETWORK_SECMARK=y +# CONFIG_NET_PTP_CLASSIFY is not set +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +# CONFIG_NF_CONNTRACK_SNMP is not set +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +# CONFIG_NF_CONNTRACK_SIP is not set +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +# CONFIG_NF_CT_NETLINK_TIMEOUT is not set +# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set +# CONFIG_NF_TABLES is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +# CONFIG_NETFILTER_XT_TARGET_CT is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +# CONFIG_NETFILTER_XT_TARGET_HL is not set +# CONFIG_NETFILTER_XT_TARGET_HMARK is not set +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +# CONFIG_NETFILTER_XT_TARGET_LED is not set +# CONFIG_NETFILTER_XT_TARGET_LOG is not set +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set +# CONFIG_NETFILTER_XT_MATCH_BPF is not set +# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set +# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# 
CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +CONFIG_NETFILTER_XT_MATCH_ECN=y +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +# CONFIG_IP_SET is not set +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +# CONFIG_NF_SOCKET_IPV4 is not set +# CONFIG_NF_DUP_IPV4 is not set +# CONFIG_NF_LOG_ARP is not set +# CONFIG_NF_LOG_IPV4 is not set +CONFIG_NF_REJECT_IPV4=y +# CONFIG_NF_NAT_IPV4 is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +# CONFIG_IP_NF_MATCH_RPFILTER is not set +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +# CONFIG_IP_NF_NAT is not set +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_NF_SOCKET_IPV6 is not set +# CONFIG_NF_DUP_IPV6 is not set +CONFIG_NF_REJECT_IPV6=y +# CONFIG_NF_LOG_IPV6 is not set +# CONFIG_NF_NAT_IPV6 is not set +CONFIG_IP6_NF_IPTABLES=y +# CONFIG_IP6_NF_MATCH_AH is not set +# CONFIG_IP6_NF_MATCH_EUI64 is not set +# CONFIG_IP6_NF_MATCH_FRAG is not set +# CONFIG_IP6_NF_MATCH_OPTS is not set +# CONFIG_IP6_NF_MATCH_HL is not set +# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set +# CONFIG_IP6_NF_MATCH_MH is not set +# CONFIG_IP6_NF_MATCH_RPFILTER is not set +# CONFIG_IP6_NF_MATCH_RT is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_SECURITY is not set +# CONFIG_IP6_NF_NAT is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +# CONFIG_BRIDGE is not set +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# 
Queueing/Scheduling +# +# CONFIG_NET_SCH_CBQ is not set +CONFIG_NET_SCH_HTB=y +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_PRIO is not set +# CONFIG_NET_SCH_MULTIQ is not set +# CONFIG_NET_SCH_RED is not set +# CONFIG_NET_SCH_SFB is not set +# CONFIG_NET_SCH_SFQ is not set +# CONFIG_NET_SCH_TEQL is not set +# CONFIG_NET_SCH_TBF is not set +# CONFIG_NET_SCH_GRED is not set +# CONFIG_NET_SCH_DSMARK is not set +# CONFIG_NET_SCH_NETEM is not set +# CONFIG_NET_SCH_DRR is not set +# CONFIG_NET_SCH_MQPRIO is not set +# CONFIG_NET_SCH_CHOKE is not set +# CONFIG_NET_SCH_QFQ is not set +# CONFIG_NET_SCH_CODEL is not set +# CONFIG_NET_SCH_FQ_CODEL is not set +# CONFIG_NET_SCH_FQ is not set +# CONFIG_NET_SCH_HHF is not set +# CONFIG_NET_SCH_PIE is not set +# CONFIG_NET_SCH_INGRESS is not set +# CONFIG_NET_SCH_PLUG is not set +# CONFIG_NET_SCH_DEFAULT is not set + +# +# Classification +# +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +# CONFIG_NET_CLS_TCINDEX is not set +# CONFIG_NET_CLS_ROUTE4 is not set +# CONFIG_NET_CLS_FW is not set +CONFIG_NET_CLS_U32=y +# CONFIG_CLS_U32_PERF is not set +# CONFIG_CLS_U32_MARK is not set +# CONFIG_NET_CLS_RSVP is not set +# CONFIG_NET_CLS_RSVP6 is not set +# CONFIG_NET_CLS_FLOW is not set +# CONFIG_NET_CLS_CGROUP is not set +# CONFIG_NET_CLS_BPF is not set +# CONFIG_NET_CLS_FLOWER is not set +# CONFIG_NET_CLS_MATCHALL is not set +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +# CONFIG_NET_EMATCH_CMP is not set +# CONFIG_NET_EMATCH_NBYTE is not set +CONFIG_NET_EMATCH_U32=y +# CONFIG_NET_EMATCH_META is not set +# CONFIG_NET_EMATCH_TEXT is not set +CONFIG_NET_CLS_ACT=y +# CONFIG_NET_ACT_POLICE is not set +# CONFIG_NET_ACT_GACT is not set +# CONFIG_NET_ACT_MIRRED is not set +# CONFIG_NET_ACT_SAMPLE is not set +# CONFIG_NET_ACT_IPT is not set +# CONFIG_NET_ACT_NAT is not set +# CONFIG_NET_ACT_PEDIT is not set +# CONFIG_NET_ACT_SIMP is not set +# CONFIG_NET_ACT_SKBEDIT is not set +# CONFIG_NET_ACT_CSUM is not set +# CONFIG_NET_ACT_VLAN is not set +# CONFIG_NET_ACT_BPF is not set +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_SKBMOD is not set +# CONFIG_NET_ACT_IFE is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +# CONFIG_OPENVSWITCH is not set +# CONFIG_VSOCKETS is not set +# CONFIG_NETLINK_DIAG is not set +# CONFIG_MPLS is not set +# CONFIG_NET_NSH is not set +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +# CONFIG_CGROUP_NET_PRIO is not set +# CONFIG_CGROUP_NET_CLASSID is not set +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +# CONFIG_BPF_JIT is not set +# CONFIG_BPF_STREAM_PARSER is not set +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NET_DROP_MONITOR is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +# CONFIG_BT_LEDS is not set +# CONFIG_BT_SELFTEST is not set +CONFIG_BT_DEBUGFS=y + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_BCM=y +CONFIG_BT_HCIBTUSB_RTL=y +# CONFIG_BT_HCIBTSDIO is not set +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +# 
CONFIG_BT_HCIUART_BCSP is not set +# CONFIG_BT_HCIUART_ATH3K is not set +# CONFIG_BT_HCIUART_3WIRE is not set +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_QCA is not set +# CONFIG_BT_HCIUART_AG6XX is not set +# CONFIG_BT_HCIUART_MRVL is not set +# CONFIG_BT_HCIBCM203X is not set +# CONFIG_BT_HCIBPA10X is not set +# CONFIG_BT_HCIBFUSB is not set +# CONFIG_BT_HCIVHCI is not set +# CONFIG_BT_MRVL is not set +# CONFIG_BT_ATH3K is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +# CONFIG_STREAM_PARSER is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=m +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +# CONFIG_CFG80211_DEFAULT_PS is not set +CONFIG_CFG80211_DEBUGFS=y +# CONFIG_CFG80211_INTERNAL_REGDB is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +# CONFIG_LIB80211 is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +# CONFIG_MAC80211_RC_MINSTREL_VHT is not set +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +CONFIG_MAC80211_MESH=y +# CONFIG_MAC80211_LEDS is not set +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +# CONFIG_WIMAX is not set +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +# CONFIG_RFKILL_INPUT is not set +CONFIG_RFKILL_GPIO=m +CONFIG_NET_9P=y +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +# CONFIG_PSAMPLE is not set +# CONFIG_NET_IFE is not set +# CONFIG_LWTUNNEL is not set +# CONFIG_DST_CACHE is not set +CONFIG_GRO_CELLS=y +# CONFIG_NET_DEVLINK is not set +CONFIG_MAY_USE_DEVLINK=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_DMA_CMA=y + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=128 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_GENERIC_ARCH_TOPOLOGY=y + +# +# Bus devices +# +# CONFIG_ARM_CCI400_PMU is not set +# CONFIG_ARM_CCI5xx_PMU is not set +# CONFIG_ARM_CCN is not set +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_SIMPLE_PM_BUS is not set +# CONFIG_VEXPRESS_CONFIG is not set +# CONFIG_CONNECTOR is not set +# CONFIG_MTD is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_MDIO=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +# CONFIG_PARPORT is not set 
+CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set + +# +# Misc devices +# +# CONFIG_SENSORS_LIS3LV02D is not set +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set +# CONFIG_USB_SWITCH_FSA9480 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +CONFIG_NX_SCALER=m +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# CONFIG_SENSORS_LIS3_SPI is not set +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module +# +# CONFIG_ALTERA_STAPL is not set + +# +# Intel MIC Bus Driver +# + +# +# SCIF Bus Driver +# + +# +# VOP Bus Driver +# + +# +# Intel MIC Host Driver +# + +# +# Intel MIC Card Driver +# + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# + +# +# VOP Driver +# +# CONFIG_ECHO is not set +# CONFIG_CXL_BASE is not set +# CONFIG_CXL_AFU_DRIVER_OPS is not set +# CONFIG_CXL_LIB is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_NETLINK is not set +# CONFIG_SCSI_MQ_DEFAULT is not set +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +CONFIG_CHR_DEV_SG=y +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_TCP is not set +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +CONFIG_HAVE_PATA_PLATFORM=y +# CONFIG_ATA is not set +CONFIG_MD=y +# CONFIG_BLK_DEV_MD is not set +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=y +# CONFIG_DM_MQ_DEFAULT is not set +# CONFIG_DM_DEBUG is not set +CONFIG_DM_CRYPT=y +# CONFIG_DM_SNAPSHOT is not set +# CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_CACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_MIRROR is not set +# CONFIG_DM_RAID is not set +# CONFIG_DM_ZERO is not set +# 
CONFIG_DM_MULTIPATH is not set +# CONFIG_DM_DELAY is not set +CONFIG_DM_UEVENT=y +# CONFIG_DM_FLAKEY is not set +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set +# CONFIG_DM_INTEGRITY is not set +# CONFIG_TARGET_CORE is not set +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +# CONFIG_DUMMY is not set +# CONFIG_EQUALIZER is not set +# CONFIG_IFB is not set +# CONFIG_NET_TEAM is not set +# CONFIG_MACVLAN is not set +# CONFIG_VXLAN is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_TUN is not set +# CONFIG_TUN_VNET_CROSS_LE is not set +# CONFIG_VETH is not set +# CONFIG_NLMON is not set + +# +# CAIF transport drivers +# + +# +# Distributed Switch Architecture drivers +# +CONFIG_ETHERNET=y +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_ALTERA_TSE is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_AURORA is not set +# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_DNET is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HISILICON is not set +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +# CONFIG_MLXSW_CORE is not set +# CONFIG_MLXFW is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_ETHOC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_PLATFORM=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +# CONFIG_DWMAC_GENERIC is not set +CONFIG_DWMAC_NEXELL=y +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_DWC_XLGMAC is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_OCTEON is not set +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MII PHY device drivers +# +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AT803X_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +CONFIG_FIXED_PHY=y +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_QSEMI_PHY is not set +CONFIG_REALTEK_PHY=y +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY 
is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +CONFIG_USB_USBNET=y +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDCETHER=y +# CONFIG_USB_NET_CDC_EEM is not set +# CONFIG_USB_NET_CDC_NCM is not set +# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set +# CONFIG_USB_NET_CDC_MBIM is not set +# CONFIG_USB_NET_DM9601 is not set +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +# CONFIG_USB_NET_SMSC75XX is not set +# CONFIG_USB_NET_SMSC95XX is not set +# CONFIG_USB_NET_GL620A is not set +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_MCS7830 is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set +# CONFIG_USB_NET_CX82310_ETH is not set +# CONFIG_USB_NET_KALMIA is not set +# CONFIG_USB_NET_QMI_WWAN is not set +# CONFIG_USB_HSO is not set +# CONFIG_USB_NET_INT51X1 is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_USB_SIERRA_NET is not set +# CONFIG_USB_VL600 is not set +# CONFIG_USB_NET_CH9200 is not set +CONFIG_WLAN=y +# CONFIG_WIRELESS_WDS is not set +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_BCMDHD is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +# CONFIG_BRCMSMAC is not set +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_SDIO=y +# CONFIG_BRCMFMAC_USB is not set +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2500USB is not set +CONFIG_RT73USB=m +# CONFIG_RT2800USB is not set +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_LIB_DEBUGFS is not set +# CONFIG_RT2X00_DEBUG is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_MAC80211_HWSIM is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=y +# CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set 
+CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_PROPERTIES=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +CONFIG_TOUCHSCREEN_GOODIX=m +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_WACOM_I2C is not set +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +CONFIG_TOUCHSCREEN_EDT_FT5X06=m +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +CONFIG_TOUCHSCREEN_IT7260=m +CONFIG_TOUCHSCREEN_HIMAX=m +CONFIG_TOUCHSCREEN_1WIRE=y +CONFIG_SENSOR_LOADER_1WIRE=m +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set +# CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +# CONFIG_SERIAL_AMBA_PL011 is not set +# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +CONFIG_SERIAL_SAMSUNG=y +CONFIG_SERIAL_SAMSUNG_UARTS_4=y +CONFIG_SERIAL_SAMSUNG_UARTS=6 +CONFIG_SERIAL_SAMSUNG_CONSOLE=y +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# 
CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_IFX6X60 is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set +# CONFIG_IPMI_HANDLER is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +# CONFIG_R3964 is not set + +# +# PCMCIA character devices +# +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +# CONFIG_XILLYBUS is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=y +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_PXA_PCI is not set +# CONFIG_I2C_RK3X is not set +CONFIG_HAVE_S3C2410_I2C=y +CONFIG_I2C_S3C2410=y +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PL022 is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=y +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +# CONFIG_PTP_1588_CLOCK is not set + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
+# +CONFIG_PINCTRL=y + +# +# Pin controllers +# +CONFIG_PINMUX=y +CONFIG_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_NEXELL=y +CONFIG_GPIOLIB=y +CONFIG_OF_GPIO=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_PL061 is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XGENE is not set +# CONFIG_GPIO_XILINX is not set + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_SX150X is not set +# CONFIG_GPIO_TPIC2810 is not set + +# +# MFD GPIO expanders +# + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set + +# +# USB GPIO expanders +# +# CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_XGENE is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_LEGO_EV3 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LTC3651 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_CHARGER_RT9455 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# 
CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IIO_HWMON is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VT1211 is not set +# 
CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_NANOPI_THERMISTOR=m +CONFIG_THERMAL=y +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set +# CONFIG_THERMAL_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_QORIQ_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# + +# +# Samsung thermal drivers +# +CONFIG_EXYNOS_THERMAL=y +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +# CONFIG_WATCHDOG_SYSFS is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_GPIO_WATCHDOG is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_ARM_SP805_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +CONFIG_HAVE_S3C2410_WATCHDOG=y +# CONFIG_S3C2410_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_MEN_A21_WDT is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +# CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +CONFIG_MFD_AXP228=y +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CROS_EC is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not 
set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RTSX_USB is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SMSC is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_ANATOP is not set +CONFIG_REGULATOR_AXP228=y +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_PWM is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +CONFIG_RC_CORE=y +# CONFIG_RC_MAP is not set +# CONFIG_RC_DECODERS is not set +# CONFIG_RC_DEVICES is not set +CONFIG_MEDIA_SUPPORT=y + +# +# Multimedia core support +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_DEV=y 
+CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_V4L2=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_DMA_CONTIG=m +CONFIG_VIDEOBUF2_VMALLOC=m +# CONFIG_TTPCI_EEPROM is not set + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_GSPCA=m +# CONFIG_USB_M5602 is not set +# CONFIG_USB_STV06XX is not set +# CONFIG_USB_GL860 is not set +# CONFIG_USB_GSPCA_BENQ is not set +# CONFIG_USB_GSPCA_CONEX is not set +# CONFIG_USB_GSPCA_CPIA1 is not set +# CONFIG_USB_GSPCA_DTCS033 is not set +# CONFIG_USB_GSPCA_ETOMS is not set +# CONFIG_USB_GSPCA_FINEPIX is not set +# CONFIG_USB_GSPCA_JEILINJ is not set +# CONFIG_USB_GSPCA_JL2005BCD is not set +# CONFIG_USB_GSPCA_KINECT is not set +# CONFIG_USB_GSPCA_KONICA is not set +# CONFIG_USB_GSPCA_MARS is not set +# CONFIG_USB_GSPCA_MR97310A is not set +# CONFIG_USB_GSPCA_NW80X is not set +# CONFIG_USB_GSPCA_OV519 is not set +# CONFIG_USB_GSPCA_OV534 is not set +# CONFIG_USB_GSPCA_OV534_9 is not set +# CONFIG_USB_GSPCA_PAC207 is not set +# CONFIG_USB_GSPCA_PAC7302 is not set +# CONFIG_USB_GSPCA_PAC7311 is not set +# CONFIG_USB_GSPCA_SE401 is not set +# CONFIG_USB_GSPCA_SN9C2028 is not set +# CONFIG_USB_GSPCA_SN9C20X is not set +# CONFIG_USB_GSPCA_SONIXB is not set +# CONFIG_USB_GSPCA_SONIXJ is not set +# CONFIG_USB_GSPCA_SPCA500 is not set +# CONFIG_USB_GSPCA_SPCA501 is not set +# CONFIG_USB_GSPCA_SPCA505 is not set +# CONFIG_USB_GSPCA_SPCA506 is not set +# CONFIG_USB_GSPCA_SPCA508 is not set +# CONFIG_USB_GSPCA_SPCA561 is not set +# CONFIG_USB_GSPCA_SPCA1528 is not set +# CONFIG_USB_GSPCA_SQ905 is not set +# CONFIG_USB_GSPCA_SQ905C is not set +# CONFIG_USB_GSPCA_SQ930X is not set +# CONFIG_USB_GSPCA_STK014 is not set +# CONFIG_USB_GSPCA_STK1135 is not set +# CONFIG_USB_GSPCA_STV0680 is not set +# CONFIG_USB_GSPCA_SUNPLUS is not set +# CONFIG_USB_GSPCA_T613 is not set +# CONFIG_USB_GSPCA_TOPRO is not set +# CONFIG_USB_GSPCA_TOUPTEK is not set +# CONFIG_USB_GSPCA_TV8532 is not set +# CONFIG_USB_GSPCA_VC032X is not set +# CONFIG_USB_GSPCA_VICAM is not set +# CONFIG_USB_GSPCA_XIRLINK_CIT is not set +# CONFIG_USB_GSPCA_ZC3XX is not set +# CONFIG_USB_PWC is not set +# CONFIG_VIDEO_CPIA2 is not set +# CONFIG_USB_ZR364XX is not set +# CONFIG_USB_STKWEBCAM is not set +# CONFIG_USB_S2255 is not set +# CONFIG_VIDEO_USBTV is not set + +# +# Webcam, TV (analog/digital) USB devices +# +# CONFIG_VIDEO_EM28XX is not set +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set +# CONFIG_V4L_TEST_DRIVERS is not set +CONFIG_VIDEO_NEXELL_CODEC=m +CONFIG_NANO_VIDEODEV=m + +# +# Supported MMC/SDIO adapters +# +# CONFIG_CYPRESS_FIRMWARE is not set + +# +# Media ancillary drivers (tuners, sensors, i2c, spi, frontends) +# +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +# CONFIG_VIDEO_IR_I2C is not set + +# +# I2C Encoders, decoders, sensors and other helper chips +# + +# +# Audio decoders, processors and mixers +# +# CONFIG_VIDEO_TVAUDIO is not set +# CONFIG_VIDEO_TDA7432 is not set +# CONFIG_VIDEO_TDA9840 is not set +# CONFIG_VIDEO_TEA6415C is not set +# CONFIG_VIDEO_TEA6420 is not set +# CONFIG_VIDEO_MSP3400 is not set +# CONFIG_VIDEO_CS3308 is not set +# CONFIG_VIDEO_CS5345 is not set +# CONFIG_VIDEO_CS53L32A is not set +# CONFIG_VIDEO_TLV320AIC23B is not set +# CONFIG_VIDEO_UDA1342 is not set +# CONFIG_VIDEO_WM8775 is not set +# CONFIG_VIDEO_WM8739 is not 
set +# CONFIG_VIDEO_VP27SMPX is not set +# CONFIG_VIDEO_SONY_BTF_MPX is not set + +# +# RDS decoders +# +# CONFIG_VIDEO_SAA6588 is not set + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_SAA7110 is not set +# CONFIG_VIDEO_SAA711X is not set +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TVP514X is not set +# CONFIG_VIDEO_TVP5150 is not set +# CONFIG_VIDEO_TVP7002 is not set +# CONFIG_VIDEO_TW2804 is not set +# CONFIG_VIDEO_TW9903 is not set +# CONFIG_VIDEO_TW9906 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +# CONFIG_VIDEO_SAA717X is not set +# CONFIG_VIDEO_CX25840 is not set + +# +# Video encoders +# +# CONFIG_VIDEO_SAA7127 is not set +# CONFIG_VIDEO_SAA7185 is not set +# CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_ADV7511 is not set +# CONFIG_VIDEO_AD9389B is not set +# CONFIG_VIDEO_AK881X is not set +# CONFIG_VIDEO_THS8200 is not set + +# +# Camera sensor devices +# +# CONFIG_VIDEO_OV2640 is not set +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV7640 is not set +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_VS6624 is not set +# CONFIG_VIDEO_MT9M032 is not set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T001 is not set +# CONFIG_VIDEO_MT9V011 is not set +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_SR030PC30 is not set +# CONFIG_VIDEO_NOON010PC30 is not set +# CONFIG_VIDEO_M5MOLS is not set +# CONFIG_VIDEO_S5K6AA is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_S5K4ECGX is not set +# CONFIG_VIDEO_S5K5BAF is not set +# CONFIG_VIDEO_SMIAPP is not set +# CONFIG_VIDEO_ET8EK8 is not set +# CONFIG_VIDEO_S5C73M3 is not set + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not set +# CONFIG_VIDEO_AS3645A is not set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set + +# +# Video improvement chips +# +# CONFIG_VIDEO_UPD64031A is not set +# CONFIG_VIDEO_UPD64083 is not set + +# +# Audio/Video compression chips +# +# CONFIG_VIDEO_SAA6752HS is not set + +# +# SDR tuner chips +# + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_THS7303 is not set +# CONFIG_VIDEO_M52790 is not set + +# +# Sensors used on soc_camera driver +# + +# +# SPI helper chips +# +# CONFIG_VIDEO_GS1662 is not set + +# +# Customise DVB Frontends +# + +# +# Tools to develop new frontends +# + +# +# Graphics support +# +CONFIG_DRM=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_MM_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# 
CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_UDL is not set +# CONFIG_DRM_RCAR_DW_HDMI is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +CONFIG_DRM_PANEL_NANOPI=y +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_DUMB_VGA_DAC is not set +# CONFIG_DRM_LVDS_ENCODER is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_ARCPGU is not set +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_PL111 is not set +CONFIG_DRM_NX=y +CONFIG_DRM_NX_RGB=y +CONFIG_DRM_NX_LVDS=y +# CONFIG_DRM_NX_MIPI_DSI is not set +CONFIG_DRM_NX_HDMI=y +# CONFIG_DRM_LEGACY is not set +# CONFIG_DRM_LIB_RANDOM is not set + +# +# ARM GPU Configuration +# +# CONFIG_MALI400 is not set + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_BROADSHEET is not set +# CONFIG_FB_AUO_K190X is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +# CONFIG_BACKLIGHT_PWM is not set +# CONFIG_BACKLIGHT_PM8941_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LP855X is not set +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_VGASTATE is not set +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_MONO=y 
+CONFIG_LOGO_LINUX_VGA16=y +CONFIG_LOGO_LINUX_CLUT224=y +CONFIG_SOUND=y +# CONFIG_SOUND_OSS_CORE is not set +CONFIG_SND=y +CONFIG_SND_TIMER=y +CONFIG_SND_PCM=y +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +# CONFIG_SND_OSSEMUL is not set +CONFIG_SND_PCM_TIMER=y +# CONFIG_SND_HRTIMER is not set +# CONFIG_SND_DYNAMIC_MINORS is not set +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +# CONFIG_SND_VERBOSE_PROCFS is not set +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +# CONFIG_SND_SEQUENCER is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_DRIVERS is not set + +# +# HD-Audio +# +CONFIG_SND_HDA_PREALLOC_SIZE=64 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +# CONFIG_SND_USB_AUDIO is not set +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_CAIAQ is not set +# CONFIG_SND_USB_6FIRE is not set +# CONFIG_SND_USB_HIFACE is not set +# CONFIG_SND_BCD2000 is not set +# CONFIG_SND_USB_POD is not set +# CONFIG_SND_USB_PODHD is not set +# CONFIG_SND_USB_TONEPORT is not set +# CONFIG_SND_USB_VARIAX is not set +CONFIG_SND_SOC=y +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# CONFIG_SND_I2S_HI6210_I2S is not set +# CONFIG_SND_SOC_IMG is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# CONFIG_SND_SOC_XTFPGA_I2S is not set +# CONFIG_ZX_TDM is not set +CONFIG_SND_NX_SOC=y +CONFIG_SND_NX_I2S=y +CONFIG_SND_NX_I2S_CH0=y +# CONFIG_SND_NX_I2S_CH1 is not set +# CONFIG_SND_NX_I2S_CH2 is not set +CONFIG_SND_NX_SPDIF_TX=y +CONFIG_SND_SPDIF_TRANSCEIVER=y +# CONFIG_SND_CODEC_NULL is not set +# CONFIG_SND_CODEC_ES8316 is not set +# CONFIG_SND_CODEC_ALC5658 is not set +CONFIG_SND_SOC_I2C_AND_SPI=y + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +# CONFIG_SND_SOC_DIO2125 is not set +# CONFIG_SND_SOC_ES7134 is not set +CONFIG_SND_SOC_ES8316=y +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_INNO_RK3036 is not set +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX98927 is not set +# 
CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# CONFIG_SND_SOC_PCM512x_SPI is not set +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +# CONFIG_SND_SOC_RT5677_SPI is not set +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set +CONFIG_SND_SOC_SPDIF=y +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM4567 is not set +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC3X is not set +# CONFIG_SND_SOC_TS3A227E is not set +# CONFIG_SND_SOC_WM8510 is not set +# CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731 is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZX_AUD96P22 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +# CONFIG_SND_SOC_NAU8824 is not set +# CONFIG_SND_SOC_TPA6130A2 is not set +CONFIG_SND_SIMPLE_CARD_UTILS=y +CONFIG_SND_SIMPLE_CARD=y +# CONFIG_SND_SIMPLE_SCU_CARD is not set +# CONFIG_SND_AUDIO_GRAPH_CARD is not set +# CONFIG_SND_AUDIO_GRAPH_SCU_CARD is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +CONFIG_UHID=y +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_ASUS is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_BETOP_FF is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_CORSAIR is not set +# CONFIG_HID_PRODIKEYS is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GT683R is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_TWINHAN is 
not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LED is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_LOGITECH is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THINGM is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +CONFIG_USB_DYNAMIC_MINORS=y +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set +# CONFIG_USB_MON is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_XHCI_HCD is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_EXYNOS=y +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_EXYNOS=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# 
CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +CONFIG_USB_DWC2=m +# CONFIG_USB_DWC2_HOST is not set + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PERIPHERAL is not set +CONFIG_USB_DWC2_DUAL_ROLE=y +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set + +# +# USB Physical Layer drivers +# +# CONFIG_USB_PHY is not set +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +CONFIG_USB_GADGET=m +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_FOTG210_UDC is not set +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_SNP_UDC_PLAT is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_DUMMY_HCD is not set +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_U_ETHER=m +CONFIG_USB_F_ECM=m +CONFIG_USB_F_SUBSET=m +CONFIG_USB_F_RNDIS=m +# CONFIG_USB_CONFIGFS is not set +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_ETH_EEM is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set + +# +# USB Power Delivery and Type-C drivers +# +# CONFIG_TYPEC_UCSI is not set +# CONFIG_USB_LED_TRIG is not set +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_UWB is not set +CONFIG_MMC=y +CONFIG_PWRSEQ_EMMC=y +CONFIG_PWRSEQ_SIMPLE=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=8 +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# 
CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_ARMMMCI is not set +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_SPI is not set +CONFIG_MMC_DW=y +CONFIG_MMC_DW_PLTFM=y +# CONFIG_MMC_DW_EXYNOS is not set +CONFIG_MMC_DW_NEXELL=y +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_VUB300 is not set +# CONFIG_MMC_USHC is not set +# CONFIG_MMC_USDHI6ROL0 is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=y +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_USER is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +# CONFIG_LEDS_TRIGGER_TIMER is not set +# CONFIG_LEDS_TRIGGER_ONESHOT is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_GPIO is not set +# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set + +# +# iptables trigger is under Netfilter config (LED target) +# +# CONFIG_LEDS_TRIGGER_TRANSIENT is not set +# CONFIG_LEDS_TRIGGER_CAMERA is not set +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8523 is not set +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV8803 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# 
CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +# CONFIG_RTC_DRV_PL031 is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_SNVS is not set +# CONFIG_RTC_DRV_R7301 is not set +CONFIG_RTC_DRV_NX=y + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_AMBA_PL08X=y +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +# CONFIG_DW_DMAC is not set + +# +# DMA Clients +# +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +CONFIG_SW_SYNC=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set +# CONFIG_VIRT_DRIVERS is not set + +# +# Virtio drivers +# +# CONFIG_VIRTIO_MMIO is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV_TSCPAGE is not set +CONFIG_STAGING=y +# CONFIG_IRDA is not set +# CONFIG_PRISM2_USB is not set +# CONFIG_COMEDI is not set +# CONFIG_RTLLIB is not set +# CONFIG_RTL8723BS is not set +# CONFIG_R8712U is not set +# CONFIG_R8188EU is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADIS16240 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7606 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7816 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7280 is not set + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7152 is not set +# CONFIG_AD7746 is not set + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16060 is not set + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set + +# +# Light sensors +# +# CONFIG_TSL2x7x is not set + +# +# Active energy metering IC +# 
+# CONFIG_ADE7753 is not set +# CONFIG_ADE7754 is not set +# CONFIG_ADE7758 is not set +# CONFIG_ADE7759 is not set +# CONFIG_ADE7854 is not set + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# CONFIG_AD2S1210 is not set + +# +# Triggers - standalone +# + +# +# Speakup console speech +# +# CONFIG_SPEAKUP is not set +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +CONFIG_ASHMEM=y +CONFIG_ION=y +# CONFIG_ION_SYSTEM_HEAP is not set +# CONFIG_ION_CARVEOUT_HEAP is not set +# CONFIG_ION_CHUNK_HEAP is not set +# CONFIG_ION_CMA_HEAP is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_LNET is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_FB_TFT is not set +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +# CONFIG_MOST is not set +# CONFIG_KS7010 is not set +# CONFIG_GREYBUS is not set + +# +# USB Power Delivery and Type-C drivers +# +# CONFIG_TYPEC_TCPM is not set +# CONFIG_PI433 is not set +# CONFIG_GOLDFISH is not set +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Common Clock Framework +# +# CONFIG_COMMON_CLK_VERSATILE is not set +# CONFIG_CLK_HSDK is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_CLK_QORIQ is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_NXP is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_PXA is not set +# CONFIG_COMMON_CLK_PIC32 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_PROBE=y +# CONFIG_ARM_TIMER_SP804 is not set +# CONFIG_ATMEL_PIT is not set +# CONFIG_SH_TIMER_CMT is not set +# CONFIG_SH_TIMER_MTU2 is not set +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_EM_TIMER_STI is not set +CONFIG_CLKSRC_NEXELL_TIMER=y +# CONFIG_MAILBOX is not set +# CONFIG_IOMMU_SUPPORT is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set + +# +# Rpmsg drivers +# + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set + +# +# i.MX SoC drivers +# + +# +# Qualcomm SoC drivers +# +# CONFIG_SUNXI_SRAM is not set +# CONFIG_SOC_TI is not set + +# +# NEXELL s5pxx18 +# +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=y +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +CONFIG_IIO_KFIFO_BUF=y +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set + +# +# Accelerometers +# +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# 
CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set + +# +# Analog to digital converters +# +# CONFIG_AD7266 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD799X is not set +# CONFIG_CC10001_ADC is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_NAU7802 is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_VF610_ADC is not set +CONFIG_NX_ADC=y + +# +# Amplifiers +# +# CONFIG_AD8366 is not set + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_VZ89X is not set + +# +# Hid Sensor IIO Common +# + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set + +# +# Counters +# + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2632 is not set +# CONFIG_AD5686 is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4922 is not set +# CONFIG_VF610_DAC is not set + +# +# IIO dummy driver +# + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_KMX61 
is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set + +# +# Light sensors +# +# CONFIG_ADJD_S311 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +# CONFIG_JSA1212 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_MAX44000 is not set +# CONFIG_OPT3001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set + +# +# Inclinometer sensors +# + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set + +# +# Digital potentiometers +# +# CONFIG_DS1803 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_HP03 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set + +# +# Proximity and distance sensors +# +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set + +# +# Temperature sensors +# +# CONFIG_MAXIM_THERMOCOUPLE is not set +# CONFIG_MLX90614 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_PCA9685 is not set +CONFIG_PWM_SAMSUNG=m +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +# CONFIG_IPACK_BUS is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_ATH79 is not set +# CONFIG_RESET_BERLIN is not set +# CONFIG_RESET_IMX7 is not set +# CONFIG_RESET_LANTIQ is not set +# CONFIG_RESET_LPC18XX is not set +# CONFIG_RESET_MESON is not set +# CONFIG_RESET_PISTACHIO is not set +# CONFIG_RESET_SOCFPGA is not set +# CONFIG_RESET_STM32 is not set +# CONFIG_RESET_SUNXI is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_ZYNQ is not set +# CONFIG_RESET_TEGRA_BPMP is not set +# CONFIG_FMC is not set + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +# CONFIG_PHY_XGENE is not set +# 
CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +CONFIG_PHY_SAMSUNG_USB2=y +# CONFIG_PHY_EXYNOS4210_USB2 is not set +# CONFIG_PHY_EXYNOS4X12_USB2 is not set +# CONFIG_PHY_EXYNOS5250_USB2 is not set +CONFIG_PHY_NX_USB2=y +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +CONFIG_ARM_PMU=y +# CONFIG_RAS is not set + +# +# Android +# +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDER_DEVICES="binder" +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +# CONFIG_DEV_DAX is not set +CONFIG_NVMEM=y +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# CONFIG_FPGA is not set + +# +# FSI support +# +# CONFIG_FSI is not set +# CONFIG_TEE is not set + +# +# Firmware Drivers +# +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_HAVE_ARM_SMCCC=y +# CONFIG_GOOGLE_FIRMWARE is not set +# CONFIG_MESON_SM is not set + +# +# Tegra firmware driver +# + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_ENCRYPTION is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_FANOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_QUOTACTL is not set +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +CONFIG_FSCACHE=m +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +# CONFIG_ZISOFS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# 
CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_PSTORE is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_SWAP is not set +# CONFIG_NFS_FSCACHE is not set +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V3_ACL is not set +# CONFIG_NFSD_V4 is not set +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +# CONFIG_SUNRPC_DEBUG is not set +# CONFIG_CEPH_FS is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +# CONFIG_CIFS_UPCALL is not set +# CONFIG_CIFS_XATTR is not set +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +# CONFIG_CIFS_DFS_UPCALL is not set +# CONFIG_CIFS_SMB311 is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_MAC_ROMAN is not set +# CONFIG_NLS_MAC_CELTIC is not set +# CONFIG_NLS_MAC_CENTEURO is not set +# CONFIG_NLS_MAC_CROATIAN is not set +# CONFIG_NLS_MAC_CYRILLIC is not set +# CONFIG_NLS_MAC_GAELIC is not set +# CONFIG_NLS_MAC_GREEK is not set +# CONFIG_NLS_MAC_ICELAND is not set +# CONFIG_NLS_MAC_INUIT is not set +# CONFIG_NLS_MAC_ROMANIAN is not set +# CONFIG_NLS_MAC_TURKISH is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set +# CONFIG_VIRTUALIZATION is not set + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_BOOT_PRINTK_DELAY is not set +CONFIG_DYNAMIC_DEBUG=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_GDB_SCRIPTS is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set 
+# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_DEBUG_KERNEL=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_VM is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set +CONFIG_ARCH_HAS_KCOV=y +# CONFIG_KCOV is not set +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Lockups and Hangs +# +# CONFIG_SOFTLOCKUP_DETECTOR is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set +CONFIG_DEBUG_PREEMPT=y + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +# CONFIG_DEBUG_RT_MUTEXES is not set +CONFIG_DEBUG_SPINLOCK=y +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PI_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_PROVE_RCU is not set +# CONFIG_TORTURE_TEST is not set +# CONFIG_RCU_PERF_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +# CONFIG_HWLAT_TRACER is not set +# 
CONFIG_FTRACE_SYSCALLS is not set +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_UPROBE_EVENTS is not set +# CONFIG_PROBE_EVENTS is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +CONFIG_TRACING_EVENTS_GPIO=y +# CONFIG_DMA_API_DEBUG is not set + +# +# Runtime Testing +# +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_SERIAL_CONSOLE=y +# CONFIG_KGDB_TESTS is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x1 +# CONFIG_KDB_KEYBOARD is not set +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set +# CONFIG_UBSAN is not set +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_ARM64_PTDUMP_CORE is not set +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_PID_IN_CONTEXTIDR is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_DEBUG_WX is not set +# CONFIG_DEBUG_ALIGN_RODATA is not set +# CONFIG_ARM64_RELOC_TEST is not set +# CONFIG_CORESIGHT is not set + +# +# Security options +# +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_BIG_KEYS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITY_WRITABLE_HOOKS is not set +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_NETWORK_XFRM is not set +# CONFIG_SECURITY_PATH is not set +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_SELINUX is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +CONFIG_INTEGRITY_AUDIT=y +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y 
+CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +# CONFIG_CRYPTO_RSA is not set +# CONFIG_CRYPTO_DH is not set +CONFIG_CRYPTO_ECDH=m +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_MCRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=m +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CTR=m +# CONFIG_CRYPTO_CTS is not set +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set +# CONFIG_CRYPTO_KEYWRAP is not set + +# +# Hash modes +# +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +# CONFIG_CRYPTO_CRCT10DIF is not set +CONFIG_CRYPTO_GHASH=m +# CONFIG_CRYPTO_POLY1305 is not set +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_TWOFISH_COMMON=y + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set +# CONFIG_CRYPTO_HW is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# CONFIG_ARM64_CRYPTO is not set +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IO=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_CRC_CCITT=y 
+CONFIG_CRC16=y +# CONFIG_CRC_T10DIF is not set +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_XZ_DEC is not set +# CONFIG_XZ_DEC_BCJ is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y +CONFIG_RADIX_TREE_MULTIORDER=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_DMA=y +# CONFIG_DMA_NOOP_OPS is not set +# CONFIG_DMA_VIRT_OPS is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +# CONFIG_CORDIC is not set +# CONFIG_DDR is not set +# CONFIG_IRQ_POLL is not set +CONFIG_LIBFDT=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +# CONFIG_SG_SPLIT is not set +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set diff -ENwbur a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h --- a/arch/arm64/include/asm/arch_timer.h 2018-05-06 08:47:35.377264876 +0200 +++ b/arch/arm64/include/asm/arch_timer.h 2018-05-06 08:49:48.266658411 +0200 @@ -131,6 +131,7 @@ BUG(); } +#ifdef CONFIG_ARM_ARCH_TIMER static inline u32 arch_timer_get_cntfrq(void) { return read_sysreg(cntfrq_el0); @@ -165,5 +166,33 @@ { return 0; } +#else +static inline u32 arch_timer_get_cntfrq(void) +{ + return 0; +} + +static inline u32 arch_timer_get_cntkctl(void) +{ + + return 0; +} + +static inline void arch_timer_set_cntkctl(u32 cntkctl) +{ +} + +static inline u64 arch_counter_get_cntpct(void) +{ +/* +* AArch64 kernel and user space mandate the use of CNTVCT. +*/ + BUG(); + return 0; +} + +extern u64 arch_counter_get_cntvct(void); +extern int arch_timer_arch_init(void); +#endif #endif diff -ENwbur a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h --- a/arch/arm64/include/asm/tlbflush.h 2018-05-06 08:47:35.385265201 +0200 +++ b/arch/arm64/include/asm/tlbflush.h 2018-05-06 08:49:48.270658574 +0200 @@ -117,23 +117,36 @@ static inline void flush_tlb_mm(struct mm_struct *mm) { + /* FIXME: temporary turnaround code to resolve tlb flush by ASID BUG. + * We assume the cause of this issue is synchronization between cpu + * clusters. This issue must be resolved in BL1~BL3 layer not here. + * This patch will be removed afterwards. 
+ */
+#if defined (CONFIG_SMP) && defined (CONFIG_ARCH_S5P6818)
+	flush_tlb_all();
+#else
 	unsigned long asid = ASID(mm) << 48;
 
 	dsb(ishst);
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
 	dsb(ish);
+#endif
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long uaddr)
 {
+#ifdef CONFIG_ARM64_WORKAROUND_CCI400_DVMV7
+	flush_tlb_mm(vma->vm_mm);
+#else
 	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
 	__tlbi(vale1is, addr);
 	__tlbi_user(vale1is, addr);
 	dsb(ish);
+#endif
 }
 
 /*
@@ -146,6 +159,9 @@
 				     unsigned long start, unsigned long end,
 				     bool last_level)
 {
+#ifdef CONFIG_ARM64_WORKAROUND_CCI400_DVMV7
+	flush_tlb_mm(vma->vm_mm);
+#else
 	unsigned long asid = ASID(vma->vm_mm) << 48;
 	unsigned long addr;
 
@@ -168,6 +184,7 @@
 		}
 	}
 	dsb(ish);
+#endif
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
@@ -178,6 +195,9 @@
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
+#ifdef CONFIG_ARM64_WORKAROUND_CCI400_DVMV7
+	flush_tlb_all();
+#else
 	unsigned long addr;
 
 	if ((end - start) > MAX_TLB_RANGE) {
@@ -193,6 +213,7 @@
 		__tlbi(vaae1is, addr);
 	dsb(ish);
 	isb();
+#endif
 }
 
 /*
@@ -202,11 +223,15 @@
 static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 				       unsigned long uaddr)
 {
+#ifdef CONFIG_ARM64_WORKAROUND_CCI400_DVMV7
+	flush_tlb_mm(mm);
+#else
 	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
 	__tlbi(vae1is, addr);
 	__tlbi_user(vae1is, addr);
 	dsb(ish);
+#endif
 }
 
 #endif
diff -ENwbur a/arch/arm64/Kconfig b/arch/arm64/Kconfig
--- a/arch/arm64/Kconfig	2018-05-06 08:47:35.345263577 +0200
+++ b/arch/arm64/Kconfig	2018-05-06 08:49:48.242657438 +0200
@@ -30,11 +30,11 @@
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARM_AMBA
-	select ARM_ARCH_TIMER
+	select ARM_ARCH_TIMER if !ARCH_S5P6818
 	select ARM_GIC
 	select AUDIT_ARCH_COMPAT_GENERIC
 	select ARM_GIC_V2M if PCI
-	select ARM_GIC_V3
+	select ARM_GIC_V3 if !ARCH_S5P6818
 	select ARM_GIC_V3_ITS if PCI
 	select ARM_PSCI_FW
 	select BUILDTIME_EXTABLE_SORT
@@ -612,6 +612,25 @@
 	default 47 if ARM64_VA_BITS_47
 	default 48 if ARM64_VA_BITS_48
 
+config ARM64_WORKAROUND_CCI400_DVMV7
+	bool "Workaround for CCI-400 using the DVMv7 protocol"
+	depends on SMP
+	help
+	  This option adds an alternative code sequence to work around ARMv8
+	  cores connected through a CCI-400 that uses the DVMv7 protocol.
+
+	  According to the "AMBA AXI and ACE Protocol Specification", chapters
+	  C12.4 and C12.6, the CCI-400 optionally supports DVM (Distributed
+	  Virtual Memory) protocol version 8 (DVMv8). DVMv8 supports both
+	  ARMv8 and ARMv7, whereas version 7 (DVMv7) supports only ARMv7.
+
+	  To work properly with the DVMv7 protocol, C12.4 notes the following:
+	  - A message is received correctly only if the upper 8 bits of the
+	    ASID are zero.
+	  - TLB invalidation by address range will work incorrectly.
+
+	  Therefore, this option limits the number of usable ASID bits and
+	  converts all TLB flush-by-address-range commands into flushes by
+	  ASID.
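For clarity, the net effect of this option can be summarised by the following
sketch (illustrative only, not part of the patch; the dvmv7_* names are
hypothetical — the real logic lives in the tlbflush.h hunks above and the
context.c hunk below):

	/* DVMv7 transports only the low 8 ASID bits, so the usable ASID
	 * space is capped at 8 bits to keep the upper bits zero. */
	static inline unsigned int dvmv7_usable_asid_bits(void)
	{
		return 8;
	}

	/* Invalidation by address range is unreliable on DVMv7, so every
	 * ranged operation is demoted to a flush of the whole ASID. */
	static inline void dvmv7_flush_tlb_range(struct vm_area_struct *vma,
						 unsigned long start,
						 unsigned long end)
	{
		flush_tlb_mm(vma->vm_mm);
	}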
+
 config CPU_BIG_ENDIAN
 	bool "Build big-endian kernel"
 	help
diff -ENwbur a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
--- a/arch/arm64/Kconfig.platforms	2018-05-06 08:47:35.345263577 +0200
+++ b/arch/arm64/Kconfig.platforms	2018-05-06 08:49:48.242657438 +0200
@@ -215,6 +215,20 @@
 	help
 	  Support for Spreadtrum ARM based SoCs
 
+config ARCH_S5P6818
+	bool "NEXELL S5P6818"
+	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_RESET_CONTROLLER
+	select CLKSRC_NXP_TIMER
+	select RESET_CONTROLLER
+	select PINCTRL
+	select ARM64_ERRATUM_845719
+	select HAVE_S3C2410_I2C if I2C
+	select HAVE_S3C2410_WATCHDOG if WATCHDOG
+	select ARM64_WORKAROUND_CCI400_DVMV7
+	help
+	  This enables support for the Nexell S5P6818 architecture.
+
 config ARCH_THUNDER
 	bool "Cavium Inc. Thunder SoC Family"
 	help
diff -ENwbur a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
--- a/arch/arm64/kernel/time.c	2018-05-06 08:47:35.393265526 +0200
+++ b/arch/arm64/kernel/time.c	2018-05-06 08:49:48.278658899 +0200
@@ -66,11 +66,14 @@
 
 void __init time_init(void)
 {
+#ifdef CONFIG_ARM_ARCH_TIMER
 	u32 arch_timer_rate;
+#endif
 
 	of_clk_init(NULL);
 	timer_probe();
 
+#ifdef CONFIG_ARM_ARCH_TIMER
 	tick_setup_hrtimer_broadcast();
 
 	arch_timer_rate = arch_timer_get_rate();
@@ -79,4 +82,5 @@
 
 	/* Calibrate the delay loop directly */
 	lpj_fine = arch_timer_rate / HZ;
+#endif
 }
diff -ENwbur a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
--- a/arch/arm64/mm/context.c	2018-05-06 08:47:35.397265688 +0200
+++ b/arch/arm64/mm/context.c	2018-05-06 08:49:48.282659061 +0200
@@ -69,6 +69,11 @@
 		asid = 16;
 	}
 
+#ifdef CONFIG_ARM64_WORKAROUND_CCI400_DVMV7
+/* In DVMv7 protocol, ASID bits must be 8 regardless of cpu core feature */
+	asid = 8;
+#endif
+
 	return asid;
 }
@@ -119,6 +124,9 @@
 	/* Queue a TLB invalidate and flush the I-cache if necessary. */
 	cpumask_setall(&tlb_flush_pending);
+#ifdef CONFIG_ARM64_WORKAROUND_CCI400_DVMV7
+	flush_tlb_all();
+#endif
 }
 
 static bool check_update_reserved_asid(u64 asid, u64 newasid)
@@ -236,6 +244,9 @@
 	 */
 	if (!system_uses_ttbr0_pan())
 		cpu_switch_mm(mm->pgd, mm);
+#ifdef CONFIG_ARM64_WORKAROUND_CCI400_DVMV7
+	flush_tlb_all();
+#endif
 }
 
 /* Errata workaround post TTBRx_EL1 update. */
diff -ENwbur a/Documentation/devicetree/bindings/pinctrl/nexell,s5p6818-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/nexell,s5p6818-pinctrl.txt
--- a/Documentation/devicetree/bindings/pinctrl/nexell,s5p6818-pinctrl.txt	1970-01-01 01:00:00.000000000 +0100
+++ b/Documentation/devicetree/bindings/pinctrl/nexell,s5p6818-pinctrl.txt	2018-05-06 08:49:47.754637636 +0200
@@ -0,0 +1,95 @@
+Binding for the Nexell s5p6818 pin controller
+=============================================
+
+Nexell's ARM-based SoCs integrate a GPIO and pin mux/config hardware
+controller. It controls the input/output settings on the available pads/pins
+and also provides the ability to multiplex and configure the output of
+various on-chip controllers onto these pads.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+
+Required properties:
+ - compatible: may be "nexell,s5pxx18-pinctrl" or "nexell,s5p6818-pinctrl"
+ - reg: should be the register base and length as documented in the datasheet
+ - interrupts: interrupt specifiers for the controller's gpio and alive pins
+
+Example:
+pinctrl_0: pinctrl@c0010000 {
+	compatible = "nexell,s5pxx18-pinctrl";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	reg = ;
+	interrupts = <0 IRQ_GPIOA 0>,
+		     <0 IRQ_GPIOB 0>,
+		     <0 IRQ_GPIOC 0>,
+		     <0 IRQ_GPIOD 0>,
+		     <0 IRQ_GPIOE 0>,
+		     <0 IRQ_ALIVE 0>;
+};
+
+ Note: The interrupts must always be listed in the order shown above.
+
+
+Nexell's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters.
+
+ Child nodes must set at least one of the following properties:
+ - nexell,pins = Select the pins to which this configuration applies.
+ - nexell,pin-function = Select the function of the selected pins.
+ - nexell,pin-pull = Pull up/down configuration.
+ - nexell,pin-strength = Drive strength configuration.
+
+ Valid values for nexell,pins are:
+	"gpioX-N" : X in {A,B,C,D,E}, N in {0-31}
+ Valid values for nexell,pin-function are:
+	"N" : N in {0-3}.
+	The meaning of each value differs from pin to pin; please refer to
+	the datasheet.
+ Valid values for nexell,pin-pull are:
+	"N" : 0 - Down, 1 - Up, 2 - Off
+ Valid values for nexell,pin-strength are:
+	"N" : 0,1,2,3
+
+
+Example:
+ - pin settings
+	gmac_txd: gmac-txd {
+		nexell,pins = "gpioe-7", "gpioe-8", "gpioe-9", "gpioe-10";
+		nexell,pin-function = <1>;
+		nexell,pin-pull = <2>;
+		nexell,pin-strength = <3>;
+	};
+
+	gmac_rxd: gmac-rxd {
+		nexell,pins = "gpioe-14", "gpioe-15", "gpioe-16", "gpioe-17";
+		nexell,pin-function = <1>;
+		nexell,pin-pull = <2>;
+		nexell,pin-strength = <3>;
+	};
+
+	gmac_txen: gmac-txen {
+		nexell,pins = "gpioe-11";
+		nexell,pin-function = <1>;
+		nexell,pin-pull = <2>;
+		nexell,pin-strength = <3>;
+	};
+
+ - used by client devices
+	gmac0:ethernet@... {
+		pinctrl-names = "default";
+		pinctrl-0 = <&gmac_txd &gmac_rxd &gmac_txen &gmac_mdc
+			     &gmac_mdio &gmac_rxclk &gmac_txclk>;
+		...
+	};
+
diff -ENwbur a/Documentation/devicetree/bindings/reset/nexell,reset.txt b/Documentation/devicetree/bindings/reset/nexell,reset.txt
--- a/Documentation/devicetree/bindings/reset/nexell,reset.txt	1970-01-01 01:00:00.000000000 +0100
+++ b/Documentation/devicetree/bindings/reset/nexell,reset.txt	2018-05-06 08:49:47.766638123 +0200
@@ -0,0 +1,32 @@
+Nexell System Reset Controller
+======================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+The reset controller registers are part of the system-ctl block on the
+s5pxx18 SoCs.
+
+Required properties:
+- compatible: may be "nexell,s5pxx18-reset"
+- reg: should be the register base and length as documented in the datasheet
+- #reset-cells: 1, see below
+
+Example:
+nexell_reset:reset@c0012000 {
+	#reset-cells = <1>;
+	compatible = "nexell,s5pxx18-reset";
+	reg = <0xC0012000 0x3>;
+};
+
+Specifying reset lines connected to IP modules
+==============================================
+example:
+
+	serial0:serial@..... {
+		...
+		resets = <&nexell_reset RESET_ID_UART0>;
+		reset-names = "uart-reset";
+		...
+	};
+
+The reset index can be found in .
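As a consumer-side sketch (assuming only the generic kernel reset API in
<linux/reset.h>; the foo_probe() driver shown here is hypothetical), the reset
line declared above would be resolved and pulsed roughly like this:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/reset.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct reset_control *rst;

		/* Looks up the "uart-reset" entry from the resets/reset-names
		 * properties in the example above. */
		rst = devm_reset_control_get_exclusive(&pdev->dev, "uart-reset");
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* Pulses the RESET_ID_UART0 line via the nexell_reset node. */
		return reset_control_reset(rst);
	}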
diff -ENwbur a/drivers/clk/Makefile b/drivers/clk/Makefile --- a/drivers/clk/Makefile 2018-05-06 08:47:36.225299305 +0200 +++ b/drivers/clk/Makefile 2018-05-06 08:49:49.034689576 +0200 @@ -98,3 +98,4 @@ endif obj-$(CONFIG_ARCH_ZX) += zte/ obj-$(CONFIG_ARCH_ZYNQ) += zynq/ +obj-$(CONFIG_ARCH_S5P6818) += nexell/ diff -ENwbur a/drivers/clk/nexell/clk-s5pxx18.c b/drivers/clk/nexell/clk-s5pxx18.c --- a/drivers/clk/nexell/clk-s5pxx18.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/clk/nexell/clk-s5pxx18.c 2018-05-06 08:49:49.054690387 +0200 @@ -0,0 +1,589 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "clk-s5pxx18.h" + +#define to_clk_dev(_hw) container_of(_hw, struct clk_dev, hw) + +struct clk_dev_peri { + const char *parent_name; + const char *name; + void __iomem *base; + int id; + int clk_step; /* 1S or 2S */ + bool enable; + long rate; + u32 in_mask; + u32 in_mask1; + /* clock register */ + int div_src_0; + int div_val_0; + int invert_0; + int div_src_1; + int div_val_1; + int invert_1; + int in_extclk_1; + int in_extclk_2; + int fix_src; +}; + +struct clk_dev { + struct device_node *node; + struct clk *clk; + struct clk_hw hw; + struct clk_dev_peri *peri; + unsigned int rate; + spinlock_t lock; +}; + +struct clk_dev_map { + unsigned int con_enb; + unsigned int con_gen[4]; +}; + +#define MAX_DIVIDER ((1 << 8) - 1) /* 256, align 2 */ + +static inline void clk_dev_bclk(void *base, int on) +{ + struct clk_dev_map *reg = base; + unsigned int val = readl(®->con_enb) & ~(0x3); + + val |= (on ? 3 : 0) & 0x3; /* always BCLK */ + writel(val, ®->con_enb); +} + +static inline void clk_dev_pclk(void *base, int on) +{ + struct clk_dev_map *reg = base; + unsigned int val = 0; + + if (!on) + return; + + val = readl(®->con_enb) & ~(1 << 3); + val |= (1 << 3); + writel(val, ®->con_enb); +} + +static inline void clk_dev_rate(void *base, int step, int src, int div) +{ + struct clk_dev_map *reg = base; + unsigned int val = 0; + + val = readl(®->con_gen[step << 1]); + val &= ~(0x07 << 2); + val |= (src << 2); /* source */ + val &= ~(0xFF << 5); + val |= (div - 1) << 5; /* divider */ + writel(val, ®->con_gen[step << 1]); +} + +static inline void clk_dev_inv(void *base, int step, int inv) +{ + struct clk_dev_map *reg = base; + unsigned int val = readl(®->con_gen[step << 1]) & ~(1 << 1); + + val |= (inv << 1); + writel(val, ®->con_gen[step << 1]); +} + +static inline void clk_dev_enb(void *base, int on) +{ + struct clk_dev_map *reg = base; + unsigned int val = readl(®->con_enb) & ~(1 << 2); + + val |= ((on ? 
1 : 0) << 2); + writel(val, ®->con_enb); +} + +static inline long clk_dev_divide(long rate, long request, int align, + int *divide) +{ + int div = (rate / request); + int max = MAX_DIVIDER & ~(align - 1); + int adv = (div & ~(align - 1)) + align; + + if (!div) { + if (divide) + *divide = 1; + return rate; + } + + if (1 != div) + div &= ~(align - 1); + + if (div != adv && abs(request - rate / div) > abs(request - rate / adv)) + div = adv; + + div = (div > max ? max : div); + if (divide) + *divide = div; + + return (rate / div); +} + +static long clk_dev_bus_rate(struct clk_dev_peri *peri) +{ + struct clk *clk; + const char *name = NULL; + long rate = 0; + + if (I_PCLK_MASK & peri->in_mask) + name = CLK_BUS_PCLK; + else if (I_BCLK_MASK & peri->in_mask) + name = CLK_BUS_PCLK; + + if (name) { + clk = clk_get(NULL, name); + rate = clk_get_rate(clk); + clk_put(clk); + } + + return rate ?: -EINVAL; +} + +static long clk_dev_pll_rate(int no) +{ + struct clk *clk; + char name[16]; + long rate = 0; + + sprintf(name, "pll%d", no); + clk = clk_get(NULL, name); + rate = clk_get_rate(clk); + clk_put(clk); + + return rate; +} + +static long dev_round_rate(struct clk_hw *hw, unsigned long rate) +{ + struct clk_dev_peri *peri = to_clk_dev(hw)->peri; + unsigned long request = rate, new_rate = 0; + unsigned long clock_hz, freq_hz = 0; + unsigned int mask; + int step, div[2] = { + 0, + }; + int i, n, start_src = 0, max_src = 0, clk2 = 0; + short s1 = 0, s2 = 0, d1 = 0, d2 = 0; + + step = peri->clk_step; + mask = peri->in_mask; + pr_debug("clk: %s request = %ld [input=0x%x]\n", peri->name, rate, + mask); + + if (!(mask & I_CLOCK_MASK)) + return clk_dev_bus_rate(peri); + +next: + if (peri->fix_src >= 0) { + start_src = peri->fix_src; + max_src = start_src + 1; + } else { + start_src = 0; + max_src = I_CLOCK_NUM; + } + + for (n = start_src ; n < max_src ; n++) { + if (!(((mask & I_CLOCK_MASK) >> n) & 0x1)) + continue; + + if (I_EXT1_BIT == n) + rate = peri->in_extclk_1; + else if (I_EXT2_BIT == n) + rate = peri->in_extclk_2; + else + rate = clk_dev_pll_rate(n); + + if (!rate) + continue; + + clock_hz = rate; + for (i = 0; step > i; i++) + rate = clk_dev_divide(rate, request, 2, &div[i]); + + if (new_rate && (abs(rate - request) > abs(new_rate - request))) + continue; + + pr_debug("clk: %s, pll.%d request[%ld] calc[%ld]\n", peri->name, + n, request, rate); + + if (clk2) { + s1 = -1, d1 = -1; /* not use */ + s2 = n, d2 = div[0]; + } else { + s1 = n, d1 = div[0]; + s2 = I_CLKn_BIT, d2 = div[1]; + } + + new_rate = rate; + freq_hz = clock_hz; + + if (request == rate) + break; + } + + /* search 2th clock from input */ + if (!clk2 && abs(new_rate - request) && + peri->in_mask1 & ((1 << I_CLOCK_NUM) - 1)) { + clk2 = 1; + mask = peri->in_mask1; + step = 1; + goto next; + } + + peri->div_src_0 = s1, peri->div_val_0 = d1; + peri->div_src_1 = s2, peri->div_val_1 = d2; + + pr_debug("clk: %s, step[%d] src[%d,%d] %ld /(div0: %d * div1: %d) ", + peri->name, peri->clk_step, peri->div_src_0, peri->div_src_1, + freq_hz, peri->div_val_0, peri->div_val_1); + pr_debug("= %ld, %ld diff (%ld)\n", new_rate, request, + (long)abs(new_rate - request)); + + return new_rate; +} + +static int dev_set_rate(struct clk_hw *hw, unsigned long rate) +{ + struct clk_dev_peri *peri = to_clk_dev(hw)->peri; + int i; + + rate = dev_round_rate(hw, rate); + + for (i = 0; peri->clk_step > i; i++) { + int s = (0 == i ? peri->div_src_0 : peri->div_src_1); + int d = (0 == i ? 
peri->div_val_0 : peri->div_val_1); + + if (-1 == s) + continue; + + /* change rate */ +#ifdef CONFIG_EARLY_PRINTK + if (!strcmp(peri->name, "uart0")) + break; +#endif + clk_dev_rate((void *)peri->base, i, s, d); + + pr_debug("clk: %s (%p) set_rate [%d] src[%d] div[%d]\n", + peri->name, peri->base, i, s, d); + } + peri->rate = rate; + return rate; +} + +/* + * clock devices interface + */ +static int clk_dev_enable(struct clk_hw *hw) +{ + struct clk_dev_peri *peri = to_clk_dev(hw)->peri; + int i = 0, inv = 0; + + pr_debug("clk: %s enable (BCLK=%s, PCLK=%s)\n", peri->name, + I_GATE_BCLK & peri->in_mask ? "ON" : "PASS", + I_GATE_PCLK & peri->in_mask ? "ON" : "PASS"); + + if (peri->in_mask & I_GATE_BCLK) + clk_dev_bclk((void *)peri->base, 1); + + if (peri->in_mask & I_GATE_PCLK) + clk_dev_pclk((void *)peri->base, 1); + + if (!(peri->in_mask & I_CLOCK_MASK)) + return 0; + + for (i = 0, inv = peri->invert_0; peri->clk_step > i; + i++, inv = peri->invert_1) + clk_dev_inv((void *)peri->base, i, inv); + + /* restore clock rate */ + for (i = 0; peri->clk_step > i; i++) { + if (peri->fix_src < 0) { + int s = (0 == i ? peri->div_src_0 : peri->div_src_1); + int d = (0 == i ? peri->div_val_0 : peri->div_val_1); + + if (s == -1) + continue; + + clk_dev_rate((void *)peri->base, i, s, d); + } else { + int s = peri->fix_src; + int d = (0 == i ? peri->div_val_0 : peri->div_val_1); + if(d == 0) + d = 1; + clk_dev_rate((void *)peri->base, i, s, d); + } + } + + clk_dev_enb((void *)peri->base, 1); + peri->enable = true; + + return 0; +} + +static void clk_dev_disable(struct clk_hw *hw) +{ + struct clk_dev_peri *peri = to_clk_dev(hw)->peri; + + pr_debug("clk: %s disable\n", peri->name); + + if (peri->in_mask & I_GATE_BCLK) + clk_dev_bclk((void *)peri->base, 0); + + if (peri->in_mask & I_GATE_PCLK) + clk_dev_pclk((void *)peri->base, 0); + + if (!(peri->in_mask & I_CLOCK_MASK)) + return; + + clk_dev_rate((void *)peri->base, 0, 7, 256); /* for power save */ + clk_dev_enb((void *)peri->base, 0); + + peri->enable = false; + +} + +static unsigned long clk_dev_recalc_rate(struct clk_hw *hw, unsigned long rate) +{ + struct clk_dev_peri *peri = to_clk_dev(hw)->peri; + + pr_debug("%s: name %s, (%lu)\n", __func__, peri->name, peri->rate); + return peri->rate; +} + +static long clk_dev_round_rate(struct clk_hw *hw, unsigned long drate, + unsigned long *prate) +{ + struct clk_dev_peri *peri = to_clk_dev(hw)->peri; + long rate = dev_round_rate(hw, drate); + + pr_debug("%s: name %s, (%lu, %lu)\n", __func__, peri->name, drate, + rate); + return rate; +} + +static int clk_dev_set_rate(struct clk_hw *hw, unsigned long drate, + unsigned long prate) +{ + struct clk_dev_peri *peri = to_clk_dev(hw)->peri; + int rate = dev_set_rate(hw, drate); + + pr_debug("%s: name %s, rate %lu:%d\n", __func__, peri->name, drate, + rate); + return rate; +} + +static const struct clk_ops clk_dev_ops = { + .recalc_rate = clk_dev_recalc_rate, + .round_rate = clk_dev_round_rate, + .set_rate = clk_dev_set_rate, + .enable = clk_dev_enable, + .disable = clk_dev_disable, +}; + +static const struct clk_ops clk_empty_ops = {}; + +struct clk *clk_dev_get_provider(struct of_phandle_args *clkspec, void *data) +{ + struct clk_dev *clk_data = data; + + pr_debug("%s: name %s\n", __func__, clk_data->peri->name); + return clk_data->clk; +} + +static void __init clk_dev_parse_device_data(struct device_node *np, + struct clk_dev *clk_data, + struct device *dev) +{ + struct clk_dev_peri *peri = clk_data->peri; + unsigned int frequency = 0; + u32 value; + + if 
(of_property_read_string(np, "clock-output-names", &peri->name)) { + pr_err("clock node is missing 'clock-output-names'\n"); + return; + } + + if (!of_property_read_string(np, "clock-names", &peri->parent_name)) + return; + + if (of_property_read_u32(np, "cell-id", &peri->id)) { + pr_err("clock node is missing 'cell-id'\n"); + return; + } + + if (of_property_read_u32(np, "clk-step", &peri->clk_step)) { + pr_err("clock node is missing 'clk-step'\n"); + return; + } + + if (of_property_read_u32(np, "clk-input", &peri->in_mask)) { + pr_err("clock node is missing 'clk-input'\n"); + return; + } + + if (2 == peri->clk_step && + of_property_read_u32(np, "clk-input1", &peri->in_mask1)) { + pr_err("clock node is missing 'clk-input1'\n"); + return; + } + + if (!of_property_read_u32(np, "src-force", &value)) + peri->fix_src = value; + else + peri->fix_src = -1; + + if (!of_property_read_u32(np, "clk-input-ext1", &value)) + peri->in_extclk_1 = value; + + if (!of_property_read_u32(np, "clk-input-ext2", &value)) + peri->in_extclk_2 = value; + + if (!of_property_read_u32(np, "clock-frequency", &frequency)) + clk_data->rate = frequency; + + peri->base = of_iomap(np, 0); + if (!peri->base) { + pr_err("Can't map registers for clock %s!\n", peri->name); + return; + } + + pr_debug("%8s: id=%2d, base=%p, step=%d, m=0x%04x, m1=0x%04x\n", + peri->name, peri->id, peri->base, peri->clk_step, + peri->in_mask, peri->in_mask1); +} + +struct clk *clk_dev_clock_register(const char *name, const char *parent_name, + struct clk_hw *hw, const struct clk_ops *ops, + unsigned long flags) +{ + struct clk *clk; + struct clk_init_data init; + + init.name = name; + init.ops = ops; + init.flags = flags; + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = parent_name ? 
1 : 0; + hw->init = &init; + pr_debug("Register clk %8s: parent %s\n", name, parent_name); + + clk = clk_register(NULL, hw); + if (IS_ERR(clk)) { + pr_err("%s: failed to register pll clock %s\n", __func__, + init.name); + return NULL; + } + + if (clk_register_clkdev(clk, init.name, NULL)) + pr_err("%s: failed to register lookup for %s", __func__, + init.name); + + return clk; +} + +#ifdef CONFIG_PM_SLEEP +static int clk_syscore_suspend(void) { return 0; } + +static void clk_syscore_resume(void) {} + +static struct syscore_ops clk_syscore_ops = { + .suspend = clk_syscore_suspend, .resume = clk_syscore_resume, +}; +#endif /* CONFIG_PM_SLEEP */ + +static void __init clk_dev_of_setup(struct device_node *node) +{ + struct device_node *np; + struct clk_dev *clk_data = NULL; + struct clk_dev_peri *peri = NULL; + struct clk *clk; + int i = 0, size = (sizeof(*clk_data) + sizeof(*peri)); + int num_clks; + +#ifdef CONFIG_ARM_NEXELL_CPUFREQ + char pll[16]; + + sprintf(pll, "sys-pll%d", CONFIG_NEXELL_CPUFREQ_PLLDEV); +#endif + + num_clks = of_get_child_count(node); + if (!num_clks) { + pr_err("Failed to clocks count for clock generator!\n"); + return; + } + + clk_data = kzalloc(size * num_clks, GFP_KERNEL); + if (!clk_data) { + WARN_ON(1); + return; + } + peri = (struct clk_dev_peri *)(clk_data + num_clks); + + for_each_child_of_node(node, np) { + clk_data[i].peri = &peri[i]; + clk_data[i].node = np; + clk_dev_parse_device_data(np, &clk_data[i], NULL); + of_clk_add_provider(np, clk_dev_get_provider, &clk_data[i++]); + } + + for (i = 0; num_clks > i; i++, clk_data++) { + unsigned long flags = 0; + const struct clk_ops *ops = &clk_dev_ops; + + if (peri[i].parent_name) { + ops = &clk_empty_ops; + flags = CLK_IS_BASIC; +#ifdef CONFIG_ARM_NEXELL_CPUFREQ + if (!strcmp(pll, peri[i].parent_name)) + flags |= CLK_SET_RATE_PARENT; +#endif + } + + clk = clk_dev_clock_register(peri[i].name, peri[i].parent_name, + &clk_data->hw, ops, flags); + if (NULL == clk) + continue; + + clk_data->clk = clk; + if (clk_data->rate) { + pr_debug("[%s set boot rate %u]\n", node->name, + clk_data->rate); + clk_set_rate(clk, clk_data->rate); + } + } + +#ifdef CONFIG_PM_SLEEP + register_syscore_ops(&clk_syscore_ops); +#endif + + pr_debug("[%s:%d] %s (%d)\n", __func__, __LINE__, node->name, num_clks); +} +CLK_OF_DECLARE(s5pxx18, "nexell,s5pxx18,clocks", clk_dev_of_setup); diff -ENwbur a/drivers/clk/nexell/clk-s5pxx18.h b/drivers/clk/nexell/clk-s5pxx18.h --- a/drivers/clk/nexell/clk-s5pxx18.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/clk/nexell/clk-s5pxx18.h 2018-05-06 08:49:49.054690387 +0200 @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _CLK_S5PXX18_H +#define _CLK_S5PXX18_H + +#define I_PLL0_BIT (0) +#define I_PLL1_BIT (1) +#define I_PLL2_BIT (2) +#define I_PLL3_BIT (3) +#define I_EXT1_BIT (4) +#define I_EXT2_BIT (5) +#define I_CLKn_BIT (7) + +#define I_CLOCK_NUM 6 /* PLL0, PLL1, PLL2, PLL3, EXT1, EXT2 */ + +#ifdef CONFIG_ARM_NEXELL_CPUFREQ +#define I_EXECEPT_CLK (1 << CONFIG_NEXELL_CPUFREQ_PLLDEV) +#else +#define I_EXECEPT_CLK (0) +#endif + +#define I_CLOCK_MASK (((1 << I_CLOCK_NUM) - 1) & ~I_EXECEPT_CLK) +/* + * clock + */ +#define CLK_CPU_PLL0 "sys-pll0" +#define CLK_CPU_PLL1 "sys-pll1" +#define CLK_CPU_PLL2 "sys-pll2" +#define CLK_CPU_PLL3 "sys-pll3" +#define CLK_CPU_FCLK "sys-cfclk" +#define CLK_CPU_HCLK "sys-chclk" +#define CLK_MEM_FCLK "sys-mfclk" +#define CLK_MEM_DCLK "sys-mdclk" +#define CLK_MEM_BCLK "sys-mbclk" +#define CLK_MEM_PCLK "sys-mpclk" +#define CLK_BUS_BCLK "sys-bbclk" +#define CLK_BUS_PCLK "sys-bpclk" +#define CLK_VPU_BCLK "sys-vpubclk" +#define CLK_VPU_PCLK "sys-vpupclk" +#define CLK_DIS_BCLK "sys-disbclk" +#define CLK_DIS_PCLK "sys-disspclk" +#define CLK_CCI_BCLK "sys-ccibclk" +#define CLK_CCI_PCLK "sys-ccipclk" +#define CLK_G3D_BCLK "sys-g3dbclk" + +#define CLK_ID_TIMER_1 0 +#define CLK_ID_TIMER_2 1 +#define CLK_ID_TIMER_3 2 +#define CLK_ID_PWM_1 3 +#define CLK_ID_PWM_2 4 +#define CLK_ID_PWM_3 5 +#define CLK_ID_I2C_0 6 +#define CLK_ID_I2C_1 7 +#define CLK_ID_I2C_2 8 +#define CLK_ID_MIPI 9 +#define CLK_ID_GMAC 10 /* External Clock 1 */ +#define CLK_ID_SPDIF_TX 11 +#define CLK_ID_MPEGTSI 12 +#define CLK_ID_PWM_0 13 +#define CLK_ID_TIMER_0 14 +#define CLK_ID_I2S_0 15 /* External Clock 1 */ +#define CLK_ID_I2S_1 16 /* External Clock 1 */ +#define CLK_ID_I2S_2 17 /* External Clock 1 */ +#define CLK_ID_SDHC_0 18 +#define CLK_ID_SDHC_1 19 +#define CLK_ID_SDHC_2 20 +#define CLK_ID_VR 21 +#define CLK_ID_UART_0 22 /* UART0_MODULE */ +#define CLK_ID_UART_2 23 /* UART1_MODULE */ +#define CLK_ID_UART_1 24 /* pl01115_Uart_modem_MODULE */ +#define CLK_ID_UART_3 25 /* pl01115_Uart_nodma0_MODULE */ +#define CLK_ID_UART_4 26 /* pl01115_Uart_nodma1_MODULE */ +#define CLK_ID_UART_5 27 /* pl01115_Uart_nodma2_MODULE */ +#define CLK_ID_DIT 28 +#define CLK_ID_PPM 29 +#define CLK_ID_VIP_0 30 /* External Clock 1 */ +#define CLK_ID_VIP_1 31 /* External Clock 1, 2 */ +#define CLK_ID_USB2HOST 32 /* External Clock 2 */ +#define CLK_ID_CODA 33 +#define CLK_ID_CRYPTO 34 +#define CLK_ID_SCALER 35 +#define CLK_ID_PDM 36 +#define CLK_ID_SPI_0 37 +#define CLK_ID_SPI_1 38 +#define CLK_ID_SPI_2 39 +#define CLK_ID_MAX 39 +#define CLK_ID_USBOTG 40 /* Shared with USB2HOST */ + +#define I_PLL0_BIT (0) +#define I_PLL1_BIT (1) +#define I_PLL2_BIT (2) +#define I_PLL3_BIT (3) +#define I_EXT1_BIT (4) +#define I_EXT2_BIT (5) +#define I_CLKn_BIT (7) + +#define I_PLL0 (1 << I_PLL0_BIT) +#define I_PLL1 (1 << I_PLL1_BIT) +#define I_PLL2 (1 << I_PLL2_BIT) +#define I_PLL3 (1 << I_PLL3_BIT) +#define I_EXTCLK1 (1 << I_EXT1_BIT) +#define I_EXTCLK2 (1 << I_EXT2_BIT) + +#define I_PLL_0_1 (I_PLL0 | I_PLL1) +#define I_PLL_0_2 (I_PLL_0_1 | I_PLL2) +#define I_PLL_0_3 (I_PLL_0_2 | I_PLL3) +#define I_CLKnOUT (0) + +#define I_PCLK (1 << 8) +#define I_BCLK (1 << 9) +#define I_GATE_PCLK (1 << 12) +#define I_GATE_BCLK (1 << 13) +#define I_PCLK_MASK (I_GATE_PCLK | I_PCLK) +#define I_BCLK_MASK (I_GATE_BCLK | I_BCLK) + +#define CLK_INPUT_TIMER (I_PLL_0_2) +#define CLK_INPUT_UART (I_PLL_0_2) +#define CLK_INPUT_PWM (I_PLL_0_2) +#define CLK_INPUT_I2C (I_GATE_PCLK) +#define CLK_INPUT_SDHC (I_PLL_0_2 | I_GATE_PCLK) +#define CLK_INPUT_I2S (I_PLL_0_3 | 
I_EXTCLK1) +#define CLK_INPUT_I2S_IN1 (I_CLKnOUT) +#define CLK_INPUT_SPI (I_PLL_0_2) +#define CLK_INPUT_VIP0 (I_PLL_0_3 | I_EXTCLK1 | I_GATE_BCLK) +#define CLK_INPUT_VIP1 (I_PLL_0_3 | I_EXTCLK1 | I_EXTCLK2 | I_GATE_BCLK) +#define CLK_INPUT_MIPI (I_PLL_0_2) +#define CLK_INPUT_GMAC (I_PLL_0_3 | I_EXTCLK1) +#define CLK_INPUT_GMAC_IN1 (I_CLKnOUT) +#define CLK_INPUT_SPDIFTX (I_PLL_0_2) +#define CLK_INPUT_MPEGTS (I_GATE_BCLK) +#define CLK_INPUT_VR (I_GATE_BCLK) +#define CLK_INPUT_DIT (I_GATE_BCLK) +#define CLK_INPUT_PPM (I_PLL_0_2) +#define CLK_INPUT_EHCI (I_PLL_0_3) +#define CLK_INPUT_EHCI_IN1 (I_PLL_0_3 | I_EXTCLK1) +#define CLK_INPUT_VPU (I_GATE_PCLK | I_GATE_BCLK) +#define CLK_INPUT_CRYPTO (I_GATE_PCLK) +#define CLK_INPUT_SCALER (I_GATE_BCLK) +#define CLK_INPUT_OTG (I_PLL_0_3) +#define CLK_INPUT_OTG_IN1 (I_PLL_0_3 | I_EXTCLK1) +#define CLK_INPUT_PDM (I_GATE_PCLK) + +#endif diff -ENwbur a/drivers/clk/nexell/clk-s5pxx18-pll.c b/drivers/clk/nexell/clk-s5pxx18-pll.c --- a/drivers/clk/nexell/clk-s5pxx18-pll.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/clk/nexell/clk-s5pxx18-pll.c 2018-05-06 08:49:49.054690387 +0200 @@ -0,0 +1,838 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "clk-s5pxx18.h" +#include "clk-s5pxx18-pll.h" + +#define PLL_LOCKING_TIME 100 + +struct pll_pms { + long rate; /* unint Khz */ + int P; + int M; + int S; +}; + +struct clk_core { + const char *name; + int id, div, pll; + unsigned long rate; + struct clk_hw hw; + struct pll_pms pms; +}; + +#ifdef CONFIG_ARCH_S5P4418 +/* PLL 0,1 */ +static struct pll_pms pll0_1_pms[] = { + [0] = { + .rate = 1200000, .P = 3, .M = 300, .S = 1, + }, + [1] = { + .rate = 1100000, .P = 3, .M = 275, .S = 1, + }, + [2] = { + .rate = 1000000, .P = 3, .M = 250, .S = 1, + }, + [3] = { + .rate = 900000, .P = 3, .M = 225, .S = 1, + }, + [4] = { + .rate = 800000, .P = 3, .M = 200, .S = 1, + }, + [5] = { + .rate = 700000, .P = 3, .M = 175, .S = 1, + }, + [6] = { + .rate = 600000, .P = 2, .M = 200, .S = 2, + }, + [7] = { + .rate = 500000, .P = 3, .M = 250, .S = 2, + }, + [8] = { + .rate = 400000, .P = 3, .M = 200, .S = 2, + }, + [9] = { + .rate = 300000, .P = 2, .M = 200, .S = 3, + }, + [10] = { + .rate = 200000, .P = 3, .M = 200, .S = 3, + }, + [11] = { + .rate = 100000, .P = 3, .M = 200, .S = 4, + }, +}; + +/* PLL 2,3 */ +static struct pll_pms pll2_3_pms[] = { + [0] = { + .rate = 1200000, .P = 3, .M = 300, .S = 1, + }, + [1] = { + .rate = 1100000, .P = 3, .M = 275, .S = 1, + }, + [2] = { + .rate = 1000000, .P = 3, .M = 250, .S = 1, + }, + [3] = { + .rate = 900000, .P = 3, .M = 225, .S = 1, + }, + [4] = { + .rate = 800000, .P = 3, .M = 200, .S = 1, + }, + [5] = { + .rate = 700000, .P = 3, .M = 175, .S = 1, + }, + [6] = { + .rate = 600000, .P = 3, .M = 150, .S = 1, + }, + [7] = { + .rate = 500000, .P = 3, .M = 250, .S = 2, + }, + [8] = { + .rate = 400000, .P = 3, .M = 200, .S = 2, + }, + [9] = { + .rate = 300000, .P = 3, .M = 150, .S = 2, + }, + [10] = { + .rate = 200000, .P = 3, .M = 200, .S = 3, + }, + [11] = { + .rate = 100000, .P = 3, .M = 200, .S = 4, + }, +}; +#else /* S5P6818 */ +/* PLL 0,1 */ +static struct pll_pms pll0_1_pms[] = { + [0] = { + .rate = 1600000, .P = 6, .M = 400, .S = 0, + }, + [1] = { + .rate = 1500000, .P = 6, .M = 375, .S = 0, + }, + [2] = { + .rate = 1400000, .P = 6, .M = 350, .S = 0, + }, + [3] = { + .rate = 1300000, .P = 6, .M = 325, .S = 0, + }, + [4] = { + .rate = 1200000, .P = 3, .M = 300, .S = 1, + }, + [5] = { + .rate = 1100000, .P = 3, .M = 275, .S = 1, + }, + [6] = { + .rate = 1000000, .P = 3, .M = 250, .S = 1, + }, + [7] = { + .rate = 900000, .P = 3, .M = 225, .S = 1, + }, + [8] = { + .rate = 800000, .P = 3, .M = 200, .S = 1, + }, + [9] = { + .rate = 700000, .P = 3, .M = 175, .S = 1, + }, + [10] = { + .rate = 600000, .P = 2, .M = 200, .S = 2, + }, + [11] = { + .rate = 500000, .P = 3, .M = 250, .S = 2, + }, + [12] = { + .rate = 400000, .P = 3, .M = 200, .S = 2, + }, + [13] = { + .rate = 300000, .P = 2, .M = 200, .S = 3, + }, + [14] = { + .rate = 200000, .P = 3, .M = 200, .S = 3, + }, + [15] = { + .rate = 100000, .P = 3, .M = 200, .S = 4, + }, +}; + +/* PLL 2,3 */ +static struct pll_pms pll2_3_pms[] = { + [0] = { + .rate = 1600000, .P = 3, .M = 400, .S = 1, + }, + [1] = { + .rate = 1500000, .P = 3, .M = 375, .S = 1, + }, + [2] = { + .rate = 1400000, .P = 3, .M = 350, .S = 1, + }, + [3] = { + .rate = 1300000, .P = 3, .M = 325, .S = 1, + }, + [4] = { + .rate = 1200000, .P = 3, .M = 300, .S = 1, + }, + [5] = { + .rate = 1100000, .P = 3, .M = 275, .S = 1, + }, + [6] = { + .rate = 1000000, .P = 3, .M = 250, .S = 1, + }, + [7] = { + .rate = 900000, .P = 3, .M = 225, .S = 1, + }, + [8] = { + 
.rate = 800000, .P = 3, .M = 200, .S = 1, + }, + [9] = { + .rate = 700000, .P = 3, .M = 175, .S = 1, + }, + [10] = { + .rate = 600000, .P = 3, .M = 150, .S = 1, + }, + [11] = { + .rate = 500000, .P = 3, .M = 250, .S = 2, + }, + [12] = { + .rate = 400000, .P = 3, .M = 200, .S = 2, + }, + [13] = { + .rate = 300000, .P = 3, .M = 150, .S = 2, + }, + [14] = { + .rate = 200000, .P = 3, .M = 200, .S = 3, + }, + [15] = { + .rate = 100000, .P = 3, .M = 200, .S = 4, + }, +}; +#endif + +#define PLL0_1_SIZE ARRAY_SIZE(pll0_1_pms) +#define PLL2_3_SIZE ARRAY_SIZE(pll2_3_pms) + +#define PMS_RATE(p, i) ((&p[i])->rate) +#define PMS_P(p, i) ((&p[i])->P) +#define PMS_M(p, i) ((&p[i])->M) +#define PMS_S(p, i) ((&p[i])->S) + +#define PLL_S_BITPOS 0 +#define PLL_M_BITPOS 8 +#define PLL_P_BITPOS 18 + +static void *ref_clk_base; +static spinlock_t pll_lock = __SPIN_LOCK_UNLOCKED(pll_lock); + +static void nx_pll_set_rate(int PLL, int P, int M, int S) +{ + struct reg_clkpwr *reg = ref_clk_base; + unsigned long flags; + + spin_lock_irqsave(&pll_lock, flags); + + /* + * 1. change PLL0 clock to Oscillator Clock + */ + reg->PLLSETREG[PLL] &= ~(1 << 28); /* pll bypass on, xtal clock use */ + reg->CLKMODEREG0 = (1 << PLL); /* update pll */ + + while (readl(®->CLKMODEREG0) & (1 << 31)) + ;/* wait for change update pll*/ + + /* + * 2. PLL Power Down & PMS value setting + */ + reg->PLLSETREG[PLL] = + ((1UL << 29 | /* power down */ + (0UL << 28) | /* clock bypass on, xtal clock use */ + (S << PLL_S_BITPOS) | + (M << PLL_M_BITPOS) | + (P << PLL_P_BITPOS))); + reg->CLKMODEREG0 = (1 << PLL); /* update pll */ + + while (readl(®->CLKMODEREG0) & (1 << 31)) + ; /* wait for change update pll */ + + udelay(10); + + /* + * 3. Update PLL & wait PLL locking + */ + reg->PLLSETREG[PLL] &= ~((u32)(1UL << 29));/* pll power up */ + reg->CLKMODEREG0 = (1 << PLL); /* update pll */ + + while (readl(®->CLKMODEREG0) & (1 << 31)) + ;/* wait for change update pll */ + + udelay(PLL_LOCKING_TIME);/* 1000us */ + + /* + * 4. 
Change to PLL clock + */ + reg->PLLSETREG[PLL] |= (1 << 28);/* pll bypass off, pll clock use */ + reg->CLKMODEREG0 = (1 << PLL); /* update pll */ + + while (readl(®->CLKMODEREG0) & (1 << 31)) + ;/* wait for change update pll */ + + spin_unlock_irqrestore(&pll_lock, flags); +} + +#if defined(CONFIG_ARCH_S5P4418) +asmlinkage int __invoke_nexell_fn_smc(u32, u32, u32, u32); +#endif + +int nx_change_bus_freq(u32 pll_data) +{ +#if defined(CONFIG_ARCH_S5P6818) + uint32_t pll_num = pll_data & 0x00000003; + uint32_t s = (pll_data & 0x000000fc) >> 2; + uint32_t m = (pll_data & 0x00ffff00) >> 8; + uint32_t p = (pll_data & 0xff000000) >> 24; + + nx_pll_set_rate(pll_num, p, m, s); + return 0; +#else + unsigned long flags; + int ret; + + spin_lock_irqsave(&pll_lock, flags); + ret = __invoke_nexell_fn_smc(0x82000009, pll_data, 0, 0); + spin_unlock_irqrestore(&pll_lock, flags); + + return ret; +#endif +} +EXPORT_SYMBOL(nx_change_bus_freq); + +static unsigned long pll_round_rate(int pllno, unsigned long rate, int *p, + int *m, int *s) +{ + struct pll_pms *pms; + int len, idx = 0, n = 0, l = 0; + long freq = 0; + + rate /= 1000; + pr_debug("PLL.%d, %ld", pllno, rate); + + switch (pllno) { + case 0: + case 1: + pms = pll0_1_pms; + len = PLL0_1_SIZE; + break; + case 2: + case 3: + pms = pll2_3_pms; + len = PLL2_3_SIZE; + break; + default: + pr_info("Not support pll.%d (0~3)\n", pllno); + return 0; + } + + /* array index so -1 */ + idx = (len / 2) - 1; + + while (1) { + l = n + idx; + freq = PMS_RATE(pms, l); + if (freq == rate) + break; + + if (rate > freq) + len -= idx, idx >>= 1; + else + n += idx, idx = (len - n - 1) >> 1; + + if (0 == idx) { + int k = l; + + if (abs(rate - freq) > abs(rate - PMS_RATE(pms, k + 1))) + k += 1; + + if (abs(rate - PMS_RATE(pms, k)) >= + abs(rate - PMS_RATE(pms, k - 1))) + k -= 1; + + l = k; + break; + } + } + + if (p) + *p = PMS_P(pms, l); + if (m) + *m = PMS_M(pms, l); + if (s) + *s = PMS_S(pms, l); + + pr_debug("(real %ld Khz, P=%d ,M=%3d, S=%d)\n", PMS_RATE(pms, l), + PMS_P(pms, l), PMS_M(pms, l), PMS_S(pms, l)); + + return (PMS_RATE(pms, l) * 1000); +} + +static unsigned long ref_clk = 24000000UL; + +#define getquotient(v, d) (v / d) + +#define DIV_CPUG0 0 +#define DIV_BUS 1 +#define DIV_MEM 2 +#define DIV_G3D 3 +#define DIV_VPU 4 +#define DIV_DISP 5 +#define DIV_HDMI 6 +#define DIV_CPUG1 7 +#define DIV_CCI4 8 + +#define DVO0 3 +#define DVO1 9 +#define DVO2 15 +#define DVO3 21 + +static inline unsigned int pll_rate(unsigned int pllN, unsigned int xtal) +{ + struct reg_clkpwr *reg = ref_clk_base; + unsigned int val, val1, nP, nM, nS, nK; + unsigned int temp = 0; + + val = reg->PLLSETREG[pllN]; + val1 = reg->PLLSETREG_SSCG[pllN]; + xtal /= 1000; /* Unit Khz */ + + nP = (val >> 18) & 0x03F; + nM = (val >> 8) & 0x3FF; + nS = (val >> 0) & 0x0FF; + nK = (val1 >> 16) & 0xFFFF; + + if ((pllN > 1) && nK) + temp = (unsigned int)( + getquotient((getquotient((nK * 1000), 65536) * xtal), + nP) >> nS); + + return (unsigned int)((getquotient((nM * xtal), nP) >> nS) * 1000) + + temp; +} + +static inline unsigned int pll_dvo(int dvo) +{ + struct reg_clkpwr *reg = ref_clk_base; + + return (reg->DVOREG[dvo] & 0x7); +} + +static inline unsigned int pll_div(int dvo) +{ + struct reg_clkpwr *reg = ref_clk_base; + unsigned int val = reg->DVOREG[dvo]; + + return ((((val >> DVO3) & 0x3F) + 1) << 24) | + ((((val >> DVO2) & 0x3F) + 1) << 16) | + ((((val >> DVO1) & 0x3F) + 1) << 8) | + ((((val >> DVO0) & 0x3F) + 1) << 0); +} + +#define PLLN_RATE(n) (pll_rate(n, ref_clk)) /* 0~ 3 */ +#define 
CPU_FCLK_RATE(n) \ + (pll_rate(pll_dvo(n), ref_clk) / ((pll_div(n) >> 0) & 0x3F)) +#define CPU_HCLK_RATE(n) \ + (pll_rate(pll_dvo(n), ref_clk) / ((pll_div(n) >> 0) & 0x3F) / \ + ((pll_div(n) >> 8) & 0x3F)) +#define MEM_FCLK_RATE() \ + (pll_rate(pll_dvo(DIV_MEM), ref_clk) / \ + ((pll_div(DIV_MEM) >> 0) & 0x3F) / ((pll_div(DIV_MEM) >> 8) & 0x3F)) +#define MEM_DCLK_RATE() \ + (pll_rate(pll_dvo(DIV_MEM), ref_clk) / ((pll_div(DIV_MEM) >> 0) & 0x3F)) +#define MEM_BCLK_RATE() \ + (pll_rate(pll_dvo(DIV_MEM), ref_clk) / \ + ((pll_div(DIV_MEM) >> 0) & 0x3F) / ((pll_div(DIV_MEM) >> 8) & 0x3F) / \ + ((pll_div(DIV_MEM) >> 16) & 0x3F)) +#define MEM_PCLK_RATE() \ + (pll_rate(pll_dvo(DIV_MEM), ref_clk) / \ + ((pll_div(DIV_MEM) >> 0) & 0x3F) / ((pll_div(DIV_MEM) >> 8) & 0x3F) / \ + ((pll_div(DIV_MEM) >> 16) & 0x3F) / \ + ((pll_div(DIV_MEM) >> 24) & 0x3F)) +#define BUS_BCLK_RATE() \ + (pll_rate(pll_dvo(DIV_BUS), ref_clk) / ((pll_div(DIV_BUS) >> 0) & 0x3F)) +#define BUS_PCLK_RATE() \ + (pll_rate(pll_dvo(DIV_BUS), ref_clk) / \ + ((pll_div(DIV_BUS) >> 0) & 0x3F) / ((pll_div(DIV_BUS) >> 8) & 0x3F)) +#define G3D_BCLK_RATE() \ + (pll_rate(pll_dvo(DIV_G3D), ref_clk) / ((pll_div(DIV_G3D) >> 0) & 0x3F)) +#define VPU_BCLK_RATE() \ + (pll_rate(pll_dvo(DIV_VPU), ref_clk) / ((pll_div(DIV_VPU) >> 0) & 0x3F)) +#define VPU_PCLK_RATE() \ + (pll_rate(pll_dvo(DIV_VPU), ref_clk) / \ + ((pll_div(DIV_VPU) >> 0) & 0x3F) / ((pll_div(DIV_VPU) >> 8) & 0x3F)) +#define DIS_BCLK_RATE() \ + (pll_rate(pll_dvo(DIV_DISP), ref_clk) / \ + ((pll_div(DIV_DISP) >> 0) & 0x3F)) +#define DIS_PCLK_RATE() \ + (pll_rate(pll_dvo(DIV_DISP), ref_clk) / \ + ((pll_div(DIV_DISP) >> 0) & 0x3F) / \ + ((pll_div(DIV_DISP) >> 8) & 0x3F)) +#define HDMI_PCLK_RATE() \ + (pll_rate(pll_dvo(DIV_HDMI), ref_clk) / \ + ((pll_div(DIV_HDMI) >> 0) & 0x3F)) +#define CCI_BCLK_RATE() \ + (pll_rate(pll_dvo(DIV_CCI4), ref_clk) / \ + ((pll_div(DIV_CCI4) >> 0) & 0x3F)) +#define CCI_PCLK_RATE() \ + (pll_rate(pll_dvo(DIV_CCI4), ref_clk) / \ + ((pll_div(DIV_CCI4) >> 0) & 0x3F) / \ + ((pll_div(DIV_CCI4) >> 8) & 0x3F)) + +/* + * core frequency clk interface + */ +static struct clk_core clk_pll_dev[] = { + [ID_CPU_PLL0] = { + .id = ID_CPU_PLL0, .name = CLK_CPU_PLL0, + }, + [ID_CPU_PLL1] = { + .id = ID_CPU_PLL1, .name = CLK_CPU_PLL1, + }, + [ID_CPU_PLL2] = { + .id = ID_CPU_PLL2, .name = CLK_CPU_PLL2, + }, + [ID_CPU_PLL3] = { + .id = ID_CPU_PLL3, .name = CLK_CPU_PLL3, + }, + [ID_CPU_FCLK] = {.id = ID_CPU_FCLK, + .name = CLK_CPU_FCLK, + .div = DIV_CPUG0}, + [ID_CPU_HCLK] = {.id = ID_CPU_HCLK, + .name = CLK_CPU_HCLK, + .div = DIV_CPUG0}, + [ID_MEM_FCLK] = {.id = ID_MEM_FCLK, + .name = CLK_MEM_FCLK, + .div = DIV_MEM}, + [ID_MEM_DCLK] = {.id = ID_MEM_DCLK, + .name = CLK_MEM_DCLK, + .div = DIV_MEM}, + [ID_MEM_BCLK] = {.id = ID_MEM_BCLK, + .name = CLK_MEM_BCLK, + .div = DIV_MEM}, + [ID_MEM_PCLK] = {.id = ID_MEM_PCLK, + .name = CLK_MEM_PCLK, + .div = DIV_MEM}, + [ID_BUS_BCLK] = {.id = ID_BUS_BCLK, + .name = CLK_BUS_BCLK, + .div = DIV_BUS}, + [ID_BUS_PCLK] = {.id = ID_BUS_PCLK, + .name = CLK_BUS_PCLK, + .div = DIV_BUS}, + [ID_VPU_BCLK] = {.id = ID_VPU_BCLK, + .name = CLK_VPU_BCLK, + .div = DIV_VPU}, + [ID_VPU_PCLK] = {.id = ID_VPU_PCLK, + .name = CLK_VPU_PCLK, + .div = DIV_VPU}, + [ID_DIS_BCLK] = {.id = ID_DIS_BCLK, + .name = CLK_DIS_BCLK, + .div = DIV_DISP}, + [ID_DIS_PCLK] = {.id = ID_DIS_PCLK, + .name = CLK_DIS_PCLK, + .div = DIV_DISP}, + [ID_CCI_BCLK] = {.id = ID_CCI_BCLK, + .name = CLK_CCI_BCLK, + .div = DIV_CCI4}, + [ID_CCI_PCLK] = {.id = ID_CCI_PCLK, + .name = CLK_CCI_PCLK, + .div = 
DIV_CCI4}, + [ID_G3D_BCLK] = {.id = ID_G3D_BCLK, + .name = CLK_G3D_BCLK, + .div = DIV_G3D}, +}; + +static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long rate) +{ + struct clk_core *clk_data = to_clk_core(hw); + int id = clk_data->id; + + switch (id) { + case ID_CPU_PLL0: + rate = PLLN_RATE(0); + break; + case ID_CPU_PLL1: + rate = PLLN_RATE(1); + break; + case ID_CPU_PLL2: + rate = PLLN_RATE(2); + break; + case ID_CPU_PLL3: + rate = PLLN_RATE(3); + break; + case ID_CPU_FCLK: + rate = CPU_FCLK_RATE(DIV_CPUG0); + break; + case ID_CPU_HCLK: + rate = CPU_HCLK_RATE(DIV_CPUG0); + break; + case ID_MEM_FCLK: + rate = MEM_FCLK_RATE(); + break; + case ID_BUS_BCLK: + rate = BUS_BCLK_RATE(); + break; + case ID_BUS_PCLK: + rate = BUS_PCLK_RATE(); + break; + case ID_MEM_DCLK: + rate = MEM_DCLK_RATE(); + break; + case ID_MEM_BCLK: + rate = MEM_BCLK_RATE(); + break; + case ID_MEM_PCLK: + rate = MEM_PCLK_RATE(); + break; + case ID_G3D_BCLK: + rate = G3D_BCLK_RATE(); + break; + case ID_VPU_BCLK: + rate = VPU_BCLK_RATE(); + break; + case ID_VPU_PCLK: + rate = VPU_PCLK_RATE(); + break; + case ID_DIS_BCLK: + rate = DIS_BCLK_RATE(); + break; + case ID_DIS_PCLK: + rate = DIS_PCLK_RATE(); + break; + case ID_CCI_BCLK: + rate = CCI_BCLK_RATE(); + break; + case ID_CCI_PCLK: + rate = CCI_PCLK_RATE(); + break; + default: + pr_info("Unknown clock ID [%d] ...\n", id); + break; + } + + pr_debug("%s: name %s id %d rate %ld\n", __func__, clk_data->name, + clk_data->id, rate); + return rate; +} + +static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate, + unsigned long *prate) +{ + struct clk_core *clk_data = to_clk_core(hw); + struct pll_pms *pms = &clk_data->pms; + int id = clk_data->id; + long rate = 0; + + /* clear P,M,S */ + pms->P = 0, pms->M = 0, pms->S = 0; + rate = pll_round_rate(id, drate, &pms->P, &pms->M, &pms->S); + + pr_debug("%s: name %s id %d (%lu, %lu) <%d,%d,%d>\n", __func__, + clk_data->name, id, drate, rate, pms->P, pms->M, pms->S); + return rate; +} + +static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate, + unsigned long prate) +{ + struct clk_core *clk_data = to_clk_core(hw); + struct pll_pms *pms = &clk_data->pms; + int id = clk_data->id; + long rate = drate; + + if (!pms->P && !pms->M && !pms->S) + rate = pll_round_rate(id, drate, &pms->P, &pms->M, &pms->S); + + pr_debug("%s: name %s id %d (%lu, %lu) <%d,%d,%d>\n", __func__, + clk_data->name, id, drate, rate, pms->P, pms->M, pms->S); + nx_pll_set_rate(id, pms->P, pms->M, pms->S); + + /* clear P,M,S */ + pms->P = 0, pms->M = 0, pms->S = 0; + + return 0; +} + +static const struct clk_ops clk_pll_ops = { + .recalc_rate = clk_pll_recalc_rate, + .round_rate = clk_pll_round_rate, + .set_rate = clk_pll_set_rate, +}; + +static const struct clk_ops clk_dev_ops = { + .recalc_rate = clk_pll_recalc_rate, +}; + +static struct clk *clk_pll_clock_register(const char *name, + const char *parent_name, + struct clk_hw *hw, + const struct clk_ops *ops, + unsigned long flags) +{ + struct clk *clk; + struct clk_init_data init; + + init.name = name; + init.ops = ops; + init.flags = flags; + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = parent_name ? 
1 : 0; + hw->init = &init; + + clk = clk_register(NULL, hw); + if (IS_ERR(clk)) { + pr_err("%s: failed to register pll clock %s\n", __func__, + init.name); + return NULL; + } + + if (clk_register_clkdev(clk, init.name, NULL)) + pr_err("%s: failed to register lookup for %s\n", __func__, + init.name); + + return clk; +} + +static void __init clk_pll_sysclk_setup(struct device_node *np) +{ + struct clk *clk; + unsigned long flags = 0;/* | CLK_GET_RATE_NOCACHE; */ + unsigned long rate[ID_CPU_FCLK] = { 0, }; + int i; + + for (i = 0; i < ID_CPU_FCLK; i++) { + clk = clk_pll_clock_register(clk_pll_dev[i].name, NULL, + &clk_pll_dev[i].hw, &clk_pll_ops, + flags); + if (NULL == clk) + continue; + rate[i] = clk_get_rate(clk); + } + pr_info("PLL : [0] = %10lu, [1] = %10lu, [2] = %10lu, [3] = %10lu\n", + rate[0], rate[1], rate[2], rate[3]); +} + +static void __init clk_pll_of_clocks_setup(struct device_node *node) +{ + struct clk_core *clk_data = NULL; + struct clk *clk; + unsigned long flags = CLK_IS_BASIC; + const char *parent_name = NULL; + int i, div, pll; + + for (i = ID_CPU_FCLK; ID_END > i; i++) { + clk_data = &clk_pll_dev[i]; + div = clk_data->div; + pll = pll_dvo(div); + clk_data->pll = pll; + parent_name = clk_pll_dev[pll].name; + + clk = clk_pll_clock_register(clk_data->name, parent_name, + &clk_data->hw, &clk_dev_ops, flags); + if (clk) + clk_data->rate = clk_get_rate(clk); + } +} + +static void __init clk_pll_of_clocks_dump(struct device_node *np) +{ + struct clk_core *clk_data = clk_pll_dev; + int pll = pll_dvo(DIV_CPUG1); + + /* cpu 0, 1 : div 0, 7 */ + pr_info("(%d) PLL%d: CPU FCLK = %10lu, HCLK = %9lu (G0)\n", + clk_data[ID_CPU_FCLK].div, clk_data[ID_CPU_FCLK].pll, + clk_data[ID_CPU_FCLK].rate, clk_data[ID_CPU_HCLK].rate); + + pr_info("(%d) PLL%d: CPU FCLK = %10lu, HCLK = %9lu (G1)\n", DIV_CPUG1, + pll, (ulong)CPU_FCLK_RATE(DIV_CPUG1), + (ulong)CPU_HCLK_RATE(DIV_CPUG1)); + + /* memory */ + pr_info("(%d) PLL%d: MEM FCLK = %10lu, DCLK = %9lu, BCLK = %9lu,", + clk_data[ID_MEM_FCLK].div, clk_data[ID_MEM_FCLK].pll, + clk_data[ID_MEM_FCLK].rate, clk_data[ID_MEM_DCLK].rate, + clk_data[ID_MEM_BCLK].rate); + + pr_info("PCLK = %9lu\n", clk_data[ID_MEM_PCLK].rate); + + /* bus */ + pr_info("(%d) PLL%d: BUS BCLK = %10lu, PCLK = %9lu\n", + clk_data[ID_BUS_BCLK].div, clk_data[ID_BUS_BCLK].pll, + clk_data[ID_BUS_BCLK].rate, clk_data[ID_BUS_PCLK].rate); + + /* cci400 */ + pr_info("(%d) PLL%d: CCI4 BCLK = %10lu, PCLK = %9lu\n", + clk_data[ID_CCI_BCLK].div, clk_data[ID_CCI_BCLK].pll, + clk_data[ID_CCI_BCLK].rate, clk_data[ID_CCI_PCLK].rate); + + /* 3d graphic */ + pr_info("(%d) PLL%d: G3D BCLK = %10lu\n", clk_data[ID_G3D_BCLK].div, + clk_data[ID_G3D_BCLK].pll, clk_data[ID_G3D_BCLK].rate); + + /* coda (vpu) */ + pr_info("(%d) PLL%d: VPU BCLK = %10lu, PCLK = %9lu\n", + clk_data[ID_VPU_BCLK].div, clk_data[ID_VPU_BCLK].pll, + clk_data[ID_VPU_BCLK].rate, clk_data[ID_VPU_PCLK].rate); + + /* display */ + pr_info("(%d) PLL%d: DISP BCLK = %10lu, PCLK = %9lu\n", + clk_data[ID_DIS_BCLK].div, clk_data[ID_DIS_BCLK].pll, + clk_data[ID_DIS_BCLK].rate, clk_data[ID_DIS_PCLK].rate); +} + +static void __init clk_pll_of_setup(struct device_node *node) +{ + unsigned int pllin; + struct resource regs; + + if (of_address_to_resource(node, 0, &regs) < 0) { + pr_err("failed to get clock PLL register resource\n"); + return; + } + + ref_clk_base = ioremap(regs.start, resource_size(&regs)); + if (ref_clk_base == NULL) { + pr_err("failed to map clock control base address\n"); + return; + } + /* note: the property name below is misspelled in the binding; it is + * read as-is to stay compatible with the matching DTS + */ + if (0 == of_property_read_u32(node, "ref-freuecny", &pllin)) + ref_clk = 
pllin; + + clk_pll_sysclk_setup(node); + clk_pll_of_clocks_setup(node); + clk_pll_of_clocks_dump(node); + + pr_info("CPU REF HZ: %lu hz (0x%08x:0x%p)\n", ref_clk, 0xc0010000, + ref_clk_base); +} + +CLK_OF_DECLARE(s5pxx18, "nexell,s5pxx18,pll", clk_pll_of_setup); diff -ENwbur a/drivers/clk/nexell/clk-s5pxx18-pll.h b/drivers/clk/nexell/clk-s5pxx18-pll.h --- a/drivers/clk/nexell/clk-s5pxx18-pll.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/clk/nexell/clk-s5pxx18-pll.h 2018-05-06 08:49:49.054690387 +0200 @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _CLK_S5P4818_PLL_H +#define _CLK_S5P4818_PLL_H + +enum { ID_CPU_PLL0 = 0, + ID_CPU_PLL1, + ID_CPU_PLL2, + ID_CPU_PLL3, + ID_CPU_FCLK, + ID_CPU_HCLK, + ID_BUS_BCLK, + ID_BUS_PCLK, + ID_MEM_FCLK, + ID_MEM_DCLK, + ID_MEM_BCLK, + ID_MEM_PCLK, + ID_G3D_BCLK, + ID_VPU_BCLK, + ID_VPU_PCLK, + ID_DIS_BCLK, + ID_DIS_PCLK, + ID_CCI_BCLK, + ID_CCI_PCLK, + ID_END, +}; + +struct reg_clkpwr { + unsigned int CLKMODEREG0; + unsigned int __Reserved0; + unsigned int PLLSETREG[4]; + unsigned int __Reserved1[2]; + unsigned int DVOREG[9]; + unsigned int __Reserved2; + unsigned int PLLSETREG_SSCG[6]; + unsigned int __reserved3[8]; + unsigned char __Reserved4[0x200 - 0x80]; + unsigned int GPIOWAKEUPRISEENB; + unsigned int GPIOWAKEUPFALLENB; + unsigned int GPIORSTENB; + unsigned int GPIOWAKEUPENB; + unsigned int GPIOINTENB; + unsigned int GPIOINTPEND; + unsigned int RESETSTATUS; + unsigned int INTENABLE; + unsigned int INTPEND; + unsigned int PWRCONT; + unsigned int PWRMODE; + unsigned int __Reserved5; + unsigned int SCRATCH[3]; + unsigned int SYSRSTCONFIG; + unsigned int __Reserved6[0x100-0x80]; + unsigned int PADSTRENGTHGPIO[5][2]; + unsigned int __Reserved7[2]; + unsigned int PADSTRENGTHBUS; +}; + +#define to_clk_core(_hw) container_of(_hw, struct clk_core, hw) + +#endif diff -ENwbur a/drivers/clk/nexell/Makefile b/drivers/clk/nexell/Makefile --- a/drivers/clk/nexell/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/clk/nexell/Makefile 2018-05-06 08:49:49.054690387 +0200 @@ -0,0 +1 @@ +obj-y += clk-s5pxx18.o clk-s5pxx18-pll.o diff -ENwbur a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig --- a/drivers/clocksource/Kconfig 2018-05-06 08:47:36.269301092 +0200 +++ b/drivers/clocksource/Kconfig 2018-05-06 08:49:49.082691523 +0200 @@ -615,4 +615,11 @@ Enable this option to use the Low Power controller timer as clocksource. +config CLKSRC_NEXELL_TIMER + bool "Support for s5p6818 timer generation" + def_bool y if ARCH_S5P6818 + select TIMER_OF if OF + help + This is a new clocksource driver for the Nexell timer. 
+ endmenu diff -ENwbur a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile --- a/drivers/clocksource/Makefile 2018-05-06 08:47:36.269301092 +0200 +++ b/drivers/clocksource/Makefile 2018-05-06 08:49:49.082691523 +0200 @@ -54,6 +54,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o obj-$(CONFIG_OWL_TIMER) += owl-timer.o +obj-$(CONFIG_CLKSRC_NEXELL_TIMER) += s5pxx18_timer.o obj-$(CONFIG_ARC_TIMERS) += arc_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o diff -ENwbur a/drivers/clocksource/s5pxx18_timer.c b/drivers/clocksource/s5pxx18_timer.c --- a/drivers/clocksource/s5pxx18_timer.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/clocksource/s5pxx18_timer.c 2018-05-06 08:49:49.082691523 +0200 @@ -0,0 +1,498 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CLK_SOURCE_HZ (10 * 1000000) /* or 1MHZ */ +#define CLK_EVENT_HZ (10 * 1000000) /* or 1MHZ */ + +/* timer register */ +#define REG_TCFG0 (0x00) +#define REG_TCFG1 (0x04) +#define REG_TCON (0x08) +#define REG_TCNTB0 (0x0C) +#define REG_TCMPB0 (0x10) +#define REG_TCNT0 (0x14) +#define REG_CSTAT (0x44) + +#define TCON_BIT_AUTO (1 << 3) +#define TCON_BIT_INVT (1 << 2) +#define TCON_BIT_UP (1 << 1) +#define TCON_BIT_RUN (1 << 0) +#define TCFG0_BIT_CH(ch) (ch == 0 || ch == 1 ? 0 : 8) +#define TCFG1_BIT_CH(ch) (ch * 4) +#define TCON_BIT_CH(ch) (ch ? ch * 4 + 4 : 0) +#define TINT_CSTAT_BIT_CH(ch) (ch + 5) +#define TINT_CSTAT_MASK (0x1F) +#define TIMER_TCNT_OFFS (0xC) + +/* timer data structs */ +struct timer_info { + int channel; + int interrupt; + const char *clock_name; + struct clk *clk; + unsigned long request; + unsigned long rate; + int tmux; + int prescale; + unsigned int tcount; + unsigned int rcount; +}; + +struct timer_of_dev { + void __iomem *base; + struct clk *pclk; + struct timer_info timer_source; + struct timer_info timer_event; +}; + +static struct timer_of_dev *timer_dev; +#define get_timer_dev() ((struct timer_of_dev *)timer_dev) + +static inline void timer_periph_reset(int id) { return; } + +static inline void timer_clock(void __iomem *base, int ch, int mux, int scl) +{ + u32 val = readl(base + REG_TCFG0) & ~(0xFF << TCFG0_BIT_CH(ch)); + + writel(val | ((scl - 1) << TCFG0_BIT_CH(ch)), base + REG_TCFG0); + val = readl(base + REG_TCFG1) & ~(0xF << TCFG1_BIT_CH(ch)); + writel(val | (mux << TCFG1_BIT_CH(ch)), base + REG_TCFG1); +} + +static inline void timer_count(void __iomem *base, int ch, unsigned int cnt) +{ + writel((cnt - 1), base + REG_TCNTB0 + (TIMER_TCNT_OFFS * ch)); + writel((cnt - 1), base + REG_TCMPB0 + (TIMER_TCNT_OFFS * ch)); +} + +static inline void timer_start(void __iomem *base, int ch, int irqon) +{ + int on = irqon ? 
1 : 0; + u32 val = readl(base + REG_CSTAT) & ~(TINT_CSTAT_MASK << 5 | 0x1 << ch); + + writel(val | (0x1 << TINT_CSTAT_BIT_CH(ch) | on << ch), + base + REG_CSTAT); + val = readl(base + REG_TCON) & ~(0xE << TCON_BIT_CH(ch)); + writel(val | (TCON_BIT_UP << TCON_BIT_CH(ch)), base + REG_TCON); + + val &= ~(TCON_BIT_UP << TCON_BIT_CH(ch)); + val |= ((TCON_BIT_AUTO | TCON_BIT_RUN) << TCON_BIT_CH(ch)); + writel(val, base + REG_TCON); +} + +static inline void timer_stop(void __iomem *base, int ch, int irqon) +{ + int on = irqon ? 1 : 0; + u32 val = readl(base + REG_CSTAT) & ~(TINT_CSTAT_MASK << 5 | 0x1 << ch); + + writel(val | (0x1 << TINT_CSTAT_BIT_CH(ch) | on << ch), + base + REG_CSTAT); + val = readl(base + REG_TCON) & ~(TCON_BIT_RUN << TCON_BIT_CH(ch)); + writel(val, base + REG_TCON); +} + +static inline unsigned int timer_read(void __iomem *base, int ch) +{ + return readl(base + REG_TCNT0 + (TIMER_TCNT_OFFS * ch)); +} + +static inline u32 timer_read_count(void) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info; + + if (NULL == dev || NULL == dev->base) + return 0; + + info = &dev->timer_source; + + info->rcount = (info->tcount - timer_read(dev->base, info->channel)); + return (u64)info->rcount; +} + +/* + * Timer clock source + */ +static void timer_clock_select(struct timer_of_dev *dev, + struct timer_info *info) +{ + unsigned long rate, tout = 0; + unsigned long mout, thz, delt = (-1UL); + unsigned long frequency = info->request; + int tscl = 0, tmux = 5, smux = 0, pscl = 0; + int from_tclk = 0; + + if (dev->pclk) { + rate = clk_get_rate(dev->pclk); + for (smux = 0; 5 > smux; smux++) { + mout = rate / (1 << smux), pscl = mout / frequency; + thz = mout / (pscl ? pscl : 1); + if (!(mout % frequency) && 256 > pscl) { + tout = thz, tmux = smux, tscl = pscl; + break; + } + if (pscl > 256) + continue; + if (abs(frequency - thz) >= delt) + continue; + tout = thz, tmux = smux, tscl = pscl; + delt = abs(frequency - thz); + } + } + + if (tout != frequency) { + rate = clk_round_rate(info->clk, frequency); + if (abs(frequency - tout) >= abs(frequency - rate)) { + clk_set_rate(info->clk, rate); + clk_prepare_enable(info->clk); + tout = rate, tmux = 5, tscl = 1, from_tclk = 1; + } + } + + if (dev->pclk && !from_tclk) { + clk_put(info->clk); + info->clk = NULL; + rate = clk_get_rate(dev->pclk); /* restore pclk */ + } + + info->tmux = tmux; + info->prescale = tscl; + info->tcount = tout / HZ; + info->rate = tout; + + pr_debug("%s (ch:%d, mux=%d, scl=%d, rate=%ld, %s)\n", __func__, + info->channel, tmux, tscl, tout, from_tclk ? 
"TCLK" : "PCLK"); +} + +static void timer_source_suspend(struct clocksource *cs) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_source; + void __iomem *base = dev->base; + int ch = info->channel; + + if (info->clk) { + clk_disable_unprepare(info->clk); + } + + info->rcount = (info->tcount - timer_read(base, ch)); + timer_stop(base, ch, 0); +} + +static void timer_source_resume(struct clocksource *cs) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_source; + void __iomem *base = dev->base; + int ch = info->channel; + ulong flags; + + pr_debug("%s (ch:%d, mux:%d, scale:%d cnt:0x%x,0x%x)\n", __func__, ch, + info->tmux, info->prescale, info->rcount, info->tcount); + + local_irq_save(flags); + + if (info->clk) { + clk_set_rate(info->clk, info->rate); + clk_prepare_enable(info->clk); + } + + timer_stop(base, ch, 0); + timer_clock(base, ch, info->tmux, info->prescale); + timer_count(base, ch, info->rcount + 1); /* restore count */ + timer_start(base, ch, 0); + timer_count(base, ch, info->tcount + 1); /* next count */ + + local_irq_restore(flags); +} + +static u64 timer_source_read(struct clocksource *cs) +{ + return (u64)timer_read_count(); +} + +static struct clocksource timer_clocksource = { + .name = "source timer", + .rating = 300, + .read = timer_source_read, + .mask = CLOCKSOURCE_MASK(32), + .shift = 20, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .suspend = timer_source_suspend, + .resume = timer_source_resume, +}; + +static int __init timer_source_of_init(struct device_node *node) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_source; + struct clocksource *cs = &timer_clocksource; + void __iomem *base = dev->base; + int ch = info->channel; + + info->request = CLK_SOURCE_HZ; + + timer_clock_select(dev, info); + + /* reset tcount */ + info->tcount = 0xFFFFFFFF; + + clocksource_register_hz(cs, info->rate); + + timer_stop(base, ch, 0); + timer_clock(base, ch, info->tmux, info->prescale); + timer_count(base, ch, 0); + timer_start(base, ch, 0); + + pr_debug("timer.%d: source, %9lu(HZ:%d), mult:%u\n", ch, info->rate, HZ, + cs->mult); + return 0; +} + +/* + * Timer clock event + */ +static void timer_event_resume(struct clock_event_device *evt) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_event; + void __iomem *base = dev->base; + int ch = info->channel; + + pr_debug("%s (ch:%d, mux:%d, scale:%d)\n", __func__, ch, info->tmux, + info->prescale); + + timer_stop(base, ch, 1); + timer_clock(base, ch, info->tmux, info->prescale); +} + +static int timer_event_shutdown(struct clock_event_device *evt) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_event; + void __iomem *base = dev->base; + int ch = info->channel; + + timer_stop(base, ch, 0); + + return 0; +} + +static int timer_event_set_oneshot(struct clock_event_device *evt) +{ + return 0; +} + +static int timer_event_set_periodic(struct clock_event_device *evt) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_event; + void __iomem *base = dev->base; + int ch = info->channel; + unsigned long cnt = info->tcount; + + timer_stop(base, ch, 0); + timer_count(base, ch, cnt); + timer_start(base, ch, 1); + + return 0; +} + +static int timer_event_set_next(unsigned long delta, + struct clock_event_device *evt) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_event; + void 
__iomem *base = dev->base; + int ch = info->channel; + ulong flags; + + raw_local_irq_save(flags); + + timer_stop(base, ch, 0); + timer_count(base, ch, delta); + timer_start(base, ch, 1); + + raw_local_irq_restore(flags); + return 0; +} + +static struct clock_event_device timer_clock_event = { + .name = "event timer", + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_state_shutdown = timer_event_shutdown, + .set_state_periodic = timer_event_set_periodic, + .set_state_oneshot = timer_event_set_oneshot, + .tick_resume = timer_event_shutdown, + .set_next_event = timer_event_set_next, + .resume = timer_event_resume, + .rating = 50, /* Lower than dummy timer (for 6818) */ +}; + +static irqreturn_t timer_event_handler(int irq, void *dev_id) +{ + struct clock_event_device *evt = &timer_clock_event; + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_event; + void __iomem *base = dev->base; + int ch = info->channel; + u32 val; + + /* clear status */ + val = readl(base + REG_CSTAT) & ~(TINT_CSTAT_MASK << 5); + val |= (0x1 << TINT_CSTAT_BIT_CH(ch)); + writel(val, base + REG_CSTAT); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static struct irqaction timer_event_irqaction = { + .name = "Event Timer IRQ", + .flags = IRQF_TIMER, /* removed IRQF_DISABLED kernel 4.1.15 */ + .handler = timer_event_handler, +}; + +#ifdef CONFIG_ARM64 +/* + * to __delay , refer to arch_timer.h and arm64 lib delay.c + */ +u64 arch_counter_get_cntvct(void) { return timer_read_count(); } +EXPORT_SYMBOL(arch_counter_get_cntvct); + +int arch_timer_arch_init(void) { return 0; } +#endif + +static int __init timer_event_of_init(struct device_node *node) +{ + struct timer_of_dev *dev = get_timer_dev(); + struct timer_info *info = &dev->timer_event; + struct clock_event_device *evt = &timer_clock_event; + void __iomem *base = dev->base; + int ch = info->channel; + + info->request = CLK_EVENT_HZ; + + timer_clock_select(dev, info); + timer_stop(base, ch, 1); + timer_clock(base, ch, info->tmux, info->prescale); + + setup_irq(info->interrupt, &timer_event_irqaction); + clockevents_calc_mult_shift(evt, info->rate, 5); + evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt); + evt->min_delta_ns = clockevent_delta2ns(0xf, evt); + evt->cpumask = cpumask_of(0); + evt->irq = info->interrupt; + + clockevents_register_device(evt); + + pr_debug("timer.%d: event , %9lu(HZ:%d), mult:%u\n", + ch, info->rate, HZ, evt->mult); + return 0; +} + +static int __init +timer_get_device_data(struct device_node *node, struct timer_of_dev *dev) +{ + struct timer_info *tsrc = &dev->timer_source; + struct timer_info *tevt = &dev->timer_event; + + dev->base = of_iomap(node, 0); + if (!dev->base) { + pr_err("Can't map registers for timer!"); + return -EINVAL; + } + + if (of_property_read_u32(node, "clksource", &tsrc->channel)) { + pr_err("timer node is missing 'clksource'\n"); + return -EINVAL; + } + + if (of_property_read_u32(node, "clkevent", &tevt->channel)) { + pr_err("timer node is missing 'clkevent'\n"); + return -EINVAL; + } + tevt->interrupt = irq_of_parse_and_map(node, 0); + + tsrc->clk = of_clk_get(node, 0); + if (IS_ERR(tsrc->clk)) { + pr_err("failed timer tsrc clock\n"); + return -EINVAL; + } + + tevt->clk = of_clk_get(node, 1); + if (IS_ERR(tevt->clk)) { + pr_err("failed timer event clock\n"); + return -EINVAL; + } + + dev->pclk = of_clk_get(node, 2); + if (IS_ERR(dev->pclk)) + dev->pclk = NULL; + + pr_debug("%s : ch %d,%d irq %d\n", node->name, tsrc->channel, + tevt->channel, 
tevt->interrupt); + + return 0; +} + +#ifdef CONFIG_ARM +static struct delay_timer nxp_delay_timer = { + .freq = CLK_SOURCE_HZ, + .read_current_timer = (unsigned long (*)(void))timer_read_count, +}; +#endif + +static int __init timer_of_init_dt(struct device_node *node) +{ + struct timer_of_dev *dev = NULL; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + timer_dev = dev; + + if (timer_get_device_data(node, dev)) + panic("s5p6818 timer: failed to get device data\n"); + + timer_source_of_init(node); + timer_event_of_init(node); + +#ifdef CONFIG_ARM + register_current_timer_delay(&nxp_delay_timer); +#endif + return 0; +} + +CLOCKSOURCE_OF_DECLARE(s5p6818, "nexell,s5p6818-timer", timer_of_init_dt); diff -ENwbur a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm --- a/drivers/cpufreq/Kconfig.arm 2018-05-06 08:47:36.273301254 +0200 +++ b/drivers/cpufreq/Kconfig.arm 2018-05-06 08:49:49.086691685 +0200 @@ -292,3 +292,45 @@ support for its operation. If in doubt, say N. + +config ARM_NEXELL_CPUFREQ + bool "Nexell CPU Frequency scaling support" + depends on ARCH_S5P6818 || ARCH_S5P4418 + default y + help + This adds the CPUFreq driver for Nexell SoCs. + +config ARM_NEXELL_CPUFREQ_DEBUG + bool "Dynamic Frequency scaling debug message" + depends on ARM_NEXELL_CPUFREQ + default n + +config ARM_NEXELL_CPUFREQ_VOLTAGE_DEBUG + bool "Dynamic Voltage scaling debug message" + depends on ARM_NEXELL_CPUFREQ + default n + +config ARM_DYNAMIC_CLUSTER_HOTPLUG + bool "Dynamic CLUSTER Hotplug support" + depends on HOTPLUG_CPU + help + Enable dynamic cluster hotplug. + +choice + prompt "Select CPU PLL device" + depends on ARM_NEXELL_CPUFREQ + default NEXELL_CPUFREQ_PLL_1 + + config NEXELL_CPUFREQ_PLL_0 + bool "PLL 0" + + config NEXELL_CPUFREQ_PLL_1 + bool "PLL 1" +endchoice + +config NEXELL_CPUFREQ_PLLDEV + int + default 0 if NEXELL_CPUFREQ_PLL_0 + default 1 if NEXELL_CPUFREQ_PLL_1 + default 2 if NEXELL_CPUFREQ_PLL_2 + default 3 if NEXELL_CPUFREQ_PLL_3 diff -ENwbur a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile --- a/drivers/cpufreq/Makefile 2018-05-06 08:47:36.273301254 +0200 +++ b/drivers/cpufreq/Makefile 2018-05-06 08:49:49.086691685 +0200 @@ -83,6 +83,7 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o +obj-$(CONFIG_ARM_NEXELL_CPUFREQ) += nexell-cpufreq.o ################################################################################## diff -ENwbur a/drivers/cpufreq/nexell-cpufreq.c b/drivers/cpufreq/nexell-cpufreq.c --- a/drivers/cpufreq/nexell-cpufreq.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/cpufreq/nexell-cpufreq.c 2018-05-06 08:49:49.090691848 +0200 @@ -0,0 +1,902 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DEV_NAME_CPUFREQ "nexell-cpufreq" + +/* + * DVFS info + */ +struct cpufreq_asv_ops { + int (*setup_table)(unsigned long (*tables)[2]); + long (*get_voltage)(long freqkhz); + int (*modify_vol_table)(unsigned long (*tables)[2], int table_size, + long val, bool dn, bool percent); + int (*current_label)(char *string); + long (*get_vol_margin)(long uV, long val, bool dn, bool percent); +}; + +#if defined(CONFIG_ARCH_S5P6818) +#include "s5p6818-cpufreq.h" +#elif defined(CONFIG_ARCH_S5P4418) +#include "s5p4418-cpufreq.h" +#else +#define FREQ_MAX_FREQ_KHZ (1400*1000) +#define FREQ_ARRAY_SIZE (11) +static struct cpufreq_asv_ops asv_ops = { }; +#endif + +struct cpufreq_dvfs_timestamp { + unsigned long start; + unsigned long duration; +}; + +struct cpufreq_dvfs_info { + struct cpufreq_frequency_table *freq_table; + unsigned long (*dvfs_table)[2]; + struct clk *clk; + cpumask_var_t cpus; + struct cpufreq_policy *policy; + int cpu; + long target_freq; + int freq_point; + struct mutex lock; + /* voltage control */ + struct regulator *volt; + int table_size; + long supply_delay_us; + /* for suspend/resume */ + struct notifier_block pm_notifier; + unsigned long resume_state; + long boot_frequency; + int boot_voltage; + /* check frequency duration */ + int pre_freq_point; + unsigned long check_state; + struct cpufreq_dvfs_timestamp *time_stamp; + /* ASV operation */ + struct cpufreq_asv_ops *asv_ops; +}; + +#define FREQ_TABLE_MAX (30) +#define FREQ_STATE_RESUME (0) /* bit num */ +#define FREQ_STATE_TIME_RUN (0) /* bit num */ + +static struct thermal_cooling_device *cdev; +static struct cpufreq_dvfs_info *ptr_current_dvfs; +static unsigned long dvfs_freq_voltage[FREQ_TABLE_MAX][2]; +static struct cpufreq_dvfs_timestamp dvfs_timestamp[FREQ_TABLE_MAX] = { {0,}, }; +#define ms_to_ktime(m) ns_to_ktime((u64)m * 1000 * 1000) + +static inline void set_dvfs_ptr(void *dvfs) { ptr_current_dvfs = dvfs; } +static inline void *get_dvfs_ptr(void) { return ptr_current_dvfs; } + +static inline unsigned long cpufreq_get_voltage(struct cpufreq_dvfs_info *dvfs, + unsigned long frequency) +{ + unsigned long (*dvfs_table)[2] = (unsigned long(*)[2])dvfs->dvfs_table; + int i = 0; + + for (i = 0; dvfs->table_size > i; i++) { + if (frequency == dvfs_table[i][0]) + return dvfs_table[i][1]; + } + + pr_err("Fail : invalid frequency (%ld:%d) id !!!\n", + frequency, dvfs->table_size); + return -EINVAL; +} + +static int nxp_cpufreq_set_freq_point(struct cpufreq_dvfs_info *dvfs, + unsigned long frequency) +{ + unsigned long (*dvfs_tables)[2] = (unsigned long(*)[2])dvfs->dvfs_table; + int len = dvfs->table_size; + int id = 0; + + for (id = 0; len > id; id++) + if (frequency == dvfs_tables[id][0]) + break; + + if (id == len) { + pr_err("Fail : invalid frequency (%ld:%d) id !!!\n", + frequency, len); + return -EINVAL; + } + + dvfs->freq_point = id; + return 0; +} + +static long nxp_cpufreq_change_voltage(struct cpufreq_dvfs_info *dvfs, + unsigned long frequency) +{ + long mS = 0, uS = 0; + long uV = 0, wT = 0; + + if (!dvfs->volt) + return 0; + + uV = cpufreq_get_voltage(dvfs, frequency); + wT = dvfs->supply_delay_us; + + /* when rest duration */ + if (0 > uV) { + pr_err("%s: failed invalid freq %ld uV %ld !!!\n", __func__, + frequency, uV); + return -EINVAL; + } + + if (dvfs->asv_ops->get_voltage) + uV = dvfs->asv_ops->get_voltage(frequency); + + 
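+ /* + * uV now holds the target voltage for the requested frequency: the + * table value, overridden by the ASV characterisation of this part + * when available. Program the rail first, then honour supply_delay_us + * so it can settle; callers order this call before raising the clock + * and after lowering it (see nxp_cpufreq_change_freq() below). + */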
regulator_set_voltage(dvfs->volt, uV, uV); + + if (wT) { + mS = wT/1000; + uS = wT%1000; + if (mS) + mdelay(mS); + if (uS) + udelay(uS); + } + +#ifdef CONFIG_ARM_NEXELL_CPUFREQ_VOLTAGE_DEBUG + pr_info(" volt (%lukhz %ld.%06ld v, delay %ld.%03ld us)\n", + frequency, uV/1000000, uV%1000000, mS, uS); +#endif + return uV; +} + +static unsigned long nxp_cpufreq_change_freq(struct cpufreq_dvfs_info *dvfs, + unsigned int new, unsigned old) +{ + struct clk *clk = dvfs->clk; + unsigned long rate_khz = 0; + struct cpufreq_policy policy; + + nxp_cpufreq_set_freq_point(dvfs, new); + + if (!test_bit(FREQ_STATE_RESUME, &dvfs->resume_state)) + return old; + + /* pre voltage */ + if (new >= old) + nxp_cpufreq_change_voltage(dvfs, new); + + if (NULL == dvfs->policy) { + cpumask_copy(policy.cpus, cpu_online_mask); + dvfs->policy = &policy; + } + + clk_set_rate(clk, new*1000); + rate_khz = clk_get_rate(clk)/1000; + +#ifdef CONFIG_ARM_NEXELL_CPUFREQ_DEBUG + pr_debug(" set rate %u:%lukhz\n", new, rate_khz); +#endif + + if (test_bit(FREQ_STATE_TIME_RUN, &dvfs->check_state)) { + int id = dvfs->freq_point; + int prev = dvfs->pre_freq_point; + long ms = ktime_to_ms(ktime_get()); + + dvfs->time_stamp[prev].duration += + (ms - dvfs->time_stamp[prev].start); + dvfs->time_stamp[id].start = ms; + dvfs->pre_freq_point = id; + } + + /* post voltage */ + if (old > new) + nxp_cpufreq_change_voltage(dvfs, new); + + return rate_khz; +} + +static int nxp_cpufreq_pm_notify(struct notifier_block *this, + unsigned long mode, void *unused) +{ + struct cpufreq_dvfs_info *dvfs = container_of(this, + struct cpufreq_dvfs_info, + pm_notifier); + struct clk *clk = dvfs->clk; + unsigned int old, new; + long max_freq = cpufreq_quick_get_max(dvfs->cpu); + + switch (mode) { + case PM_SUSPEND_PREPARE: /* set initial frequency */ + mutex_lock(&dvfs->lock); + + new = dvfs->boot_frequency; + if (new > max_freq) { + new = max_freq; + pr_info("DVFS: max freq %ldkhz is less than boot freq %ldkhz\n", + max_freq, dvfs->boot_frequency); + } + old = clk_get_rate(clk)/1000; + + dvfs->target_freq = new; + nxp_cpufreq_change_freq(dvfs, new, old); + + clear_bit(FREQ_STATE_RESUME, &dvfs->resume_state); + mutex_unlock(&dvfs->lock); + break; + + case PM_POST_SUSPEND: /* restore frequency */ + mutex_lock(&dvfs->lock); + set_bit(FREQ_STATE_RESUME, &dvfs->resume_state); + + new = dvfs->target_freq; + old = clk_get_rate(clk)/1000; + nxp_cpufreq_change_freq(dvfs, new, old); + + mutex_unlock(&dvfs->lock); + break; + } + return 0; +} + +/* + * Attribute sys interfaces + */ +static ssize_t show_speed_duration(struct cpufreq_policy *policy, char *buf) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + int id = dvfs->freq_point; + ssize_t count = 0; + int i = 0; + + if (test_bit(FREQ_STATE_TIME_RUN, &dvfs->check_state)) { + long ms = ktime_to_ms(ktime_get()); + + if (dvfs->time_stamp[id].start) + dvfs->time_stamp[id].duration += + (ms - dvfs->time_stamp[id].start); + dvfs->time_stamp[id].start = ms; + dvfs->pre_freq_point = id; + } + + for (; dvfs->table_size > i; i++) + count += sprintf(&buf[count], "%8lu ", + dvfs->time_stamp[i].duration); + + count += sprintf(&buf[count], "\n"); + return count; +} + +static ssize_t store_speed_duration(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + int id = dvfs->freq_point; + long ms = ktime_to_ms(ktime_get()); + const char *s = buf; + + mutex_lock(&dvfs->lock); + + if (0 == strncmp(s, "run", strlen("run"))) { + dvfs->pre_freq_point = id; + 
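+ /* + * "run" (re)starts residency accounting from the current operating + * point. Hypothetical usage from userspace (the exact sysfs path + * depends on kernel version and policy layout): + * echo run > /sys/devices/system/cpu/cpu0/cpufreq/scaling_speed_duration + * echo stop/clear likewise; read the file back for per-OPP milliseconds. + */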
dvfs->time_stamp[id].start = ms; + set_bit(FREQ_STATE_TIME_RUN, &dvfs->check_state); + } else if (0 == strncmp(s, "stop", strlen("stop"))) { + clear_bit(FREQ_STATE_TIME_RUN, &dvfs->check_state); + } else if (0 == strncmp(s, "clear", strlen("clear"))) { + memset(dvfs->time_stamp, 0, sizeof(dvfs_timestamp)); + if (test_bit(FREQ_STATE_TIME_RUN, &dvfs->check_state)) { + dvfs->time_stamp[id].start = ms; + dvfs->pre_freq_point = id; + } + } else { + count = -EINVAL; + } + + mutex_unlock(&dvfs->lock); + + return count; +} + +static ssize_t show_available_voltages(struct cpufreq_policy *policy, char *buf) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + unsigned long (*dvfs_table)[2] = (unsigned long(*)[2])dvfs->dvfs_table; + ssize_t count = 0; + int i = 0; + + for (; dvfs->table_size > i; i++) { + long uV = dvfs_table[i][1]; + + if (dvfs->asv_ops->get_voltage) + uV = dvfs->asv_ops->get_voltage(dvfs_table[i][0]); + count += sprintf(&buf[count], "%ld ", uV); + } + + count += sprintf(&buf[count], "\n"); + return count; +} + +static ssize_t show_cur_voltages(struct cpufreq_policy *policy, char *buf) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + unsigned long (*dvfs_table)[2] = (unsigned long(*)[2])dvfs->dvfs_table; + ssize_t count = 0; + int i = 0; + + for (; dvfs->table_size > i; i++) + count += sprintf(&buf[count], "%ld ", dvfs_table[i][1]); + + count += sprintf(&buf[count], "\n"); + return count; +} + +static ssize_t store_cur_voltages(struct cpufreq_policy *policy, + const char *buf, size_t count) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + unsigned long (*dvfs_tables)[2] = + (unsigned long(*)[2])dvfs_freq_voltage; + bool percent = false, down = false; + const char *s = strchr(buf, '-'); + long val; + + if (s) + down = true; + else + s = strchr(buf, '+'); + + if (!s) + s = buf; + else + s++; + + if (strchr(buf, '%')) + percent = true; + + val = simple_strtol(s, NULL, 10); + + mutex_lock(&dvfs->lock); + + if (dvfs->asv_ops->modify_vol_table) + dvfs->asv_ops->modify_vol_table(dvfs_tables, dvfs->table_size, + val, down, percent); + + nxp_cpufreq_change_voltage(dvfs, dvfs->target_freq); + + mutex_unlock(&dvfs->lock); + return count; +} + +static ssize_t show_asv_level(struct cpufreq_policy *policy, char *buf) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + int ret = 0; + + if (dvfs->asv_ops->current_label) + ret = dvfs->asv_ops->current_label(buf); + + return ret; +} + +/* + * show/store frequency duration time status + */ +static struct freq_attr cpufreq_freq_attr_scaling_speed_duration = { + .attr = { + .name = "scaling_speed_duration", + .mode = 0664, + }, + .show = show_speed_duration, + .store = store_speed_duration, +}; + +/* + * show available voltages for each frequency + */ +static struct freq_attr cpufreq_freq_attr_scaling_available_voltages = { + .attr = { + .name = "scaling_available_voltages", + .mode = 0664, + }, + .show = show_available_voltages, +}; + +/* + * show/store ASV current voltage adjust margin + */ +static struct freq_attr cpufreq_freq_attr_scaling_cur_voltages = { + .attr = { + .name = "scaling_cur_voltages", + .mode = 0664, + }, + .show = show_cur_voltages, + .store = store_cur_voltages, +}; + +/* + * show ASV level status + */ +static struct freq_attr cpufreq_freq_attr_scaling_asv_level = { + .attr = { + .name = "scaling_asv_level", + .mode = 0664, + }, + .show = show_asv_level, +}; + +static struct freq_attr *nxp_cpufreq_attr[] = { + /* kernel attribute */ + &cpufreq_freq_attr_scaling_available_freqs, + /* new attributes */ + 
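+ /* + * scaling_speed_duration - per-OPP residency accounting (run/stop/clear) + * scaling_available_voltages - ASV-resolved voltage for each OPP + * scaling_cur_voltages - show/adjust the active voltage table + * scaling_asv_level - fused ASV speed grade of this part + */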
&cpufreq_freq_attr_scaling_speed_duration, + &cpufreq_freq_attr_scaling_available_voltages, + &cpufreq_freq_attr_scaling_cur_voltages, + &cpufreq_freq_attr_scaling_asv_level, + NULL, +}; + +static int nxp_cpufreq_verify_speed(struct cpufreq_policy *policy) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + struct cpufreq_frequency_table *freq_table = dvfs->freq_table; + + if (!freq_table) + return -EINVAL; + + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static unsigned int nxp_cpufreq_get_speed(unsigned int cpu) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + struct clk *clk = dvfs->clk; + long rate_khz = clk_get_rate(clk)/1000; + + return rate_khz; +} + +static int nxp_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + struct cpufreq_frequency_table *freq_table = dvfs->freq_table; + unsigned long rate_khz = 0; + unsigned int old, new; + int ret = 0; + + old = policy->cur; + new = freq_table[index].frequency; + + new = max((unsigned int)pm_qos_request(PM_QOS_CPU_FREQ_MIN), new); + new = min((unsigned int)pm_qos_request(PM_QOS_CPU_FREQ_MAX), new); + + mutex_lock(&dvfs->lock); + + pr_debug("cpufreq : target %u -> %u khz", old, new); + + if (old == new && policy->cur == new) { + pr_debug("PASS\n"); + mutex_unlock(&dvfs->lock); + return ret; + } + + dvfs->target_freq = new; + + pr_debug("\n"); + + dvfs->policy = policy; + + rate_khz = nxp_cpufreq_change_freq(dvfs, new, old); + + policy->cur = rate_khz; + + mutex_unlock(&dvfs->lock); + + return ret; +} + +static int nxp_cpufreq_init(struct cpufreq_policy *policy) +{ + struct cpufreq_dvfs_info *dvfs = get_dvfs_ptr(); + struct cpufreq_frequency_table *freq_table = dvfs->freq_table; + + pr_debug("nxp-cpufreq: freq table 0x%p\n", freq_table); + return cpufreq_generic_init(policy, freq_table, 100000); +} + +static int cpufreq_min_qos_handler(struct notifier_block *b, + unsigned long val, void *v) +{ + struct cpufreq_policy *policy; + int ret; + + policy = cpufreq_cpu_get(0); + + if (!policy) + goto bad; + + if (policy->cur >= val) { + cpufreq_cpu_put(policy); + goto good; + } + + if (!policy->governor) { + cpufreq_cpu_put(policy); + goto bad; + } + + ret = __cpufreq_driver_target(policy, val, CPUFREQ_RELATION_L); + + cpufreq_cpu_put(policy); + + if (ret < 0) + goto bad; + +good: + return NOTIFY_OK; +bad: + return NOTIFY_BAD; +} + +static int cpufreq_max_qos_handler(struct notifier_block *b, + unsigned long val, void *v) +{ + struct cpufreq_policy *policy; + int ret; + + policy = cpufreq_cpu_get(0); + + if (!policy) + goto bad; + + if (policy->cur <= val) { + cpufreq_cpu_put(policy); + goto good; + } + + if (!policy->governor) { + cpufreq_cpu_put(policy); + goto bad; + } + + ret = __cpufreq_driver_target(policy, val, CPUFREQ_RELATION_H); + + cpufreq_cpu_put(policy); + + if (ret < 0) + goto bad; + +good: + return NOTIFY_OK; +bad: + return NOTIFY_BAD; +} + +static struct notifier_block cpufreq_min_qos_notifier = { + .notifier_call = cpufreq_min_qos_handler, +}; + +static struct notifier_block cpufreq_max_qos_notifier = { + .notifier_call = cpufreq_max_qos_handler, +}; + +static void nxp_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct device_node *cpu0; + + cpu0 = of_get_cpu_node(0, NULL); + if (!cpu0) { + pr_err("failed to find cpu0 node\n"); + return; + } + + if (of_find_property(cpu0, "#cooling-cells", NULL)) { + cdev = of_cpufreq_cooling_register(cpu0, policy); + if (IS_ERR(cdev)) + pr_err("running cpufreq without cooling device: 
%ld\n", + PTR_ERR(cdev)); + } + + pm_qos_add_notifier(PM_QOS_CPU_FREQ_MIN, &cpufreq_min_qos_notifier); + pm_qos_add_notifier(PM_QOS_CPU_FREQ_MAX, &cpufreq_max_qos_notifier); +} + +static struct cpufreq_driver nxp_cpufreq_driver = { + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = nxp_cpufreq_verify_speed, + .target_index = nxp_cpufreq_target, + .get = nxp_cpufreq_get_speed, + .init = nxp_cpufreq_init, + .ready = nxp_cpufreq_ready, + .name = "nxp-cpufreq", + .attr = nxp_cpufreq_attr, +}; + +#ifdef CONFIG_OF +static unsigned long dt_dvfs_table[FREQ_TABLE_MAX][2]; + +struct nxp_cpufreq_plat_data dt_cpufreq_data = { + .pll_dev = CONFIG_NEXELL_CPUFREQ_PLLDEV, + .dvfs_table = dt_dvfs_table, +}; + +static const struct of_device_id dvfs_dt_match[] = { + { + .compatible = "nexell,s5pxx18-cpufreq", + .data = (void *)&dt_cpufreq_data, + }, {}, +}; +MODULE_DEVICE_TABLE(of, dvfs_dt_match); + +#define FN_SIZE 4 +static void *nxp_cpufreq_get_dt_data(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct of_device_id *match; + struct nxp_cpufreq_plat_data *pdata; + unsigned long (*plat_tbs)[2] = NULL; + const __be32 *list; + char *supply; + int value, i, size = 0; + + match = of_match_node(dvfs_dt_match, node); + if (!match) + return NULL; + + pdata = (struct nxp_cpufreq_plat_data *)match->data; + plat_tbs = (unsigned long(*)[2])pdata->dvfs_table; + + if (!of_property_read_string(node, "supply_name", + (const char **)&supply)) { + pdata->supply_name = supply; + if (!of_property_read_u32(node, "supply_delay_us", &value)) + pdata->supply_delay_us = value; + pr_info("voltage supply : %s\n", pdata->supply_name); + } + + list = of_get_property(node, "dvfs-tables", &size); + size /= FN_SIZE; + + if (size) { + for (i = 0; size/2 > i; i++) { + plat_tbs[i][0] = be32_to_cpu(*list++); + plat_tbs[i][1] = be32_to_cpu(*list++); + pr_debug("DTS %2d = %8ldkhz, %8ld uV\n", + i, plat_tbs[i][0], plat_tbs[i][1]); + } + pdata->table_size = size/2; + } + + return pdata; +} +#else +#define dvfs_dt_match NULL +#endif + +static void *nxp_cpufreq_make_table(struct platform_device *pdev, + int *table_size, + unsigned long (*dvfs_tables)[2]) +{ + struct nxp_cpufreq_plat_data *pdata = pdev->dev.platform_data; + struct cpufreq_frequency_table *freq_table; + struct cpufreq_asv_ops *ops = &asv_ops; + unsigned long (*plat_tbs)[2] = NULL; + int tb_size, asv_size = 0; + int id = 0, n = 0; + + /* user defined dvfs */ + if (pdata->dvfs_table && pdata->table_size) + plat_tbs = (unsigned long(*)[2])pdata->dvfs_table; + + /* asv dvfs tables */ + if (ops->setup_table) + asv_size = ops->setup_table(dvfs_tables); + + if (!pdata->table_size && !asv_size) { + dev_err(&pdev->dev, "failed no freq table !!!\n"); + return NULL; + } + + tb_size = (pdata->table_size ? 
pdata->table_size : asv_size); + + /* alloc with end table */ + freq_table = kzalloc((sizeof(*freq_table) * (tb_size + 1)), GFP_KERNEL); + if (!freq_table) { + dev_warn(&pdev->dev, "failed to allocate freq table\n"); + return NULL; + } + + /* make frequency table with platform data */ + if (asv_size > 0) { + for (n = 0, id = 0; tb_size > id && asv_size > n; n++) { + if (plat_tbs) { + for (n = 0; asv_size > n; n++) { + if (plat_tbs[id][0] == + dvfs_tables[n][0]) { + dvfs_tables[id][0] = + dvfs_tables[n][0]; + dvfs_tables[id][1] = + dvfs_tables[n][1]; + break; + } + } + } else { + if (dvfs_tables[n][0] > FREQ_MAX_FREQ_KHZ) + continue; + dvfs_tables[id][0] = dvfs_tables[n][0]; + dvfs_tables[id][1] = dvfs_tables[n][1]; + } + + freq_table[id].frequency = dvfs_tables[id][0]; + pr_info("ASV %2d = %8ldkhz, %8ld uV\n", + id, dvfs_tables[id][0], dvfs_tables[id][1]); + /* next */ + id++; + } + } else { + for (id = 0; tb_size > id; id++) { + dvfs_tables[id][0] = plat_tbs[id][0]; + dvfs_tables[id][1] = plat_tbs[id][1]; + freq_table[id].frequency = dvfs_tables[id][0]; + pr_info("DTB %2d = %8ldkhz, %8ld uV\n", + id, dvfs_tables[id][0], dvfs_tables[id][1]); + } + } + + /* End table */ + freq_table[id].frequency = CPUFREQ_TABLE_END; + *table_size = id; + + return (void *)freq_table; +} + +static int nxp_cpufreq_set_supply(struct platform_device *pdev, + struct cpufreq_dvfs_info *dvfs) +{ + struct nxp_cpufreq_plat_data *pdata = pdev->dev.platform_data; + static struct notifier_block *pm_notifier; + + /* get voltage regulator */ + dvfs->volt = regulator_get(&pdev->dev, pdata->supply_name); + if (IS_ERR(dvfs->volt)) { + dev_err(&pdev->dev, "Cannot get regulator for DVS supply %s\n", + pdata->supply_name); + return -1; + } + + pm_notifier = &dvfs->pm_notifier; + pm_notifier->notifier_call = nxp_cpufreq_pm_notify; + if (register_pm_notifier(pm_notifier)) { + dev_err(&pdev->dev, "Cannot register pm notifier for %s\n", + pdata->supply_name); + return -1; + } + + /* bootup voltage */ + nxp_cpufreq_change_voltage(dvfs, dvfs->boot_frequency); + dvfs->boot_voltage = regulator_get_voltage(dvfs->volt); + + pr_info("DVFS: regulator %s\n", pdata->supply_name); + return 0; +} + +static int nxp_cpufreq_probe(struct platform_device *pdev) +{ + struct nxp_cpufreq_plat_data *pdata = pdev->dev.platform_data; + unsigned long (*dvfs_tables)[2] = + (unsigned long(*)[2])dvfs_freq_voltage; + struct cpufreq_dvfs_info *dvfs = NULL; + struct cpufreq_frequency_table *freq_table = NULL; + int cpu = raw_smp_processor_id(); + char name[16]; + int table_size = 0, ret = 0; + + dvfs = kzalloc(sizeof(*dvfs), GFP_KERNEL); + if (!dvfs) { + dev_err(&pdev->dev, "failed to allocate DVFS data\n"); + return -ENOMEM; + } + +#ifdef CONFIG_OF + if (pdev->dev.of_node) { + pdata = nxp_cpufreq_get_dt_data(pdev); + if (!pdata) { + ret = -EINVAL; + goto err_free_table; + } + pdev->dev.platform_data = pdata; + } +#endif + + freq_table = nxp_cpufreq_make_table(pdev, &table_size, dvfs_tables); + if (!freq_table) { + ret = -ENOMEM; + goto err_free_table; + } + + sprintf(name, "pll%d", pdata->pll_dev); + dvfs->clk = clk_get(NULL, name); + if (IS_ERR(dvfs->clk)) { + ret = PTR_ERR(dvfs->clk); + goto err_free_table; + } + + set_dvfs_ptr(dvfs); + mutex_init(&dvfs->lock); + + dvfs->asv_ops = &asv_ops; + dvfs->freq_table = freq_table; + dvfs->dvfs_table = (unsigned long(*)[2])(dvfs_tables); + dvfs->table_size = table_size; + dvfs->supply_delay_us = pdata->supply_delay_us; + dvfs->boot_frequency = nxp_cpufreq_get_speed(cpu); + dvfs->target_freq = dvfs->boot_frequency; + dvfs->pre_freq_point = -1; + dvfs->check_state = 0; + dvfs->time_stamp = dvfs_timestamp; + 
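+ /* + * The DVFS instance is now fully populated: mark it live and seed the + * frequency-point index before the supply regulator is attached and + * the cpufreq core (registered below) starts requesting transitions. + */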
+ set_bit(FREQ_STATE_RESUME, &dvfs->resume_state); + nxp_cpufreq_set_freq_point(dvfs, dvfs->target_freq); + + if (pdata->supply_name) { + ret = nxp_cpufreq_set_supply(pdev, dvfs); + if (0 > ret) + goto err_free_table; + } + + pr_info("DVFS: cpu %s with PLL.%d [tables=%d]\n", + dvfs->volt?"DVFS":"DFS", pdata->pll_dev, dvfs->table_size); + + ret = cpufreq_register_driver(&nxp_cpufreq_driver); + if (ret) { + pr_err("Failed to register cpufreq driver\n"); + goto err_free_table; + } + + return 0; + +err_free_table: + kfree(dvfs); + kfree(freq_table); + + return ret; +} + +static struct platform_driver cpufreq_driver = { + .probe = nxp_cpufreq_probe, + .driver = { + .name = DEV_NAME_CPUFREQ, + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(dvfs_dt_match), + }, +}; +module_platform_driver(cpufreq_driver); diff -ENwbur a/drivers/cpufreq/s5p6818-cpufreq.h b/drivers/cpufreq/s5p6818-cpufreq.h --- a/drivers/cpufreq/s5p6818-cpufreq.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/cpufreq/s5p6818-cpufreq.h 2018-05-06 08:49:49.094692010 +0200 @@ -0,0 +1,302 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __S5P6818_ASV_H__ +#define __S5P6818_ASV_H__ + +#define VOLTAGE_STEP_UV (1) +#define ASV_DEFAULT_LEVEL (0) + +#define FREQ_MAX_FREQ_KHZ (1400*1000) +#define FREQ_ARRAY_SIZE (13) +#define UV(v) (v*1000) + +struct asv_tb_info { + int ids; + int ro; + long mhz[FREQ_ARRAY_SIZE]; + long uv[FREQ_ARRAY_SIZE]; +}; + +#define ASB_FREQ_MHZ { \ + [0] = 1600, \ + [1] = 1500, \ + [2] = 1400, \ + [3] = 1300, \ + [4] = 1200, \ + [5] = 1100, \ + [6] = 1000, \ + [7] = 900, \ + [8] = 800, \ + [9] = 700, \ + [10] = 600, \ + [11] = 500, \ + [12] = 400, \ + } + +static struct asv_tb_info asv_tables[] = { + [0] = { .ids = 6, .ro = 90, + .mhz = ASB_FREQ_MHZ, + .uv = { UV(1360), UV(1350), /* OVER FREQ */ + UV(1325), UV(1275), UV(1225), UV(1175), + UV(1150), UV(1125), UV(1100), UV(1075), + UV(1050), UV(1025), UV(1000) }, + }, + [1] = { .ids = 15, .ro = 130, + .mhz = ASB_FREQ_MHZ, + .uv = { UV(1350), UV(1280), /* OVER FREQ */ + UV(1275), UV(1225), UV(1175), UV(1125), + UV(1100), UV(1075), UV(1050), UV(1025), + UV(1000), UV(1000), UV(1000) }, + }, + [2] = { .ids = 38, .ro = 170, + .mhz = ASB_FREQ_MHZ, + .uv = { UV(1270), UV(1240), /* OVER FREQ */ + UV(1225), UV(1175), UV(1125), UV(1075), + UV(1050), UV(1025), UV(1000), UV(1000), + UV(1000), UV(1000), UV(1000) }, + }, + [3] = { .ids = 78, .ro = 200, + .mhz = ASB_FREQ_MHZ, + .uv = { UV(1240), UV(1210), /* OVER FREQ */ + UV(1175), UV(1125), UV(1075), UV(1050), + UV(1025), UV(1000), UV(1000), UV(1000), + UV(1000), UV(1000), UV(1000) }, + }, + [4] = { .ids = 78, .ro = 200, + .mhz = ASB_FREQ_MHZ, + .uv = { UV(1225), UV(1175), /* OVER FREQ */ + UV(1125), UV(1075), UV(1025), UV(1000), + UV(1000), UV(1000), UV(1000), UV(1000), + UV(1000), UV(1000), UV(1000) }, + }, +}; +#define ASV_ARRAY_SIZE ARRAY_SIZE(asv_tables) + +struct asv_param { + int level; + int ids, ro; + int flag, group, shift; +}; + +static struct asv_tb_info *p_asv_table; +static struct asv_param asv_param = { 0, }; + +extern int nxp_cpu_id_ecid(u32 ecid[4]); + +static inline unsigned int mtol(unsigned int data, int bits) +{ + unsigned int result = 0; + unsigned int mask = 1; + int i = 0; + + for (i = 0; i < bits ; i++) { + if (data & (1 << i)) + result |= (mask << (bits - i - 1)); + } + return result; +} + +static int s5p6818_asv_setup_table(unsigned long (*freq_tables)[2]) +{ + u32 ecid[4] = { 0, }; + int i, ids = 0, ro = 0; + int idslv, rolv, asvlv; + + nxp_cpu_id_ecid(ecid); + + /* use the fused ASV group/shift when the fusing flag (bit 0 of + * ecid[2]) is set, otherwise fall back to IDS/Ro below + */ + if (ecid[2] & 0x1) { + int gs = mtol((ecid[2]>>1) & 0x07, 3); + int ag = mtol((ecid[2]>>4) & 0x0F, 4); + + asv_param.level = (ag - gs); + + if (asv_param.level < 0) + asv_param.level = 0; + + asv_param.flag = 1; + asv_param.group = ag; + asv_param.shift = gs; + p_asv_table = &asv_tables[asv_param.level]; + pr_info("DVFS: ASV[%d] IDS(%dmA) Ro(%d), Fusing Shift(%d), Group(%d)\n", + asv_param.level+1, p_asv_table->ids, p_asv_table->ro, + gs, ag); + goto asv_done; + } + + /* Use IDS/Ro */ + ids = mtol((ecid[1]>>16) & 0xFF, 8); + ro = mtol((ecid[1]>>24) & 0xFF, 8); + + /* find IDS Level */ + for (i = 0; (ASV_ARRAY_SIZE-1) > i; i++) { + p_asv_table = &asv_tables[i]; + if (p_asv_table->ids >= ids) + break; + } + idslv = ASV_ARRAY_SIZE != i ? i : (ASV_ARRAY_SIZE-1); + + /* find RO Level */ + for (i = 0; (ASV_ARRAY_SIZE-1) > i; i++) { + p_asv_table = &asv_tables[i]; + if (p_asv_table->ro >= ro) + break; + } + rolv = ASV_ARRAY_SIZE != i ? i : (ASV_ARRAY_SIZE-1); + + /* find Lowest ASV Level */ + asvlv = idslv > rolv ? 
rolv : idslv; + + p_asv_table = &asv_tables[asvlv]; + asv_param.level = asvlv; + asv_param.ids = ids; + asv_param.ro = ro; + pr_info("DVFS: ASV[%d] IDS %dmA, Ro %3d -> Table [IDS %dmA, Ro %3d]\n", + asv_param.level+1, ids, ro, p_asv_table->ids, p_asv_table->ro); + +asv_done: + for (i = 0; FREQ_ARRAY_SIZE > i; i++) { + freq_tables[i][0] = p_asv_table->mhz[i] * 1000; /* frequency */ + freq_tables[i][1] = p_asv_table->uv[i]; /* voltage */ + } + + return FREQ_ARRAY_SIZE; +} + +static long s5p6818_asv_get_voltage(long freqkhz) +{ + long uv = 0; + int i = 0; + + if (NULL == p_asv_table) + return -EINVAL; + + for (i = 0; FREQ_ARRAY_SIZE > i; i++) { + if (freqkhz == (p_asv_table->mhz[i]*1000)) { + uv = p_asv_table->uv[i]; + break; + } + } + + if (0 == uv) { + pr_info("FAIL: %ldkhz does not exist in the ASV tables\n", + freqkhz); + return -EINVAL; + } + + return uv; +} + +static int s5p6818_asv_modify_vol_table(unsigned long (*freq_tables)[2], + int table_size, long value, bool down, + bool percent) +{ + long step_vol = VOLTAGE_STEP_UV; + long uv, dv, new; + int i = 0, n = 0; + + if (NULL == freq_tables || + NULL == p_asv_table || (0 > value)) + return -EINVAL; + + /* initialize: reset the table to the ASV baseline */ + for (i = 0; table_size > i; i++) { + for (n = 0; FREQ_ARRAY_SIZE > n; n++) { + if (freq_tables[i][0] == (p_asv_table->mhz[n]*1000)) { + freq_tables[i][1] = p_asv_table->uv[n]; + break; + } + } + } + pr_info("DVFS:%s%ld%s\n", down?"-":"+", value, percent?"%":"mV"); + + /* new voltage */ + for (i = 0; table_size > i; i++) { + int al = 0; + + uv = freq_tables[i][1]; + dv = percent ? ((uv/100) * value) : (value*1000); + new = down ? uv - dv : uv + dv; + + if ((new % step_vol)) { + new = (new / step_vol) * step_vol; + + al = 1; + if (down) + new += step_vol; /* Upper */ + } + + pr_info("%7ldkhz, %7ld (%s%ld) align %ld (%s) -> %7ld\n", + freq_tables[i][0], freq_tables[i][1], + down?"-":"+", dv, step_vol, al?"X":"O", new); + + freq_tables[i][1] = new; + } + return 0; +} + +static long s5p6818_asv_get_vol_margin(long uv, long value, bool down, + bool percent) +{ + long step_vol = VOLTAGE_STEP_UV; + long dv = percent ? ((uv/100) * value) : (value*1000); + long new = down ? uv - dv : uv + dv; + int al = 0; + + if (NULL == p_asv_table) + return -EINVAL; + + if ((new % step_vol)) { + new = (new / step_vol) * step_vol; + al = 1; + if (down) + new += step_vol; /* Upper */ + } + return new; +} + +static int s5p6818_asv_current_label(char *buf) +{ + char *s = buf; + + if (NULL == p_asv_table) + return -EINVAL; + + if (s && p_asv_table) { + if (!asv_param.flag) { + s += sprintf(s, "%d:%dmA,%d\n", + asv_param.level, asv_param.ids, + asv_param.ro); + } else { + s += sprintf(s, "%d:G%d,S%d\n", + asv_param.level, asv_param.group, + asv_param.shift); + } + } + return (s - buf); +} + +static struct cpufreq_asv_ops asv_ops = { + .setup_table = s5p6818_asv_setup_table, + .get_voltage = s5p6818_asv_get_voltage, + .modify_vol_table = s5p6818_asv_modify_vol_table, + .current_label = s5p6818_asv_current_label, + .get_vol_margin = s5p6818_asv_get_vol_margin, +}; +#endif diff -ENwbur a/drivers/cpuidle/cpuidle-nexell.c b/drivers/cpuidle/cpuidle-nexell.c --- a/drivers/cpuidle/cpuidle-nexell.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/cpuidle/cpuidle-nexell.c 2018-05-06 08:49:49.094692010 +0200 @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. 
+ * Author: Youngbok, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "dt_idle_states.h" + +#define NEXELL_MAX_STATES 1 + +static int nexell_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + cpu_do_idle(); + + return index; +} + +static struct cpuidle_driver nexell_idle_driver = { + .name = "nexell_idle", + .owner = THIS_MODULE, + .states = { + { + .enter = nexell_enter_idle, + .exit_latency = 1, + .target_residency = 1, + .name = "Nexell Idle", + .desc = "Nexell cpu Idle", + }, + }, + .safe_state_index = 0, + .state_count = NEXELL_MAX_STATES, +}; + +static const struct of_device_id nexell_idle_state_match[] __initconst = { + { .compatible = "nexell,idle-state", + .data = nexell_enter_idle }, + { }, +}; + +static int __init nexell_idle_init(void) +{ + int cpu, ret; + struct cpuidle_driver *drv = &nexell_idle_driver; + struct cpuidle_device *dev; + + ret = dt_init_idle_driver(drv, nexell_idle_state_match, 1); + if (ret <= 0) + return ret ? : -ENODEV; + + ret = cpuidle_register_driver(drv); + if (ret) { + pr_err("Failed to register cpuidle driver\n"); + return ret; + } + for_each_possible_cpu(cpu) { + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + pr_err("Failed to allocate cpuidle device\n"); + goto out_fail; + } + dev->cpu = cpu; + + ret = cpuidle_register_device(dev); + if (ret) { + pr_err("Failed to register cpuidle device for CPU %d\n" + , cpu); + kfree(dev); + goto out_fail; + } + } + + return 0; +out_fail: + while (--cpu >= 0) { + dev = per_cpu(cpuidle_devices, cpu); + cpuidle_unregister_device(dev); + kfree(dev); + } + + cpuidle_unregister_driver(drv); + + return ret; +} +device_initcall(nexell_idle_init); diff -ENwbur a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm --- a/drivers/cpuidle/Kconfig.arm 2018-05-06 08:47:36.281301578 +0200 +++ b/drivers/cpuidle/Kconfig.arm 2018-05-06 08:49:49.094692010 +0200 @@ -75,3 +75,10 @@ depends on ARCH_MVEBU && !ARM64 help Select this to enable cpuidle on Armada 370, 38x and XP processors. 
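The cpuidle driver above registers a single WFI state and lets dt_init_idle_driver() append any deeper states described in DT under the "nexell,idle-state" compatible. If such states were supplied later, each would be parsed into an entry shaped like the WFI state; a hand-written equivalent looks roughly as follows (a sketch with placeholder numbers, not silicon-characterised data):

/* Hypothetical deeper C-state; latency/residency values are placeholders. */
static struct cpuidle_state nexell_deep_state = {
	.enter            = nexell_enter_idle,
	.exit_latency     = 300,	/* us needed to come back out */
	.target_residency = 1000,	/* only worth entering for idles >= 1 ms */
	.name             = "deep",
	.desc             = "Hypothetical deeper Nexell idle state",
};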
+ +config ARM_NEXELL_CPUIDLE + bool "CPU idle driver for Nexell processors" + depends on ARCH_S5P4418 || ARCH_S5P6818 + select DT_IDLE_STATES + help + Select this to enable cpuidle for Nexell processors. diff -ENwbur a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile --- a/drivers/cpuidle/Makefile 2018-05-06 08:47:36.281301578 +0200 +++ b/drivers/cpuidle/Makefile 2018-05-06 08:49:49.094692010 +0200 @@ -19,6 +19,7 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o +obj-$(CONFIG_ARM_NEXELL_CPUIDLE) += cpuidle-nexell.o obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o ############################################################################### diff -ENwbur a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig --- a/drivers/devfreq/Kconfig 2018-05-06 08:47:36.309302715 +0200 +++ b/drivers/devfreq/Kconfig 2018-05-06 08:49:49.122693147 +0200 @@ -113,6 +113,14 @@ It sets the frequency for the memory controller and reads the usage counts from hardware. +config ARM_S5Pxx18_DEVFREQ + tristate "Nexell S5Pxx18 Bus DEVFREQ Driver" + depends on ARCH_S5P6818 + select DEVFREQ_GOV_SIMPLE_ONDEMAND + select PM_OPP + help + This adds the DEVFREQ driver for the bus interface of Nexell S5Pxx18 series SoCs. + source "drivers/devfreq/event/Kconfig" endif # PM_DEVFREQ diff -ENwbur a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile --- a/drivers/devfreq/Makefile 2018-05-06 08:47:36.309302715 +0200 +++ b/drivers/devfreq/Makefile 2018-05-06 08:49:49.122693147 +0200 @@ -11,6 +11,7 @@ obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ) += rk3399_dmc.o obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o +obj-$(CONFIG_ARM_S5Pxx18_DEVFREQ) += nx-devfreq.o # DEVFREQ Event Drivers obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/ diff -ENwbur a/drivers/devfreq/nx-devfreq.c b/drivers/devfreq/nx-devfreq.c --- a/drivers/devfreq/nx-devfreq.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/devfreq/nx-devfreq.c 2018-05-06 08:49:49.122693147 +0200 @@ -0,0 +1,592 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: Sungwoo, Park + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "governor.h" + +#define KHZ 1000 + +struct nx_devfreq { + struct devfreq *devfreq; + struct devfreq_notifier_block nb; + int pm_qos_class; + struct clk *bclk; + atomic_t req_freq; + atomic_t cur_freq; + u32 pll; + char *supply_name; + struct regulator *regulator; + struct device *dev; + unsigned long suspend_freq; +}; + +struct bus_opp_table { + int index; + unsigned long clk; +}; + +static struct nx_devfreq *_nx_devfreq; + +static struct bus_opp_table bus_opp_table[] = { + {0, NX_BUS_CLK_HIGH_KHZ}, + {1, NX_BUS_CLK_MID_KHZ}, + {2, NX_BUS_CLK_LOW_KHZ}, + {0, 0}, +}; + +struct nx_bus_notifier_data { + struct list_head list; + struct notifier_block *data; +}; + +LIST_HEAD(nx_devfreq_notifier_list); +DEFINE_MUTEX(nx_devfreq_notifier_list_lock); + +int nx_bus_add_notifier(void *data) +{ + struct nx_bus_notifier_data *noti_data; + + noti_data = kzalloc(sizeof(struct nx_bus_notifier_data), GFP_KERNEL); + if (!noti_data) + return -ENOMEM; + + noti_data->data = data; + + mutex_lock(&nx_devfreq_notifier_list_lock); + list_add(¬i_data->list, &nx_devfreq_notifier_list); + mutex_unlock(&nx_devfreq_notifier_list_lock); + + return 0; +} + +void nx_bus_remove_notifier(void *data) +{ + struct nx_bus_notifier_data *noti_data; + bool found = false; + + mutex_lock(&nx_devfreq_notifier_list_lock); + list_for_each_entry(noti_data, &nx_devfreq_notifier_list, list) { + if (noti_data->data == data) { + found = true; + break; + } + } + if (found) { + list_del_init(¬i_data->list); + kfree(noti_data); + } + mutex_unlock(&nx_devfreq_notifier_list_lock); +} + +static int register_all_pm_qos_notifiers(int pm_qos_class) +{ + int ret = 0; + struct nx_bus_notifier_data *noti_data; + + mutex_lock(&nx_devfreq_notifier_list_lock); + list_for_each_entry(noti_data, &nx_devfreq_notifier_list, list) { + ret = pm_qos_add_notifier(pm_qos_class, + noti_data->data); + if (ret) + break; + } + mutex_unlock(&nx_devfreq_notifier_list_lock); + + return ret; +} + +static struct pm_qos_request nx_bus_qos; + +/* interface function for update qos */ +void nx_bus_qos_update(int val) +{ + pm_qos_update_request(&nx_bus_qos, val); +} +EXPORT_SYMBOL(nx_bus_qos_update); + +/* soc specific */ +struct pll_pms { + unsigned long rate; + unsigned long voltage; + u32 P; + u32 M; + u32 S; +}; + +#if defined(CONFIG_ARCH_S5P6818) +static struct pll_pms pll0_1_pms[] = { + [0] = { .rate = NX_BUS_CLK_HIGH_KHZ, .voltage = 1200000, .P = 3, .M = 200, .S = 1, }, + [1] = { .rate = NX_BUS_CLK_MID_KHZ, .voltage = 1000000, .P = 3, .M = 300, .S = 3, }, + [2] = { .rate = NX_BUS_CLK_LOW_KHZ, .voltage = 1000000, .P = 3, .M = 200, .S = 3, }, +}; + +static struct pll_pms pll2_3_pms[] = { + [0] = { .rate = NX_BUS_CLK_HIGH_KHZ, .voltage = 1200000, .P = 3, .M = 200, .S = 2, }, + [1] = { .rate = NX_BUS_CLK_MID_KHZ, .voltage = 1000000, .P = 3, .M = 200, .S = 3, }, + [2] = { .rate = NX_BUS_CLK_LOW_KHZ, .voltage = 1000000, .P = 3, .M = 250, .S = 4, }, +}; +#elif defined(CONFIG_ARCH_S5P4418) +static struct pll_pms pll0_1_pms[] = { + [0] = { .rate = NX_BUS_CLK_HIGH_KHZ, .voltage = 1100000, .P = 3, .M = 200, .S = 1, }, + [1] = { .rate = NX_BUS_CLK_MID_KHZ, .voltage = 1000000, .P = 2, .M = 200, .S = 3, }, + [2] = { .rate = NX_BUS_CLK_LOW_KHZ, .voltage = 1000000, .P = 3, .M = 200, .S = 3, }, +}; + +static struct pll_pms pll2_3_pms[] = { + [0] = { .rate = NX_BUS_CLK_HIGH_KHZ, .voltage = 1100000, .P = 3, .M = 200, .S = 1, }, + [1] = { .rate = 
NX_BUS_CLK_MID_KHZ, .voltage = 1000000, .P = 3, .M = 150, .S = 2, }, + [2] = { .rate = NX_BUS_CLK_LOW_KHZ, .voltage = 1000000, .P = 3, .M = 200, .S = 3, }, +}; +#endif + +static int get_pll_data(u32 pll, unsigned long rate, u32 *pll_data, + unsigned long *voltage) +{ + struct pll_pms *p = NULL; + int len; + int i; + unsigned long freq = 0; + + switch (pll) { + case 0: + case 1: + p = &pll0_1_pms[0]; + len = ARRAY_SIZE(pll0_1_pms); + break; + case 2: + case 3: + p = &pll2_3_pms[0]; + len = ARRAY_SIZE(pll2_3_pms); + break; + } + + for (i = 0; i < len; i++) { + freq = p->rate; + if (freq == rate) + break; + p++; + } + + if (freq) { + *pll_data = (p->P << 24) | (p->M << 8) | (p->S << 2) | pll; + *voltage = p->voltage; + return 0; + } + + return -EINVAL; +} + +/* profile */ +static int nx_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) +{ + int err; + u32 pll_data; + struct nx_devfreq *nx_devfreq = dev_get_drvdata(dev); + struct dev_pm_opp *opp; + unsigned long rate = *freq * KHZ; + unsigned long voltage; + bool is_up = false; + + rcu_read_lock(); + opp = devfreq_recommended_opp(dev, freq, flags); + if (IS_ERR(opp)) { + rcu_read_unlock(); + dev_err(dev, "failed to find opp for %lu KHz\n", *freq); + return PTR_ERR(opp); + } + rate = dev_pm_opp_get_freq(opp); + rcu_read_unlock(); + + dev_dbg(dev, "freq: %lu KHz, rate: %lu\n", *freq, rate); + + if (atomic_read(&nx_devfreq->cur_freq) == *freq) + return 0; + + if (atomic_read(&nx_devfreq->cur_freq) < *freq) + is_up = true; + + err = get_pll_data(nx_devfreq->pll, *freq, &pll_data, &voltage); + if (err) { + dev_err(dev, "failed to get pll data of freq %lu KHz\n", *freq); + return err; + } + + if (is_up) + regulator_set_voltage(nx_devfreq->regulator, voltage, voltage); + + err = nx_change_bus_freq(pll_data); + if (err) { + dev_err(dev, "failed to change bus clock for %lu KHz\n", *freq); + return err; + } + + if (!is_up) + regulator_set_voltage(nx_devfreq->regulator, voltage, voltage); + + atomic_set(&nx_devfreq->cur_freq, *freq); + return 0; +} + +static int nx_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) +{ + struct nx_devfreq *nx_devfreq = dev_get_drvdata(dev); + + *freq = atomic_read(&nx_devfreq->cur_freq); + return 0; +} + +static int nx_devfreq_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct nx_devfreq *nx_devfreq = dev_get_drvdata(dev); + + stat->current_frequency = atomic_read(&nx_devfreq->cur_freq); + stat->private_data = nx_devfreq; + + return 0; +} + +static struct devfreq_dev_profile nx_devfreq_profile = { + .target = nx_devfreq_target, + .get_dev_status = nx_devfreq_get_dev_status, + .get_cur_freq = nx_devfreq_get_cur_freq, +}; + +/* notifier */ +static int nx_devfreq_pm_qos_notifier(struct notifier_block *nb, + unsigned long val, + void *v) +{ + struct devfreq_notifier_block *devfreq_nb; + struct nx_devfreq *nx_devfreq; + u32 cur_freq, new; + bool changed = false; + + devfreq_nb = container_of(nb, struct devfreq_notifier_block, nb); + nx_devfreq = devfreq_nb->df->data; + + dev_dbg(nx_devfreq->dev, "%s: val --> %ld\n", __func__, val); + if (val == PM_QOS_DEFAULT_VALUE) + val = nx_devfreq_profile.initial_freq; + + cur_freq = atomic_read(&nx_devfreq->cur_freq); + + new = val; + new = max((unsigned int)pm_qos_request(PM_QOS_BUS_THROUGHPUT), new); + if (new != cur_freq) + changed = true; + + if (changed) { + dev_dbg(nx_devfreq->dev, "%s changed from %d to %d\n", + __func__, cur_freq, new); + atomic_set(&nx_devfreq->req_freq, new); + mutex_lock(&devfreq_nb->df->lock); + 
update_devfreq(devfreq_nb->df); + mutex_unlock(&devfreq_nb->df->lock); + return NOTIFY_OK; + } + + return NOTIFY_STOP; +} + +static int nx_devfreq_register_notifier(struct devfreq *devfreq) +{ + struct nx_devfreq *nx_devfreq; + int ret; + + nx_devfreq = devfreq->data; + + dev_dbg(nx_devfreq->dev, "%s: E\n", __func__); + nx_devfreq->nb.df = devfreq; + nx_devfreq->nb.nb.notifier_call = nx_devfreq_pm_qos_notifier; + + ret = pm_qos_add_notifier(nx_devfreq->pm_qos_class, + &nx_devfreq->nb.nb); + if (ret) { + dev_err(nx_devfreq->dev, "failed to add notifier\n"); + return ret; + } + + return register_all_pm_qos_notifiers(nx_devfreq->pm_qos_class); +} + +static int nx_devfreq_unregister_notifier(struct devfreq *devfreq) +{ + struct nx_devfreq *nx_devfreq = devfreq->data; + + dev_info(nx_devfreq->dev, "%s: E\n", __func__); + return pm_qos_remove_notifier(nx_devfreq->pm_qos_class, + &nx_devfreq->nb.nb); +} + +/* governor */ +static int nx_governor_get_target(struct devfreq *devfreq, unsigned long *freq) +{ + struct devfreq_dev_status stat; + struct nx_devfreq *nx_devfreq; + int err; + + err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat); + if (err) + return err; + + nx_devfreq = stat.private_data; + *freq = atomic_read(&nx_devfreq->req_freq); + + if (*freq == atomic_read(&nx_devfreq->cur_freq)) { + if (devfreq->min_freq && *freq != devfreq->min_freq) + *freq = devfreq->min_freq; + else if (devfreq->max_freq && *freq != devfreq->max_freq) + *freq = devfreq->max_freq; + } + + return 0; +} + +static void governor_suspend(struct devfreq *devfreq) +{ + struct nx_devfreq *nx_devfreq = devfreq->data; + unsigned long freq; + + nx_devfreq->suspend_freq = 0; + if (atomic_read(&nx_devfreq->cur_freq) != NX_BUS_CLK_HIGH_KHZ) { + freq = NX_BUS_CLK_HIGH_KHZ; + nx_devfreq->suspend_freq = atomic_read(&nx_devfreq->cur_freq); + nx_devfreq_target(devfreq->dev.parent, &freq, 0); + } +} + +static void governor_resume(struct devfreq *devfreq) +{ + struct nx_devfreq *nx_devfreq = devfreq->data; + unsigned long freq; + + if (nx_devfreq->suspend_freq != 0) { + freq = nx_devfreq->suspend_freq; + nx_devfreq_target(devfreq->dev.parent, &freq, 0); + } +} + +static int nx_governor_event_handler(struct devfreq *devfreq, + unsigned int event, void *data) +{ + int ret; + + switch (event) { + case DEVFREQ_GOV_START: + ret = nx_devfreq_register_notifier(devfreq); + if (ret) + return ret; + devfreq_monitor_start(devfreq); + break; + + case DEVFREQ_GOV_STOP: + devfreq_monitor_stop(devfreq); + ret = nx_devfreq_unregister_notifier(devfreq); + if (ret) + return ret; + break; + + case DEVFREQ_GOV_SUSPEND: + governor_suspend(devfreq); + devfreq_monitor_suspend(devfreq); + break; + + case DEVFREQ_GOV_RESUME: + governor_resume(devfreq); + devfreq_monitor_resume(devfreq); + break; + } + + return 0; +} + +static struct devfreq_governor nx_devfreq_governor = { + .name = "nx_devfreq_gov", + .get_target_freq = nx_governor_get_target, + .event_handler = nx_governor_event_handler, +}; + +/* util function */ +static int nx_devfreq_parse_dt(struct device *dev, + struct nx_devfreq *nx_devfreq) +{ + struct device_node *np = dev->of_node; + + if (of_property_read_u32(np, "pll", &nx_devfreq->pll)) { + dev_err(dev, "failed to get dt pll number\n"); + return -EINVAL; + } + + if (of_property_read_string(np, "supply_name", + (const char **)&nx_devfreq->supply_name)) { + dev_err(dev, "failed to get dt supply name\n"); + return -EINVAL; + } + + return 0; +} + +/* platform driver interface */ +static int nx_devfreq_probe(struct platform_device 
*pdev) +{ + struct nx_devfreq *nx_devfreq; + struct bus_opp_table *entry; + + nx_devfreq = devm_kzalloc(&pdev->dev, sizeof(*nx_devfreq), GFP_KERNEL); + if (!nx_devfreq) + return -ENOMEM; + + if (nx_devfreq_parse_dt(&pdev->dev, nx_devfreq)) + return -EINVAL; + + nx_devfreq->regulator = regulator_get(&pdev->dev, + nx_devfreq->supply_name); + if (IS_ERR(nx_devfreq->regulator)) { + dev_err(&pdev->dev, "failed to regulator_get for supply %s\n", + nx_devfreq->supply_name); + return PTR_ERR(nx_devfreq->regulator); + } + + nx_devfreq->bclk = devm_clk_get(&pdev->dev, "bclk"); + if (IS_ERR(nx_devfreq->bclk)) { + dev_err(&pdev->dev, "failed to get bus clock\n"); + return PTR_ERR(nx_devfreq->bclk); + } + + atomic_set(&nx_devfreq->cur_freq, clk_get_rate(nx_devfreq->bclk) / KHZ); + dev_info(&pdev->dev, "Current bus clock rate: %dKHz\n", + atomic_read(&nx_devfreq->cur_freq)); + + entry = &bus_opp_table[0]; + while (entry->clk != 0) { + dev_pm_opp_add(&pdev->dev, entry->clk, 0); + entry++; + } + + nx_devfreq->pm_qos_class = PM_QOS_BUS_THROUGHPUT; + nx_devfreq_profile.initial_freq = NX_BUS_CLK_LOW_KHZ; + nx_devfreq->devfreq = devm_devfreq_add_device(&pdev->dev, + &nx_devfreq_profile, + "nx_devfreq_gov", + nx_devfreq); + + platform_set_drvdata(pdev, nx_devfreq); + nx_devfreq->dev = &pdev->dev; + _nx_devfreq = nx_devfreq; + + pm_qos_add_request(&nx_bus_qos, PM_QOS_BUS_THROUGHPUT, + nx_devfreq_profile.initial_freq); + pm_qos_update_request_timeout(&nx_bus_qos, NX_BUS_CLK_HIGH_KHZ, + 60 * 1000 * 1000); + + return 0; +} + +static int nx_devfreq_remove(struct platform_device *pdev) +{ + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int nx_devfreq_pm_prepare(struct device *dev) +{ + struct nx_devfreq *nx_devfreq = dev_get_drvdata(dev); + struct devfreq *devfreq = nx_devfreq->devfreq; + + return devfreq_suspend_device(devfreq); +} + +static void nx_devfreq_pm_complete(struct device *dev) +{ + struct nx_devfreq *nx_devfreq = dev_get_drvdata(dev); + struct devfreq *devfreq = nx_devfreq->devfreq; + + devfreq_resume_device(devfreq); +} + +static const struct dev_pm_ops nx_devfreq_pm_ops = { + .prepare = nx_devfreq_pm_prepare, + .complete = nx_devfreq_pm_complete, +}; +#endif + +static const struct of_device_id nx_devfreq_of_match[] = { + { .compatible = "nexell,s5pxx18-devfreq" }, + { }, +}; + +MODULE_DEVICE_TABLE(of, nx_devfreq_of_match); + +static struct platform_driver nx_devfreq_driver = { + .probe = nx_devfreq_probe, + .remove = nx_devfreq_remove, + .driver = { + .name = "nx-devfreq", + .of_match_table = nx_devfreq_of_match, +#ifdef CONFIG_PM_SLEEP + .pm = &nx_devfreq_pm_ops, +#endif + }, +}; + +static int __init nx_devfreq_init(void) +{ + int ret; + + ret = devfreq_add_governor(&nx_devfreq_governor); + if (ret) { + pr_err("%s: failed to add governor: %d\n", __func__, ret); + return ret; + } + + ret = platform_driver_register(&nx_devfreq_driver); + if (ret) + devfreq_remove_governor(&nx_devfreq_governor); + + return ret; +} +module_init(nx_devfreq_init); + +static void __exit nx_devfreq_exit(void) +{ + int ret; + + platform_driver_unregister(&nx_devfreq_driver); + + ret = devfreq_remove_governor(&nx_devfreq_governor); + if (ret) + pr_err("%s: failed to remove governor: %d\n", __func__, ret); +} +module_exit(nx_devfreq_exit); + +MODULE_AUTHOR("SungwooPark "); +MODULE_DESCRIPTION("Nexell S5Pxx18 series SoC devfreq driver"); +MODULE_LICENSE("GPL v2"); diff -ENwbur a/drivers/gpu/arm/Kbuild b/drivers/gpu/arm/Kbuild --- a/drivers/gpu/arm/Kbuild 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/Kbuild 
2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,13 @@ +# +# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the GNU General Public License version 2 +# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained from Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + + +obj-$(CONFIG_MALI400) += mali400/ diff -ENwbur a/drivers/gpu/arm/Kconfig b/drivers/gpu/arm/Kconfig --- a/drivers/gpu/arm/Kconfig 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/Kconfig 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,15 @@ +# +# (C) COPYRIGHT 2012 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the GNU General Public License version 2 +# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained from Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# + + +menu "ARM GPU Configuration" +source "drivers/gpu/arm/mali400/Kconfig" +endmenu diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_broadcast.c b/drivers/gpu/arm/mali400/common/mali_broadcast.c --- a/drivers/gpu/arm/mali400/common/mali_broadcast.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_broadcast.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,142 @@ +/* + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_broadcast.h" +#include "mali_kernel_common.h" +#include "mali_osk.h" + +#define MALI_BROADCAST_REGISTER_SIZE 0x1000 +#define MALI_BROADCAST_REG_BROADCAST_MASK 0x0 +#define MALI_BROADCAST_REG_INTERRUPT_MASK 0x4 + +struct mali_bcast_unit { + struct mali_hw_core hw_core; + u32 current_mask; +}; + +struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource) +{ + struct mali_bcast_unit *bcast_unit = NULL; + + MALI_DEBUG_ASSERT_POINTER(resource); + MALI_DEBUG_PRINT(2, ("Broadcast: Creating Mali Broadcast unit: %s\n", + resource->description)); + + bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit)); + if (NULL == bcast_unit) { + MALI_PRINT_ERROR(("Broadcast: Failed to allocate memory for Broadcast unit\n")); + return NULL; + } + + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, + resource, MALI_BROADCAST_REGISTER_SIZE)) { + bcast_unit->current_mask = 0; + mali_bcast_reset(bcast_unit); + + return bcast_unit; + } else { + MALI_PRINT_ERROR(("Broadcast: Failed map broadcast unit\n")); + } + + _mali_osk_free(bcast_unit); + + return NULL; +} + +void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit) +{ + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + mali_hw_core_delete(&bcast_unit->hw_core); + _mali_osk_free(bcast_unit); +} + +/* Call this function to add the @group's id into bcast mask + * Note: redundant calling this function with same @group + * doesn't make any difference as calling it once + */ +void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, + struct mali_group *group) +{ + u32 bcast_id; + u32 broadcast_mask; + + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + MALI_DEBUG_ASSERT_POINTER(group); + + bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group)); + + broadcast_mask = bcast_unit->current_mask; + + broadcast_mask |= (bcast_id); /* add PP core to broadcast */ + broadcast_mask |= (bcast_id << 16); /* add MMU to broadcast */ + + /* store mask so we can restore on reset */ + bcast_unit->current_mask = broadcast_mask; +} + +/* Call this function to remove @group's id from bcast mask + * Note: redundant calling this function with same @group + * doesn't make any difference as calling it once + */ +void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, + struct mali_group *group) +{ + u32 bcast_id; + u32 broadcast_mask; + + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + MALI_DEBUG_ASSERT_POINTER(group); + + bcast_id = mali_pp_core_get_bcast_id(mali_group_get_pp_core(group)); + + broadcast_mask = bcast_unit->current_mask; + + broadcast_mask &= ~((bcast_id << 16) | bcast_id); + + /* store mask so we can restore on reset */ + bcast_unit->current_mask = broadcast_mask; +} + +void mali_bcast_reset(struct mali_bcast_unit *bcast_unit) +{ + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + + MALI_DEBUG_PRINT(4, + ("Broadcast: setting mask 0x%08X + 0x%08X (reset)\n", + bcast_unit->current_mask, + bcast_unit->current_mask & 0xFF)); + + /* set broadcast mask */ + mali_hw_core_register_write(&bcast_unit->hw_core, + MALI_BROADCAST_REG_BROADCAST_MASK, + bcast_unit->current_mask); + + /* set IRQ override mask */ + mali_hw_core_register_write(&bcast_unit->hw_core, + MALI_BROADCAST_REG_INTERRUPT_MASK, + bcast_unit->current_mask & 0xFF); +} + +void mali_bcast_disable(struct mali_bcast_unit *bcast_unit) +{ + MALI_DEBUG_ASSERT_POINTER(bcast_unit); + + MALI_DEBUG_PRINT(4, ("Broadcast: setting mask 0x0 + 0x0 (disable)\n")); + + /* set broadcast mask */ + mali_hw_core_register_write(&bcast_unit->hw_core, + 
MALI_BROADCAST_REG_BROADCAST_MASK, + 0x0); + + /* set IRQ override mask */ + mali_hw_core_register_write(&bcast_unit->hw_core, + MALI_BROADCAST_REG_INTERRUPT_MASK, + 0x0); +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_broadcast.h b/drivers/gpu/arm/mali400/common/mali_broadcast.h --- a/drivers/gpu/arm/mali400/common/mali_broadcast.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_broadcast.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_BROADCAST_H__ +#define __MALI_BROADCAST_H__ + +/* + * Interface for the broadcast unit on Mali-450. + * + * - Represents up to 8 × (MMU + PP) pairs. + * - Supports dynamically changing which (MMU + PP) pairs receive the broadcast by + * setting a mask. + */ + +#include "mali_hw_core.h" +#include "mali_group.h" + +struct mali_bcast_unit; + +struct mali_bcast_unit *mali_bcast_unit_create(const _mali_osk_resource_t *resource); +void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit); + +/* Add a group to the list of (MMU + PP) pairs broadcasts go out to. */ +void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group); + +/* Remove a group to the list of (MMU + PP) pairs broadcasts go out to. */ +void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group); + +/* Re-set cached mask. This needs to be called after having been suspended. */ +void mali_bcast_reset(struct mali_bcast_unit *bcast_unit); + +/** + * Disable broadcast unit + * + * mali_bcast_enable must be called to re-enable the unit. Cores may not be + * added or removed when the unit is disabled. + */ +void mali_bcast_disable(struct mali_bcast_unit *bcast_unit); + +/** + * Re-enable broadcast unit + * + * This resets the masks to include the cores present when mali_bcast_disable was called. + */ +MALI_STATIC_INLINE void mali_bcast_enable(struct mali_bcast_unit *bcast_unit) +{ + mali_bcast_reset(bcast_unit); +} + +#endif /* __MALI_BROADCAST_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_control_timer.c b/drivers/gpu/arm/mali400/common/mali_control_timer.c --- a/drivers/gpu/arm/mali400/common/mali_control_timer.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_control_timer.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2010-2012, 2014-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_kernel_utilization.h" +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_dvfs_policy.h" +#include "mali_control_timer.h" + +static u64 period_start_time = 0; + +static _mali_osk_timer_t *mali_control_timer = NULL; +static mali_bool timer_running = MALI_FALSE; + +static u32 mali_control_timeout = 1000; + +void mali_control_timer_add(u32 timeout) +{ + _mali_osk_timer_add(mali_control_timer, _mali_osk_time_mstoticks(timeout)); +} + +static void mali_control_timer_callback(void *arg) +{ + if (mali_utilization_enabled()) { + struct mali_gpu_utilization_data *util_data = NULL; + u64 time_period = 0; + mali_bool need_add_timer = MALI_TRUE; + + /* Calculate gpu utilization */ + util_data = mali_utilization_calculate(&period_start_time, &time_period, &need_add_timer); + + if (util_data) { +#if defined(CONFIG_MALI_DVFS) + mali_dvfs_policy_realize(util_data, time_period); +#else + mali_utilization_platform_realize(util_data); +#endif + + if (MALI_TRUE == need_add_timer) { + mali_control_timer_add(mali_control_timeout); + } + } + } +} + +/* Init a timer (for now it is used for GPU utilization and dvfs) */ +_mali_osk_errcode_t mali_control_timer_init(void) +{ + _mali_osk_device_data data; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Use device specific settings (if defined) */ + if (0 != data.control_interval) { + mali_control_timeout = data.control_interval; + MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout)); + } + } + + mali_control_timer = _mali_osk_timer_init(); + if (NULL == mali_control_timer) { + return _MALI_OSK_ERR_FAULT; + } + _mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL); + + return _MALI_OSK_ERR_OK; +} + +void mali_control_timer_term(void) +{ + if (NULL != mali_control_timer) { + _mali_osk_timer_del(mali_control_timer); + timer_running = MALI_FALSE; + _mali_osk_timer_term(mali_control_timer); + mali_control_timer = NULL; + } +} + +mali_bool mali_control_timer_resume(u64 time_now) +{ + mali_utilization_data_assert_locked(); + + if (timer_running != MALI_TRUE) { + timer_running = MALI_TRUE; + + period_start_time = time_now; + + mali_utilization_reset(); + + return MALI_TRUE; + } + + return MALI_FALSE; +} + +void mali_control_timer_pause(void) +{ + mali_utilization_data_assert_locked(); + if (timer_running == MALI_TRUE) { + timer_running = MALI_FALSE; + } +} + +void mali_control_timer_suspend(mali_bool suspend) +{ + mali_utilization_data_lock(); + + if (timer_running == MALI_TRUE) { + timer_running = MALI_FALSE; + + mali_utilization_data_unlock(); + + if (suspend == MALI_TRUE) { + _mali_osk_timer_del(mali_control_timer); + mali_utilization_reset(); + } + } else { + mali_utilization_data_unlock(); + } +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_control_timer.h b/drivers/gpu/arm/mali400/common/mali_control_timer.h --- a/drivers/gpu/arm/mali400/common/mali_control_timer.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_control_timer.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
+ * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_CONTROL_TIMER_H__ +#define __MALI_CONTROL_TIMER_H__ + +#include "mali_osk.h" + +_mali_osk_errcode_t mali_control_timer_init(void); + +void mali_control_timer_term(void); + +mali_bool mali_control_timer_resume(u64 time_now); + +void mali_control_timer_suspend(mali_bool suspend); +void mali_control_timer_pause(void); + +void mali_control_timer_add(u32 timeout); + +#endif /* __MALI_CONTROL_TIMER_H__ */ + diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_dlbu.c b/drivers/gpu/arm/mali400/common/mali_dlbu.c --- a/drivers/gpu/arm/mali400/common/mali_dlbu.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_dlbu.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_dlbu.h" +#include "mali_memory.h" +#include "mali_pp.h" +#include "mali_group.h" +#include "mali_osk.h" +#include "mali_hw_core.h" + +/** + * Size of DLBU registers in bytes + */ +#define MALI_DLBU_SIZE 0x400 + +mali_dma_addr mali_dlbu_phys_addr = 0; +static mali_io_address mali_dlbu_cpu_addr = NULL; + +/** + * DLBU register numbers + * Used in the register read/write routines. + * See the hardware documentation for more information about each register + */ +typedef enum mali_dlbu_register { + MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address; + 31:12 Physical address to the page used for the DLBU + 0 DLBU enable - set this bit to 1 enables the AXI bus + between PPs and L2s, setting to 0 disables the router and + no further transactions are sent to DLBU */ + MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address; + 31:12 Virtual address to the page used for the DLBU */ + MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address; + 31:12 Virtual address to the tile list. 
This address is used when + calculating the call address sent to PP.*/ + MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension; + 23:16 Number of tiles in Y direction-1 + 7:0 Number of tiles in X direction-1 */ + MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration; + 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024 + 21:16 2^n number of tiles to be binned to one tile list in Y direction + 5:0 2^n number of tiles to be binned to one tile list in X direction */ + MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions; + 31:24 start position in Y direction for group 1 + 23:16 start position in X direction for group 1 + 15:8 start position in Y direction for group 0 + 7:0 start position in X direction for group 0 */ + MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask; + 7 enable PP7 for load balancing + 6 enable PP6 for load balancing + 5 enable PP5 for load balancing + 4 enable PP4 for load balancing + 3 enable PP3 for load balancing + 2 enable PP2 for load balancing + 1 enable PP1 for load balancing + 0 enable PP0 for load balancing */ +} mali_dlbu_register; + +typedef enum { + PP0ENABLE = 0, + PP1ENABLE, + PP2ENABLE, + PP3ENABLE, + PP4ENABLE, + PP5ENABLE, + PP6ENABLE, + PP7ENABLE +} mali_dlbu_pp_enable; + +struct mali_dlbu_core { + struct mali_hw_core hw_core; /**< Common for all HW cores */ + u32 pp_cores_mask; /**< This is a mask for the PP cores whose operation will be controlled by LBU + see MALI_DLBU_REGISTER_PP_ENABLE_MASK register */ +}; + +_mali_osk_errcode_t mali_dlbu_initialize(void) +{ + MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n")); + + if (_MALI_OSK_ERR_OK == + mali_mmu_get_table_page(&mali_dlbu_phys_addr, + &mali_dlbu_cpu_addr)) { + return _MALI_OSK_ERR_OK; + } + + return _MALI_OSK_ERR_FAULT; +} + +void mali_dlbu_terminate(void) +{ + MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n")); + + if (0 != mali_dlbu_phys_addr && 0 != mali_dlbu_cpu_addr) { + mali_mmu_release_table_page(mali_dlbu_phys_addr, + mali_dlbu_cpu_addr); + mali_dlbu_phys_addr = 0; + mali_dlbu_cpu_addr = 0; + } +} + +struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource) +{ + struct mali_dlbu_core *core = NULL; + + MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description)); + + core = _mali_osk_malloc(sizeof(struct mali_dlbu_core)); + if (NULL != core) { + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE)) { + core->pp_cores_mask = 0; + if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core)) { + return core; + } + MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description)); + mali_hw_core_delete(&core->hw_core); + } + + _mali_osk_free(core); + } else { + MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n")); + } + + return NULL; +} + +void mali_dlbu_delete(struct mali_dlbu_core *dlbu) +{ + MALI_DEBUG_ASSERT_POINTER(dlbu); + mali_hw_core_delete(&dlbu->hw_core); + _mali_osk_free(dlbu); +} + +_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu) +{ + u32 dlbu_registers[7]; + _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; + MALI_DEBUG_ASSERT_POINTER(dlbu); + + MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description)); + + dlbu_registers[0] = mali_dlbu_phys_addr | 1; /* bit 0 enables the whole core */ + dlbu_registers[1] = MALI_DLBU_VIRT_ADDR; + dlbu_registers[2] = 0; + dlbu_registers[3] = 0; + dlbu_registers[4] = 0; + dlbu_registers[5] = 0; + 
dlbu_registers[6] = dlbu->pp_cores_mask; + + /* write reset values to core registers */ + mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, dlbu_registers, 7); + + err = _MALI_OSK_ERR_OK; + + return err; +} + +void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu) +{ + MALI_DEBUG_ASSERT_POINTER(dlbu); + + mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask); +} + +void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group) +{ + struct mali_pp_core *pp_core; + u32 bcast_id; + + MALI_DEBUG_ASSERT_POINTER(dlbu); + MALI_DEBUG_ASSERT_POINTER(group); + + pp_core = mali_group_get_pp_core(group); + bcast_id = mali_pp_core_get_bcast_id(pp_core); + + dlbu->pp_cores_mask |= bcast_id; + MALI_DEBUG_PRINT(3, ("Mali DLBU: Adding core[%d] New mask= 0x%02x\n", bcast_id , dlbu->pp_cores_mask)); +} + +/* Remove a group from the DLBU */ +void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group) +{ + struct mali_pp_core *pp_core; + u32 bcast_id; + + MALI_DEBUG_ASSERT_POINTER(dlbu); + MALI_DEBUG_ASSERT_POINTER(group); + + pp_core = mali_group_get_pp_core(group); + bcast_id = mali_pp_core_get_bcast_id(pp_core); + + dlbu->pp_cores_mask &= ~bcast_id; + MALI_DEBUG_PRINT(3, ("Mali DLBU: Removing core[%d] New mask= 0x%02x\n", bcast_id, dlbu->pp_cores_mask)); +} + +/* Configure the DLBU for \a job. This needs to be done before the job is started on the groups in the DLBU. */ +void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job) +{ + u32 *registers; + MALI_DEBUG_ASSERT(job); + registers = mali_pp_job_get_dlbu_registers(job); + MALI_DEBUG_PRINT(4, ("Mali DLBU: Starting job\n")); + + /* Writing 4 registers: + * DLBU registers except the first two (written once at DLBU initialisation / reset) and the PP_ENABLE_MASK register */ + mali_hw_core_register_write_array_relaxed(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, registers, 4); + +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_dlbu.h b/drivers/gpu/arm/mali400/common/mali_dlbu.h --- a/drivers/gpu/arm/mali400/common/mali_dlbu.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_dlbu.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_DLBU_H__ +#define __MALI_DLBU_H__ + +#define MALI_DLBU_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */ + +#include "mali_osk.h" + +struct mali_pp_job; +struct mali_group; +struct mali_dlbu_core; + +extern mali_dma_addr mali_dlbu_phys_addr; + +_mali_osk_errcode_t mali_dlbu_initialize(void); +void mali_dlbu_terminate(void); + +struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource); +void mali_dlbu_delete(struct mali_dlbu_core *dlbu); + +_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu); + +void mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group); +void mali_dlbu_remove_group(struct mali_dlbu_core *dlbu, struct mali_group *group); + +/** @brief Called to update HW after DLBU state changed + * + * This function must be called after \a mali_dlbu_add_group or \a + * mali_dlbu_remove_group to write the updated mask to hardware, unless the + * same is accomplished by calling \a mali_dlbu_reset. + */ +void mali_dlbu_update_mask(struct mali_dlbu_core *dlbu); + +void mali_dlbu_config_job(struct mali_dlbu_core *dlbu, struct mali_pp_job *job); + +#endif /* __MALI_DLBU_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_dvfs_policy.c b/drivers/gpu/arm/mali400/common/mali_dvfs_policy.c --- a/drivers/gpu/arm/mali400/common/mali_dvfs_policy.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_dvfs_policy.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,312 @@ +/* + * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include "mali_kernel_common.h" +#include "mali_scheduler.h" +#include "mali_dvfs_policy.h" +#include "mali_osk_mali.h" +#include "mali_osk_profiling.h" + +#define CLOCK_TUNING_TIME_DEBUG 0 + +#define MAX_PERFORMANCE_VALUE 256 +#define MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(percent) ((int) ((percent)*(MAX_PERFORMANCE_VALUE)/100.0 + 0.5)) + +/** The max fps the same as display vsync default 60, can set by module insert parameter */ +int mali_max_system_fps = 60; +/** A lower limit on their desired FPS default 58, can set by module insert parameter */ +int mali_desired_fps = 58; + +static int mali_fps_step1 = 0; +static int mali_fps_step2 = 0; + +static int clock_step = -1; +static int cur_clk_step = -1; +static struct mali_gpu_clock *gpu_clk = NULL; + +/*Function prototype */ +static int (*mali_gpu_set_freq)(int) = NULL; +static int (*mali_gpu_get_freq)(void) = NULL; + +static mali_bool mali_dvfs_enabled = MALI_FALSE; + +#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL +static u32 calculate_window_render_fps(u64 time_period) +{ + u32 max_window_number; + u64 tmp; + u64 max = time_period; + u32 leading_zeroes; + u32 shift_val; + u32 time_period_shift; + u32 max_window_number_shift; + u32 ret_val; + + max_window_number = mali_session_max_window_num(); + + /* To avoid float division, extend the dividend to ns unit */ + tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND; + if (tmp > time_period) { + max = tmp; + } + + /* + * We may have 64-bit values, a dividend or a divisor or both + * To avoid dependencies to a 64-bit divider, we shift down the two values + * equally first. + */ + leading_zeroes = _mali_osk_clz((u32)(max >> 32)); + shift_val = 32 - leading_zeroes; + + time_period_shift = (u32)(time_period >> shift_val); + max_window_number_shift = (u32)(tmp >> shift_val); + + ret_val = max_window_number_shift / time_period_shift; + + return ret_val; +} + +static bool mali_pickup_closest_avail_clock(int target_clock_mhz, mali_bool pick_clock_up) +{ + int i = 0; + bool clock_changed = false; + + /* Round up the closest available frequency step for target_clock_hz */ + for (i = 0; i < gpu_clk->num_of_steps; i++) { + /* Find the first item > target_clock_hz */ + if (((int)(gpu_clk->item[i].clock) - target_clock_mhz) > 0) { + break; + } + } + + /* If the target clock greater than the maximum clock just pick the maximum one*/ + if (i == gpu_clk->num_of_steps) { + i = gpu_clk->num_of_steps - 1; + } else { + if ((!pick_clock_up) && (i > 0)) { + i = i - 1; + } + } + + clock_step = i; + if (cur_clk_step != clock_step) { + clock_changed = true; + } + + return clock_changed; +} + +void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period) +{ + int under_perform_boundary_value = 0; + int over_perform_boundary_value = 0; + int current_fps = 0; + int current_gpu_util = 0; + bool clock_changed = false; +#if CLOCK_TUNING_TIME_DEBUG + struct timeval start; + struct timeval stop; + unsigned int elapse_time; + do_gettimeofday(&start); +#endif + u32 window_render_fps; + + if (NULL == gpu_clk) { + MALI_DEBUG_PRINT(2, ("Enable DVFS but patform doesn't Support freq change. 
\n")); + return; + } + + window_render_fps = calculate_window_render_fps(time_period); + + current_fps = window_render_fps; + current_gpu_util = data->utilization_gpu; + + /* Get the specific under_perform_boundary_value and over_perform_boundary_value */ + if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) { + under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90); + over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70); + } else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) { + under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55); + over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35); + } else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) { + under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70); + over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50); + } else { + under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55); + over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35); + } + + MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util)); + MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d, over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value)); + MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d, pressure render fps = %d \n", current_fps, window_render_fps)); + + /* Get current clock value */ + cur_clk_step = mali_gpu_get_freq(); + +#ifdef CONFIG_ARM_S5Pxx18_DEVFREQ + if (1) { +#else + /* Consider offscreen */ + if (0 == current_fps) { +#endif + /* GP or PP under perform, need to give full power */ + if (current_gpu_util > over_perform_boundary_value) { + if (cur_clk_step != gpu_clk->num_of_steps - 1) { + clock_changed = true; + clock_step = gpu_clk->num_of_steps - 1; + } + } + + /* If GPU is idle, use lowest power */ + if (0 == current_gpu_util) { + if (cur_clk_step != 0) { + clock_changed = true; + clock_step = 0; + } + } + + goto real_setting; + } + + /* 2. 
Calculate target clock if the GPU clock can be tuned */ + if (-1 != cur_clk_step) { + int target_clk_mhz = -1; + mali_bool pick_clock_up = MALI_TRUE; + + if (current_gpu_util > under_perform_boundary_value) { + /* when under perform, need to consider the fps part */ + target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps; + pick_clock_up = MALI_TRUE; + } else if (current_gpu_util < over_perform_boundary_value) { + /* when over perform, did't need to consider fps, system didn't want to reach desired fps */ + target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value; + pick_clock_up = MALI_FALSE; + } + + if (-1 != target_clk_mhz) { + clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up); + } + } + +real_setting: + if (clock_changed) { + mali_gpu_set_freq(clock_step); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, + gpu_clk->item[clock_step].clock, + gpu_clk->item[clock_step].vol / 1000, + 0, 0, 0); + } + +#if CLOCK_TUNING_TIME_DEBUG + do_gettimeofday(&stop); + + elapse_time = timeval_to_ns(&stop) - timeval_to_ns(&start); + MALI_DEBUG_PRINT(2, ("Using ARM power policy: eclapse time = %d\n", elapse_time)); +#endif +} + +_mali_osk_errcode_t mali_dvfs_policy_init(void) +{ + _mali_osk_device_data data; + _mali_osk_errcode_t err = _MALI_OSK_ERR_OK; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + if ((NULL != data.get_clock_info) && (NULL != data.set_freq) && (NULL != data.get_freq)) { + MALI_DEBUG_PRINT(2, ("Mali DVFS init: using arm dvfs policy \n")); + + + mali_fps_step1 = mali_max_system_fps / 3; + mali_fps_step2 = mali_max_system_fps / 5; + + data.get_clock_info(&gpu_clk); + + if (gpu_clk != NULL) { +#ifdef DEBUG + int i; + for (i = 0; i < gpu_clk->num_of_steps; i++) { + MALI_DEBUG_PRINT(5, ("mali gpu clock info: step%d clock(%d)Hz,vol(%d) \n", + i, gpu_clk->item[i].clock, gpu_clk->item[i].vol)); + } +#endif + } else { + MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform didn't define enough info for ddk to do DVFS \n")); + } + + mali_gpu_get_freq = data.get_freq; + mali_gpu_set_freq = data.set_freq; + + if ((NULL != gpu_clk) && (gpu_clk->num_of_steps > 0) + && (NULL != mali_gpu_get_freq) && (NULL != mali_gpu_set_freq)) { + mali_dvfs_enabled = MALI_TRUE; + } + } else { + MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform function callback incomplete, need check mali_gpu_device_data in platform .\n")); + } + } else { + err = _MALI_OSK_ERR_FAULT; + MALI_DEBUG_PRINT(2, ("Mali DVFS init: get platform data error .\n")); + } + + return err; +} + +/* + * Always give full power when start a new period, + * if mali dvfs enabled, for performance consideration + */ +void mali_dvfs_policy_new_period(void) +{ + /* Always give full power when start a new period */ + unsigned int cur_clk_step = 0; + + cur_clk_step = mali_gpu_get_freq(); + + if (cur_clk_step != (gpu_clk->num_of_steps - 1)) { + mali_gpu_set_freq(gpu_clk->num_of_steps - 1); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gpu_clk->item[gpu_clk->num_of_steps - 1].clock, + gpu_clk->item[gpu_clk->num_of_steps - 1].vol / 1000, 0, 0, 0); + } +} + +mali_bool mali_dvfs_policy_enabled(void) +{ + return mali_dvfs_enabled; +} + +#if defined(CONFIG_MALI400_PROFILING) 
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item) +{ + if (mali_platform_device != NULL) { + + struct mali_gpu_device_data *device_data = NULL; + device_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data; + + if ((NULL != device_data->get_clock_info) && (NULL != device_data->get_freq)) { + + int cur_clk_step = device_data->get_freq(); + struct mali_gpu_clock *mali_gpu_clk = NULL; + + device_data->get_clock_info(&mali_gpu_clk); + clk_item->clock = mali_gpu_clk->item[cur_clk_step].clock; + clk_item->vol = mali_gpu_clk->item[cur_clk_step].vol; + } else { + MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: platform function callback incomplete, need check mali_gpu_device_data in platform .\n")); + } + } +} +#endif + diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_dvfs_policy.h b/drivers/gpu/arm/mali400/common/mali_dvfs_policy.h --- a/drivers/gpu/arm/mali400/common/mali_dvfs_policy.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_dvfs_policy.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2010-2012, 2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_DVFS_POLICY_H__ +#define __MALI_DVFS_POLICY_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period); + +_mali_osk_errcode_t mali_dvfs_policy_init(void); + +void mali_dvfs_policy_new_period(void); + +mali_bool mali_dvfs_policy_enabled(void); + +#if defined(CONFIG_MALI400_PROFILING) +void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item); +#endif + +#ifdef __cplusplus +} +#endif + +#endif/* __MALI_DVFS_POLICY_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_executor.c b/drivers/gpu/arm/mali400/common/mali_executor.c --- a/drivers/gpu/arm/mali400/common/mali_executor.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_executor.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,2693 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_executor.h" +#include "mali_scheduler.h" +#include "mali_kernel_common.h" +#include "mali_kernel_core.h" +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_pp.h" +#include "mali_pp_job.h" +#include "mali_group.h" +#include "mali_pm.h" +#include "mali_timeline.h" +#include "mali_osk_profiling.h" +#include "mali_session.h" +#include "mali_osk_mali.h" + +/* + * If dma_buf with map on demand is used, we defer job deletion and job queue + * if in atomic context, since both might sleep. 
+ */ +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1 +#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1 +#endif /* !defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */ + +/* + * ---------- static type definitions (structs, enums, etc) ---------- + */ + +enum mali_executor_state_t { + EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */ + EXEC_STATE_DISABLED, /* Disabled by core scaling (do not use) */ + EXEC_STATE_EMPTY, /* No child groups for virtual group (do not use) */ + EXEC_STATE_INACTIVE, /* Can be used, but must be activate first */ + EXEC_STATE_IDLE, /* Active and ready to be used */ + EXEC_STATE_WORKING, /* Executing a job */ +}; + +/* + * ---------- global variables (exported due to inline functions) ---------- + */ + +/* Lock for this module (protecting all HW access except L2 caches) */ +_mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL; + +mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX]; + +/* + * ---------- static variables ---------- + */ + +/* Used to defer job scheduling */ +static _mali_osk_wq_work_t *executor_wq_high_pri = NULL; + +/* Store version from GP and PP (user space wants to know this) */ +static u32 pp_version = 0; +static u32 gp_version = 0; + +/* List of physical PP groups which are disabled by some external source */ +static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled); +static u32 group_list_disabled_count = 0; + +/* List of groups which can be used, but activate first */ +static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive); +static u32 group_list_inactive_count = 0; + +/* List of groups which are active and ready to be used */ +static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle); +static u32 group_list_idle_count = 0; + +/* List of groups which are executing a job */ +static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working); +static u32 group_list_working_count = 0; + +/* Virtual group (if any) */ +static struct mali_group *virtual_group = NULL; + +/* Virtual group state is tracked with a state variable instead of 4 lists */ +static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT; + +/* GP group */ +static struct mali_group *gp_group = NULL; + +/* GP group state is tracked with a state variable instead of 4 lists */ +static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT; + +static u32 gp_returned_cookie = 0; + +/* Total number of physical PP cores present */ +static u32 num_physical_pp_cores_total = 0; + +/* Number of physical cores which are enabled */ +static u32 num_physical_pp_cores_enabled = 0; + +/* Enable or disable core scaling */ +static mali_bool core_scaling_enabled = MALI_TRUE; + +/* Variables to allow safe pausing of the scheduler */ +static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL; +static u32 pause_count = 0; + +/* PP cores haven't been enabled because of some pp cores haven't been disabled. */ +static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 }; + +/* Variables used to implement notify pp core changes to userspace when core scaling + * is finished in mali_executor_complete_group() function. 
*/ +static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL; +static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL; + +/* + * ---------- Forward declaration of static functions ---------- + */ +static mali_bool mali_executor_is_suspended(void *data); +static mali_bool mali_executor_is_working(void); +static void mali_executor_disable_empty_virtual(void); +static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group); +static mali_bool mali_executor_has_virtual_group(void); +static mali_bool mali_executor_virtual_group_is_usable(void); +static void mali_executor_schedule(void); +static void mali_executor_wq_schedule(void *arg); +static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job); +static void mali_executor_complete_group(struct mali_group *group, + mali_bool success, + struct mali_gp_job **gp_job_done, + struct mali_pp_job **pp_job_done); +static void mali_executor_change_state_pp_physical(struct mali_group *group, + _mali_osk_list_t *old_list, + u32 *old_count, + _mali_osk_list_t *new_list, + u32 *new_count); +static mali_bool mali_executor_group_is_in_state(struct mali_group *group, + enum mali_executor_state_t state); + +static void mali_executor_group_enable_internal(struct mali_group *group); +static void mali_executor_group_disable_internal(struct mali_group *group); +static void mali_executor_core_scale(unsigned int target_core_nr); +static void mali_executor_core_scale_in_group_complete(struct mali_group *group); +static void mali_executor_notify_core_change(u32 num_cores); +static void mali_executor_wq_notify_core_change(void *arg); +static void mali_executor_change_group_status_disabled(struct mali_group *group); +static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group); +static void mali_executor_set_state_pp_physical(struct mali_group *group, + _mali_osk_list_t *new_list, + u32 *new_count); + +/* + * ---------- Actual implementation ---------- + */ + +_mali_osk_errcode_t mali_executor_initialize(void) +{ + mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR); + if (NULL == mali_executor_lock_obj) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL); + if (NULL == executor_wq_high_pri) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + executor_working_wait_queue = _mali_osk_wait_queue_init(); + if (NULL == executor_working_wait_queue) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL); + if (NULL == executor_wq_notify_core_change) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init(); + if (NULL == executor_notify_core_change_wait_queue) { + mali_executor_terminate(); + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +void mali_executor_terminate(void) +{ + if (NULL != executor_notify_core_change_wait_queue) { + _mali_osk_wait_queue_term(executor_notify_core_change_wait_queue); + executor_notify_core_change_wait_queue = NULL; + } + + if (NULL != executor_wq_notify_core_change) { + _mali_osk_wq_delete_work(executor_wq_notify_core_change); + executor_wq_notify_core_change = NULL; + } + + if (NULL != executor_working_wait_queue) { + 
_mali_osk_wait_queue_term(executor_working_wait_queue); + executor_working_wait_queue = NULL; + } + + if (NULL != executor_wq_high_pri) { + _mali_osk_wq_delete_work(executor_wq_high_pri); + executor_wq_high_pri = NULL; + } + + if (NULL != mali_executor_lock_obj) { + _mali_osk_spinlock_irq_term(mali_executor_lock_obj); + mali_executor_lock_obj = NULL; + } +} + +void mali_executor_populate(void) +{ + u32 num_groups; + u32 i; + + num_groups = mali_group_get_glob_num_groups(); + + /* Do we have a virtual group? */ + for (i = 0; i < num_groups; i++) { + struct mali_group *group = mali_group_get_glob_group(i); + + if (mali_group_is_virtual(group)) { + virtual_group = group; + virtual_group_state = EXEC_STATE_INACTIVE; + break; + } + } + + /* Find all the available physical GP and PP cores */ + for (i = 0; i < num_groups; i++) { + struct mali_group *group = mali_group_get_glob_group(i); + + if (NULL != group) { + struct mali_pp_core *pp_core = mali_group_get_pp_core(group); + struct mali_gp_core *gp_core = mali_group_get_gp_core(group); + + if (!mali_group_is_virtual(group)) { + if (NULL != pp_core) { + if (0 == pp_version) { + /* Retrieve PP version from the first available PP core */ + pp_version = mali_pp_core_get_version(pp_core); + } + + if (NULL != virtual_group) { + mali_executor_lock(); + mali_group_add_group(virtual_group, group); + mali_executor_unlock(); + } else { + _mali_osk_list_add(&group->executor_list, &group_list_inactive); + group_list_inactive_count++; + } + + num_physical_pp_cores_total++; + } else { + MALI_DEBUG_ASSERT_POINTER(gp_core); + + if (0 == gp_version) { + /* Retrieve GP version */ + gp_version = mali_gp_core_get_version(gp_core); + } + + gp_group = group; + gp_group_state = EXEC_STATE_INACTIVE; + } + + } + } + } + + num_physical_pp_cores_enabled = num_physical_pp_cores_total; +} + +void mali_executor_depopulate(void) +{ + struct mali_group *group; + struct mali_group *temp; + + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state); + + if (NULL != gp_group) { + mali_group_delete(gp_group); + gp_group = NULL; + } + + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state); + + if (NULL != virtual_group) { + mali_group_delete(virtual_group); + virtual_group = NULL; + } + + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working)); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) { + mali_group_delete(group); + } + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) { + mali_group_delete(group); + } + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) { + mali_group_delete(group); + } +} + +void mali_executor_suspend(void) +{ + mali_executor_lock(); + + /* Increment the pause_count so that no more jobs will be scheduled */ + pause_count++; + + mali_executor_unlock(); + + _mali_osk_wait_queue_wait_event(executor_working_wait_queue, + mali_executor_is_suspended, NULL); + + /* + * mali_executor_complete_XX() leaves jobs in idle state. + * deactivate option is used when we are going to power down + * the entire GPU (OS suspend) and want a consistent SW vs HW + * state. + */ + mali_executor_lock(); + + mali_executor_deactivate_list_idle(MALI_TRUE); + + /* + * The following steps are used to deactive all of activated + * (MALI_GROUP_STATE_ACTIVE) and activating (MALI_GROUP + * _STAET_ACTIVATION_PENDING) groups, to make sure the variable + * pd_mask_wanted is equal with 0. 
+	 */
+	if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) {
+		gp_group_state = EXEC_STATE_INACTIVE;
+		mali_group_deactivate(gp_group);
+	}
+
+	if (mali_executor_has_virtual_group()) {
+		if (MALI_GROUP_STATE_INACTIVE
+		    != mali_group_get_state(virtual_group)) {
+			virtual_group_state = EXEC_STATE_INACTIVE;
+			mali_group_deactivate(virtual_group);
+		}
+	}
+
+	if (0 < group_list_inactive_count) {
+		struct mali_group *group;
+		struct mali_group *temp;
+
+		_MALI_OSK_LIST_FOREACHENTRY(group, temp,
+					    &group_list_inactive,
+					    struct mali_group, executor_list) {
+			if (MALI_GROUP_STATE_ACTIVATION_PENDING
+			    == mali_group_get_state(group)) {
+				mali_group_deactivate(group);
+			}
+
+			/*
+			 * On the Mali-450 platform a physical group on the
+			 * inactive list may be in the
+			 * MALI_GROUP_STATE_ACTIVATION_PENDING state, so
+			 * deactivating it alone is not enough: it must also
+			 * be added back to the virtual group. The virtual
+			 * group is guaranteed to be INACTIVE at this point,
+			 * so adding the physical group back here is safe.
+			 */
+			if (NULL != virtual_group) {
+				_mali_osk_list_delinit(&group->executor_list);
+				group_list_inactive_count--;
+
+				mali_group_add_group(virtual_group, group);
+			}
+		}
+	}
+
+	mali_executor_unlock();
+}
+
+void mali_executor_resume(void)
+{
+	mali_executor_lock();
+
+	/* Decrement pause_count to allow scheduling again (if it reaches 0) */
+	pause_count--;
+	if (0 == pause_count) {
+		mali_executor_schedule();
+	}
+
+	mali_executor_unlock();
+}
+
+u32 mali_executor_get_num_cores_total(void)
+{
+	return num_physical_pp_cores_total;
+}
+
+u32 mali_executor_get_num_cores_enabled(void)
+{
+	return num_physical_pp_cores_enabled;
+}
+
+struct mali_pp_core *mali_executor_get_virtual_pp(void)
+{
+	MALI_DEBUG_ASSERT_POINTER(virtual_group);
+	MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core);
+	return virtual_group->pp_core;
+}
+
+struct mali_group *mali_executor_get_virtual_group(void)
+{
+	return virtual_group;
+}
+
+void mali_executor_zap_all_active(struct mali_session_data *session)
+{
+	struct mali_group *group;
+	struct mali_group *temp;
+	mali_bool ret;
+
+	mali_executor_lock();
+
+	/*
+	 * This function is a bit complicated because
+	 * mali_group_zap_session() can fail. This only happens when the
+	 * group has an unhandled page fault pending.
+	 * We need to make sure that page fault is handled before we
+	 * return, so that we know all outstanding MMU transactions have
+	 * completed. This allows the caller to safely remove physical
+	 * pages once we return.
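+	 * (Note that the working-list loop below retries the zap once
+	 * before giving up and force-completing the group's job.)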
+ */ + + MALI_DEBUG_ASSERT(NULL != gp_group); + ret = mali_group_zap_session(gp_group, session); + if (MALI_FALSE == ret) { + struct mali_gp_job *gp_job = NULL; + + mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL); + + MALI_DEBUG_ASSERT_POINTER(gp_job); + + /* GP job completed, make sure it is freed */ + mali_scheduler_complete_gp_job(gp_job, MALI_FALSE, + MALI_TRUE, MALI_TRUE); + } + + if (mali_executor_has_virtual_group()) { + ret = mali_group_zap_session(virtual_group, session); + if (MALI_FALSE == ret) { + struct mali_pp_job *pp_job = NULL; + + mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job); + + if (NULL != pp_job) { + /* PP job completed, make sure it is freed */ + mali_scheduler_complete_pp_job(pp_job, 0, + MALI_FALSE, MALI_TRUE); + } + } + } + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, + struct mali_group, executor_list) { + ret = mali_group_zap_session(group, session); + if (MALI_FALSE == ret) { + ret = mali_group_zap_session(group, session); + if (MALI_FALSE == ret) { + struct mali_pp_job *pp_job = NULL; + + mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job); + + if (NULL != pp_job) { + /* PP job completed, free it */ + mali_scheduler_complete_pp_job(pp_job, + 0, MALI_FALSE, + MALI_TRUE); + } + } + } + } + + mali_executor_unlock(); +} + +void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule) +{ + if (MALI_SCHEDULER_MASK_EMPTY != mask) { + if (MALI_TRUE == deferred_schedule) { + _mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri); + } else { + /* Schedule from this thread*/ + mali_executor_lock(); + mali_executor_schedule(); + mali_executor_unlock(); + } + } +} + +_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, + mali_bool in_upper_half) +{ + enum mali_interrupt_result int_result; + mali_bool time_out = MALI_FALSE; + + MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n", + mali_group_core_description(group), + in_upper_half ? 
"upper" : "bottom")); + + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_working(group)); + + if (mali_group_has_timed_out(group)) { + int_result = MALI_INTERRUPT_RESULT_ERROR; + time_out = MALI_TRUE; + MALI_PRINT(("Executor GP: Job %d Timeout on %s\n", + mali_gp_job_get_id(group->gp_running_job), + mali_group_core_description(group))); + } else { + int_result = mali_group_get_interrupt_result_gp(group); + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } + +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + /* No interrupts signalled, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#else + MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result); +#endif + + mali_group_mask_all_interrupts_gp(group); + + if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) { + if (mali_group_gp_is_active(group)) { + /* Only VS completed so far, while PLBU is still active */ + + /* Enable all but the current interrupt */ + mali_group_enable_interrupts_gp(group, int_result); + + mali_executor_unlock(); + return _MALI_OSK_ERR_OK; + } + } else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) { + if (mali_group_gp_is_active(group)) { + /* Only PLBU completed so far, while VS is still active */ + + /* Enable all but the current interrupt */ + mali_group_enable_interrupts_gp(group, int_result); + + mali_executor_unlock(); + return _MALI_OSK_ERR_OK; + } + } else if (MALI_INTERRUPT_RESULT_OOM == int_result) { + struct mali_gp_job *job = mali_group_get_running_gp_job(group); + + /* PLBU out of mem */ + MALI_DEBUG_PRINT(3, ("Executor: PLBU needs more heap memory\n")); + +#if defined(CONFIG_MALI400_PROFILING) + /* Give group a chance to generate a SUSPEND event */ + mali_group_oom(group); +#endif + + /* + * no need to hold interrupt raised while + * waiting for more memory. + */ + mali_executor_send_gp_oom_to_user(job); + + mali_executor_unlock(); + + return _MALI_OSK_ERR_OK; + } + + /* We should now have a real interrupt to handle */ + + MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n", + mali_group_core_description(group), + (MALI_INTERRUPT_RESULT_ERROR == int_result) ? + "ERROR" : "success")); + + if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) { + /* Don't bother to do processing of errors in upper half */ + mali_executor_unlock(); + + if (MALI_FALSE == time_out) { + mali_group_schedule_bottom_half_gp(group); + } + } else { + struct mali_gp_job *job; + mali_bool success; + + if (MALI_TRUE == time_out) { + mali_group_dump_status(group); + } + + success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ? + MALI_TRUE : MALI_FALSE; + + mali_executor_complete_group(group, success, &job, NULL); + + mali_executor_unlock(); + + /* GP jobs always fully complete */ + MALI_DEBUG_ASSERT(NULL != job); + + /* This will notify user space and close the job object */ + mali_scheduler_complete_gp_job(job, success, + MALI_TRUE, MALI_TRUE); + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, + mali_bool in_upper_half) +{ + enum mali_interrupt_result int_result; + mali_bool time_out = MALI_FALSE; + + MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n", + mali_group_core_description(group), + in_upper_half ? 
"upper" : "bottom")); + + mali_executor_lock(); + + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + + if (in_upper_half) { + if (mali_group_is_in_virtual(group)) { + /* Child groups should never handle PP interrupts */ + MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group)); + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_working(group)); + MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group)); + + if (mali_group_has_timed_out(group)) { + int_result = MALI_INTERRUPT_RESULT_ERROR; + time_out = MALI_TRUE; + MALI_PRINT(("Executor PP: Job %d Timeout on %s\n", + mali_pp_job_get_id(group->pp_running_job), + mali_group_core_description(group))); + } else { + int_result = mali_group_get_interrupt_result_pp(group); + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } + +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + /* No interrupts signalled, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) { + if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) { + /* Some child groups are still working, so nothing to do right now */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + } +#else + MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result); +#endif + + /* We should now have a real interrupt to handle */ + + MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n", + mali_group_core_description(group), + (MALI_INTERRUPT_RESULT_ERROR == int_result) ? + "ERROR" : "success")); + + if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) { + /* Don't bother to do processing of errors in upper half */ + mali_group_mask_all_interrupts_pp(group); + mali_executor_unlock(); + + if (MALI_FALSE == time_out) { + mali_group_schedule_bottom_half_pp(group); + } + } else { + struct mali_pp_job *job = NULL; + mali_bool success; + + if (MALI_TRUE == time_out) { + mali_group_dump_status(group); + } + + success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ? + MALI_TRUE : MALI_FALSE; + + mali_executor_complete_group(group, success, NULL, &job); + + mali_executor_unlock(); + + if (NULL != job) { + /* Notify user space and close the job object */ + mali_scheduler_complete_pp_job(job, + num_physical_pp_cores_total, + MALI_TRUE, MALI_TRUE); + } + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, + mali_bool in_upper_half) +{ + enum mali_interrupt_result int_result; + + MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n", + mali_group_core_description(group), + in_upper_half ? 
"upper" : "bottom")); + + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_working(group)); + + int_result = mali_group_get_interrupt_result_mmu(group); + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } + +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + if (MALI_INTERRUPT_RESULT_NONE == int_result) { + /* No interrupts signalled, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#else + MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result); +#endif + + /* We should now have a real interrupt to handle */ + + if (in_upper_half) { + /* Don't bother to do processing of errors in upper half */ + + struct mali_group *parent = group->parent_group; + + mali_mmu_mask_all_interrupts(group->mmu); + + mali_executor_unlock(); + + if (NULL == parent) { + mali_group_schedule_bottom_half_mmu(group); + } else { + mali_group_schedule_bottom_half_mmu(parent); + } + + } else { + struct mali_gp_job *gp_job = NULL; + struct mali_pp_job *pp_job = NULL; + +#ifdef DEBUG + + u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu); + u32 status = mali_mmu_get_status(group->mmu); + MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at 0x%x from bus id %d of type %s on %s\n", + (void *)(uintptr_t)fault_address, + (status >> 6) & 0x1F, + (status & 32) ? "write" : "read", + group->mmu->hw_core.description)); + MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n", + mali_mmu_get_rawstat(group->mmu), status)); + mali_mmu_pagedir_diag(mali_session_get_page_directory(group->session), fault_address); +#endif + + mali_executor_complete_group(group, MALI_FALSE, &gp_job, &pp_job); + + mali_executor_unlock(); + + if (NULL != gp_job) { + MALI_DEBUG_ASSERT(NULL == pp_job); + + /* Notify user space and close the job object */ + mali_scheduler_complete_gp_job(gp_job, MALI_FALSE, + MALI_TRUE, MALI_TRUE); + } else if (NULL != pp_job) { + MALI_DEBUG_ASSERT(NULL == gp_job); + + /* Notify user space and close the job object */ + mali_scheduler_complete_pp_job(pp_job, + num_physical_pp_cores_total, + MALI_TRUE, MALI_TRUE); + } + } + + return _MALI_OSK_ERR_OK; +} + +void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups) +{ + u32 i; + mali_bool child_groups_activated = MALI_FALSE; + mali_bool do_schedule = MALI_FALSE; +#if defined(DEBUG) + u32 num_activated = 0; +#endif + + MALI_DEBUG_ASSERT_POINTER(groups); + MALI_DEBUG_ASSERT(0 < num_groups); + + mali_executor_lock(); + + MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups)); + + for (i = 0; i < num_groups; i++) { + MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n", + mali_group_core_description(groups[i]))); + + mali_group_power_up(groups[i]); + + if ((MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) || + (MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)))) { + /* nothing more to do for this group */ + continue; + } + + MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n", + mali_group_core_description(groups[i]))); + +#if defined(DEBUG) + num_activated++; +#endif + + if (mali_group_is_in_virtual(groups[i])) { + /* + * At least one child group of virtual group is powered on. 
+			 */
+			child_groups_activated = MALI_TRUE;
+		} else if (MALI_FALSE == mali_group_is_virtual(groups[i])) {
+			/* Set gp and pp not in virtual to active. */
+			mali_group_set_active(groups[i]);
+		}
+
+		/* Move group from inactive to idle list */
+		if (groups[i] == gp_group) {
+			MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+					  gp_group_state);
+			gp_group_state = EXEC_STATE_IDLE;
+		} else if (MALI_FALSE == mali_group_is_in_virtual(groups[i])
+			   && MALI_FALSE == mali_group_is_virtual(groups[i])) {
+			MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i],
+					  EXEC_STATE_INACTIVE));
+
+			mali_executor_change_state_pp_physical(groups[i],
+							       &group_list_inactive,
+							       &group_list_inactive_count,
+							       &group_list_idle,
+							       &group_list_idle_count);
+		}
+
+		do_schedule = MALI_TRUE;
+	}
+
+	if (mali_executor_has_virtual_group() &&
+	    MALI_TRUE == child_groups_activated &&
+	    MALI_GROUP_STATE_ACTIVATION_PENDING ==
+	    mali_group_get_state(virtual_group)) {
+		/*
+		 * Try to activate the virtual group. This may not succeed
+		 * every time, because not all of its child groups are
+		 * necessarily powered on at once, in which case the
+		 * virtual group stays in the activation pending state.
+		 */
+		if (mali_group_set_active(virtual_group)) {
+			/* Move group from inactive to idle */
+			MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+					  virtual_group_state);
+			virtual_group_state = EXEC_STATE_IDLE;
+
+			MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated, 1 virtual activated.\n", num_groups, num_activated));
+		} else {
+			MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+		}
+	} else {
+		MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+	}
+
+	if (MALI_TRUE == do_schedule) {
+		/* Trigger a schedule */
+		mali_executor_schedule();
+	}
+
+	mali_executor_unlock();
+}
+
+void mali_executor_group_power_down(struct mali_group *groups[],
+				    u32 num_groups)
+{
+	u32 i;
+
+	MALI_DEBUG_ASSERT_POINTER(groups);
+	MALI_DEBUG_ASSERT(0 < num_groups);
+
+	mali_executor_lock();
+
+	MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups));
+
+	for (i = 0; i < num_groups; i++) {
+		/* Groups must be either disabled or inactive; the virtual
+		 * group may additionally be in the empty state. On
+		 * pm_runtime_suspend the virtual group can be powered off,
+		 * and because mali_pm_state_lock must be released before
+		 * mali_executor_lock is acquired, a newly queued physical
+		 * job may pull all physical groups out of the virtual
+		 * group in between. We can therefore only power down an
+		 * empty virtual group; its physical groups are powered up
+		 * again in the subsequent pm_runtime_resume callback.
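+		 * This is also why EXEC_STATE_EMPTY is accepted by the
+		 * assert below.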
+		 */
+		MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(groups[i],
+				  EXEC_STATE_DISABLED) ||
+				  mali_executor_group_is_in_state(groups[i],
+				  EXEC_STATE_INACTIVE) ||
+				  mali_executor_group_is_in_state(groups[i],
+				  EXEC_STATE_EMPTY));
+
+		MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n",
+				     mali_group_core_description(groups[i])));
+
+		mali_group_power_down(groups[i]);
+	}
+
+	MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups));
+
+	mali_executor_unlock();
+}
+
+void mali_executor_abort_session(struct mali_session_data *session)
+{
+	struct mali_group *group;
+	struct mali_group *tmp_group;
+
+	MALI_DEBUG_ASSERT_POINTER(session);
+	MALI_DEBUG_ASSERT(session->is_aborting);
+
+	MALI_DEBUG_PRINT(3,
+			 ("Executor: Aborting all jobs from session 0x%08X.\n",
+			  session));
+
+	mali_executor_lock();
+
+	if (mali_group_get_session(gp_group) == session) {
+		if (EXEC_STATE_WORKING == gp_group_state) {
+			struct mali_gp_job *gp_job = NULL;
+
+			mali_executor_complete_group(gp_group, MALI_FALSE, &gp_job, NULL);
+
+			MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+			/* GP job completed, make sure it is freed */
+			mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+						       MALI_FALSE, MALI_TRUE);
+		} else {
+			/* Same session, but not working, so just clear it */
+			mali_group_clear_session(gp_group);
+		}
+	}
+
+	if (mali_executor_has_virtual_group()) {
+		if (EXEC_STATE_WORKING == virtual_group_state
+		    && mali_group_get_session(virtual_group) == session) {
+			struct mali_pp_job *pp_job = NULL;
+
+			mali_executor_complete_group(virtual_group, MALI_FALSE, NULL, &pp_job);
+
+			if (NULL != pp_job) {
+				/* PP job completed, make sure it is freed */
+				mali_scheduler_complete_pp_job(pp_job, 0,
+							       MALI_FALSE, MALI_TRUE);
+			}
+		}
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+				    struct mali_group, executor_list) {
+		if (mali_group_get_session(group) == session) {
+			struct mali_pp_job *pp_job = NULL;
+
+			mali_executor_complete_group(group, MALI_FALSE, NULL, &pp_job);
+
+			if (NULL != pp_job) {
+				/* PP job completed, make sure it is freed */
+				mali_scheduler_complete_pp_job(pp_job, 0,
+							       MALI_FALSE, MALI_TRUE);
+			}
+		}
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) {
+		mali_group_clear_session(group);
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) {
+		mali_group_clear_session(group);
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) {
+		mali_group_clear_session(group);
+	}
+
+	mali_executor_unlock();
+}
+
+
+void mali_executor_core_scaling_enable(void)
+{
+	/* PS: Core scaling is by default enabled */
+	core_scaling_enabled = MALI_TRUE;
+}
+
+void mali_executor_core_scaling_disable(void)
+{
+	core_scaling_enabled = MALI_FALSE;
+}
+
+mali_bool mali_executor_core_scaling_is_enabled(void)
+{
+	return core_scaling_enabled;
+}
+
+void mali_executor_group_enable(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	mali_executor_lock();
+
+	if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+	    && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+		mali_executor_group_enable_internal(group);
+	}
+
+	mali_executor_schedule();
+	mali_executor_unlock();
+
+	_mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+/*
+ * If a physical group is inactive or idle, we disable it immediately.
+ * If the group is a child of the virtual group and the virtual group is
idle, disable given physical group in it. + */ +void mali_executor_group_disable(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + + mali_executor_lock(); + + if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group)) + && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) { + mali_executor_group_disable_internal(group); + } + + mali_executor_schedule(); + mali_executor_unlock(); + + _mali_osk_wq_schedule_work(executor_wq_notify_core_change); +} + +mali_bool mali_executor_group_is_disabled(struct mali_group *group) +{ + /* NB: This function is not optimized for time critical usage */ + + mali_bool ret; + + MALI_DEBUG_ASSERT_POINTER(group); + + mali_executor_lock(); + ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED); + mali_executor_unlock(); + + return ret; +} + +int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override) +{ + if (target_core_nr == num_physical_pp_cores_enabled) return 0; + if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM; + if (target_core_nr > num_physical_pp_cores_total) return -EINVAL; + if (0 == target_core_nr) return -EINVAL; + + mali_executor_core_scale(target_core_nr); + + _mali_osk_wq_schedule_work(executor_wq_notify_core_change); + + return 0; +} + +#if MALI_STATE_TRACKING +u32 mali_executor_dump_state(char *buf, u32 size) +{ + int n = 0; + struct mali_group *group; + struct mali_group *temp; + + mali_executor_lock(); + + switch (gp_group_state) { + case EXEC_STATE_INACTIVE: + n += _mali_osk_snprintf(buf + n, size - n, + "GP group is in state INACTIVE\n"); + break; + case EXEC_STATE_IDLE: + n += _mali_osk_snprintf(buf + n, size - n, + "GP group is in state IDLE\n"); + break; + case EXEC_STATE_WORKING: + n += _mali_osk_snprintf(buf + n, size - n, + "GP group is in state WORKING\n"); + break; + default: + n += _mali_osk_snprintf(buf + n, size - n, + "GP group is in unknown/illegal state %u\n", + gp_group_state); + break; + } + + n += mali_group_dump_state(gp_group, buf + n, size - n); + + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP groups in WORKING state (count = %u):\n", + group_list_working_count); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) { + n += mali_group_dump_state(group, buf + n, size - n); + } + + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP groups in IDLE state (count = %u):\n", + group_list_idle_count); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) { + n += mali_group_dump_state(group, buf + n, size - n); + } + + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP groups in INACTIVE state (count = %u):\n", + group_list_inactive_count); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) { + n += mali_group_dump_state(group, buf + n, size - n); + } + + n += _mali_osk_snprintf(buf + n, size - n, + "Physical PP groups in DISABLED state (count = %u):\n", + group_list_disabled_count); + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) { + n += mali_group_dump_state(group, buf + n, size - n); + } + + if (mali_executor_has_virtual_group()) { + switch (virtual_group_state) { + case EXEC_STATE_EMPTY: + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP group is in state EMPTY\n"); + break; + case EXEC_STATE_INACTIVE: + n += _mali_osk_snprintf(buf + n, size - n, + "Virtual PP group is in state 
INACTIVE\n");
+			break;
+		case EXEC_STATE_IDLE:
+			n += _mali_osk_snprintf(buf + n, size - n,
+						"Virtual PP group is in state IDLE\n");
+			break;
+		case EXEC_STATE_WORKING:
+			n += _mali_osk_snprintf(buf + n, size - n,
+						"Virtual PP group is in state WORKING\n");
+			break;
+		default:
+			n += _mali_osk_snprintf(buf + n, size - n,
+						"Virtual PP group is in unknown/illegal state %u\n",
+						virtual_group_state);
+			break;
+		}
+
+		n += mali_group_dump_state(virtual_group, buf + n, size - n);
+	}
+
+	mali_executor_unlock();
+
+	n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+	return n;
+}
+#endif
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+	args->number_of_total_cores = num_physical_pp_cores_total;
+	args->number_of_enabled_cores = num_physical_pp_cores_enabled;
+	return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+	args->version = pp_version;
+	return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+	args->number_of_cores = 1;
+	return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+	args->version = gp_version;
+	return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+	struct mali_session_data *session;
+	struct mali_gp_job *job;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+	session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+	if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+		_mali_osk_notification_t *new_notification = NULL;
+
+		new_notification = _mali_osk_notification_create(
+					   _MALI_NOTIFICATION_GP_STALLED,
+					   sizeof(_mali_uk_gp_job_suspended_s));
+
+		if (NULL != new_notification) {
+			MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n",
+					     args->cookie, args->arguments[0], args->arguments[1]));
+
+			mali_executor_lock();
+
+			/* Resume the job in question if it is still running */
+			job = mali_group_get_running_gp_job(gp_group);
+			if (NULL != job &&
+			    args->cookie == mali_gp_job_get_id(job) &&
+			    session == mali_gp_job_get_session(job)) {
+				/*
+				 * Correct job is running, resume with new heap
+				 */
+
+				mali_gp_job_set_oom_notification(job,
+								 new_notification);
+
+				/* This will also re-enable interrupts */
+				mali_group_resume_gp_with_new_heap(gp_group,
+								   args->cookie,
+								   args->arguments[0],
+								   args->arguments[1]);
+
+				mali_executor_unlock();
+				return _MALI_OSK_ERR_OK;
+			} else {
+				MALI_DEBUG_PRINT(2, ("Executor: Unable to resume GP job because it timed out or for another unexpected reason!\n"));
+
+				_mali_osk_notification_delete(new_notification);
+
+				mali_executor_unlock();
+				return _MALI_OSK_ERR_FAULT;
+			}
+		} else {
+			MALI_PRINT_ERROR(("Executor: Failed to allocate notification object.
Will abort GP job.\n")); + } + } else { + MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie)); + } + + mali_executor_lock(); + + /* Abort the job in question if it is still running */ + job = mali_group_get_running_gp_job(gp_group); + if (NULL != job && + args->cookie == mali_gp_job_get_id(job) && + session == mali_gp_job_get_session(job)) { + /* Correct job is still running */ + struct mali_gp_job *job_done = NULL; + + mali_executor_complete_group(gp_group, MALI_FALSE, &job_done, NULL); + + /* The same job should have completed */ + MALI_DEBUG_ASSERT(job_done == job); + + /* GP job completed, make sure it is freed */ + mali_scheduler_complete_gp_job(job_done, MALI_FALSE, + MALI_TRUE, MALI_TRUE); + } + + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; +} + + +/* + * ---------- Implementation of static functions ---------- + */ + +void mali_executor_lock(void) +{ + _mali_osk_spinlock_irq_lock(mali_executor_lock_obj); + MALI_DEBUG_PRINT(5, ("Executor: lock taken\n")); +} + +void mali_executor_unlock(void) +{ + MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n")); + _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj); +} + +static mali_bool mali_executor_is_suspended(void *data) +{ + mali_bool ret; + + /* This callback does not use the data pointer. */ + MALI_IGNORE(data); + + mali_executor_lock(); + + ret = pause_count > 0 && !mali_executor_is_working(); + + mali_executor_unlock(); + + return ret; +} + +static mali_bool mali_executor_is_working() +{ + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + return (0 != group_list_working_count || + EXEC_STATE_WORKING == gp_group_state || + EXEC_STATE_WORKING == virtual_group_state); +} + +static void mali_executor_disable_empty_virtual(void) +{ + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY); + MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING); + + if (mali_group_is_empty(virtual_group)) { + virtual_group_state = EXEC_STATE_EMPTY; + } +} + +static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group) +{ + mali_bool trigger_pm_update = MALI_FALSE; + + MALI_DEBUG_ASSERT_POINTER(group); + /* Only rejoining after job has completed (still active) */ + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == + mali_group_get_state(group)); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group()); + MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group)); + + /* Make sure group and virtual group have same status */ + + if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) { + if (mali_group_deactivate(group)) { + trigger_pm_update = MALI_TRUE; + } + + if (virtual_group_state == EXEC_STATE_EMPTY) { + virtual_group_state = EXEC_STATE_INACTIVE; + } + } else if (MALI_GROUP_STATE_ACTIVATION_PENDING == + mali_group_get_state(virtual_group)) { + /* + * Activation is pending for virtual group, leave + * this child group as active. 
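+		 * The group rejoins the virtual group below and follows
+		 * the virtual group's state from then on.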
+ */ + if (virtual_group_state == EXEC_STATE_EMPTY) { + virtual_group_state = EXEC_STATE_INACTIVE; + } + } else { + MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == + mali_group_get_state(virtual_group)); + + if (virtual_group_state == EXEC_STATE_EMPTY) { + virtual_group_state = EXEC_STATE_IDLE; + } + } + + /* Remove group from idle list */ + MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, + EXEC_STATE_IDLE)); + _mali_osk_list_delinit(&group->executor_list); + group_list_idle_count--; + + /* + * And finally rejoin the virtual group + * group will start working on same job as virtual_group, + * if virtual_group is working on a job + */ + mali_group_add_group(virtual_group, group); + + return trigger_pm_update; +} + +static mali_bool mali_executor_has_virtual_group(void) +{ +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE; +#else + return MALI_FALSE; +#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */ +} + +static mali_bool mali_executor_virtual_group_is_usable(void) +{ +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return ((EXEC_STATE_INACTIVE == virtual_group_state || + EXEC_STATE_IDLE == virtual_group_state) && (virtual_group->state != MALI_GROUP_STATE_ACTIVATION_PENDING)) ? + MALI_TRUE : MALI_FALSE; +#else + return MALI_FALSE; +#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */ +} + +static mali_bool mali_executor_tackle_gp_bound(void) +{ + struct mali_pp_job *job; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + job = mali_scheduler_job_pp_physical_peek(); + + if (NULL != job && MALI_TRUE == mali_is_mali400()) { + if (0 < group_list_working_count && + mali_pp_job_is_large_and_unstarted(job)) { + return MALI_TRUE; + } + } + + return MALI_FALSE; +} + +static mali_bool mali_executor_schedule_is_early_out(mali_bool *gpu_secure_mode_is_needed) +{ + struct mali_pp_job *next_pp_job_to_start = NULL; + struct mali_group *group; + struct mali_group *tmp_group; + struct mali_pp_job *physical_pp_job_working = NULL; + struct mali_pp_job *virtual_pp_job_working = NULL; + mali_bool gpu_working_in_protected_mode = MALI_FALSE; + mali_bool gpu_working_in_non_protected_mode = MALI_FALSE; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + + *gpu_secure_mode_is_needed = MALI_FALSE; + + /* Check if the gpu secure mode is supported, exit if not.*/ + if (MALI_FALSE == _mali_osk_gpu_secure_mode_is_supported()) { + return MALI_FALSE; + } + + /* Check if need to set gpu secure mode for the next pp job, + * get the next pp job that will be scheduled if exist. 
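+	 * (Overall, this helper returns MALI_TRUE when scheduling must be
+	 * skipped for now, and sets *gpu_secure_mode_is_needed when the
+	 * next job requires the GPU to be in secure mode.)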
+ */ + next_pp_job_to_start = mali_scheduler_job_pp_next(); + + /* Check current pp physical/virtual running job is protected job or not if exist.*/ + _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working, + struct mali_group, executor_list) { + physical_pp_job_working = group->pp_running_job; + break; + } + + if (EXEC_STATE_WORKING == virtual_group_state) { + virtual_pp_job_working = virtual_group->pp_running_job; + } + + if (NULL != physical_pp_job_working) { + if (MALI_TRUE == mali_pp_job_is_protected_job(physical_pp_job_working)) { + gpu_working_in_protected_mode = MALI_TRUE; + } else { + gpu_working_in_non_protected_mode = MALI_TRUE; + } + } else if (NULL != virtual_pp_job_working) { + if (MALI_TRUE == mali_pp_job_is_protected_job(virtual_pp_job_working)) { + gpu_working_in_protected_mode = MALI_TRUE; + } else { + gpu_working_in_non_protected_mode = MALI_TRUE; + } + } else if (EXEC_STATE_WORKING == gp_group_state) { + gpu_working_in_non_protected_mode = MALI_TRUE; + } + + /* If the next pp job is the protected pp job.*/ + if ((NULL != next_pp_job_to_start) && MALI_TRUE == mali_pp_job_is_protected_job(next_pp_job_to_start)) { + /* if gp is working or any non-protected pp job is working now, unable to schedule protected pp job. */ + if (MALI_TRUE == gpu_working_in_non_protected_mode) + return MALI_TRUE; + + *gpu_secure_mode_is_needed = MALI_TRUE; + return MALI_FALSE; + + } + + if (MALI_TRUE == gpu_working_in_protected_mode) { + /* Unable to schedule non-protected pp job/gp job if exist protected pp running jobs*/ + return MALI_TRUE; + } + + return MALI_FALSE; +} +/* + * This is where jobs are actually started. + */ +static void mali_executor_schedule(void) +{ + u32 i; + u32 num_physical_needed = 0; + u32 num_physical_to_process = 0; + mali_bool trigger_pm_update = MALI_FALSE; + mali_bool deactivate_idle_group = MALI_TRUE; + mali_bool gpu_secure_mode_is_needed = MALI_FALSE; + mali_bool is_gpu_secure_mode = MALI_FALSE; + /* Physical groups + jobs to start in this function */ + struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + int num_jobs_to_start = 0; + + /* Virtual job to start in this function */ + struct mali_pp_job *virtual_job_to_start = NULL; + + /* GP job to start in this function */ + struct mali_gp_job *gp_job_to_start = NULL; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (pause_count > 0) { + /* Execution is suspended, don't schedule any jobs. */ + return; + } + + /* Lock needed in order to safely handle the job queues */ + mali_scheduler_lock(); + + /* 1. Check the schedule if need to early out. */ + if (MALI_TRUE == mali_executor_schedule_is_early_out(&gpu_secure_mode_is_needed)) { + mali_scheduler_unlock(); + return; + } + + /* 2. Activate gp firstly if have gp job queued. */ + if ((EXEC_STATE_INACTIVE == gp_group_state) + && (0 < mali_scheduler_job_gp_count()) + && (gpu_secure_mode_is_needed == MALI_FALSE)) { + + enum mali_group_state state = + mali_group_activate(gp_group); + if (MALI_GROUP_STATE_ACTIVE == state) { + /* Set GP group state to idle */ + gp_group_state = EXEC_STATE_IDLE; + } else { + trigger_pm_update = MALI_TRUE; + } + } + + /* 3. Prepare as many physical groups as needed/possible */ + + num_physical_needed = mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed); + + /* On mali-450 platform, we don't need to enter in this block frequently. 
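+	 * (PP jobs normally run in the virtual group there, so physical
+	 * PP jobs at the queue head are the exception.)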
*/ + if (0 < num_physical_needed) { + + if (num_physical_needed <= group_list_idle_count) { + /* We have enough groups on idle list already */ + num_physical_to_process = num_physical_needed; + num_physical_needed = 0; + } else { + /* We need to get a hold of some more groups */ + num_physical_to_process = group_list_idle_count; + num_physical_needed -= group_list_idle_count; + } + + if (0 < num_physical_needed) { + + /* 3.1. Activate groups which are inactive */ + + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, + struct mali_group, executor_list) { + enum mali_group_state state = + mali_group_activate(group); + if (MALI_GROUP_STATE_ACTIVE == state) { + /* Move from inactive to idle */ + mali_executor_change_state_pp_physical(group, + &group_list_inactive, + &group_list_inactive_count, + &group_list_idle, + &group_list_idle_count); + num_physical_to_process++; + } else { + trigger_pm_update = MALI_TRUE; + } + + num_physical_needed--; + if (0 == num_physical_needed) { + /* We have activated all the groups we need */ + break; + } + } + } + + if (mali_executor_virtual_group_is_usable()) { + + /* + * 3.2. And finally, steal and activate groups + * from virtual group if we need even more + */ + while (0 < num_physical_needed) { + struct mali_group *group; + + group = mali_group_acquire_group(virtual_group); + if (NULL != group) { + enum mali_group_state state; + + mali_executor_disable_empty_virtual(); + + state = mali_group_activate(group); + if (MALI_GROUP_STATE_ACTIVE == state) { + /* Group is ready, add to idle list */ + _mali_osk_list_add( + &group->executor_list, + &group_list_idle); + group_list_idle_count++; + num_physical_to_process++; + } else { + /* + * Group is not ready yet, + * add to inactive list + */ + _mali_osk_list_add( + &group->executor_list, + &group_list_inactive); + group_list_inactive_count++; + + trigger_pm_update = MALI_TRUE; + } + num_physical_needed--; + } else { + /* + * We could not get enough groups + * from the virtual group. + */ + break; + } + } + } + + /* 3.3. Assign physical jobs to groups */ + + if (0 < num_physical_to_process) { + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, + struct mali_group, executor_list) { + struct mali_pp_job *job = NULL; + u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + + MALI_DEBUG_ASSERT(num_jobs_to_start < + MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS); + + MALI_DEBUG_ASSERT(0 < + mali_scheduler_job_physical_head_count(gpu_secure_mode_is_needed)); + + /* If the next pp job is non-protected, check if gp bound now. */ + if ((MALI_FALSE == gpu_secure_mode_is_needed) + && (mali_executor_hint_is_enabled(MALI_EXECUTOR_HINT_GP_BOUND)) + && (MALI_TRUE == mali_executor_tackle_gp_bound())) { + /* + * We're gp bound, + * don't start this right now. 
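+				 * A large, not-yet-started PP job would
+				 * otherwise tie up the PP cores while the
+				 * GP is the bottleneck.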
+				 */
+				deactivate_idle_group = MALI_FALSE;
+				num_physical_to_process = 0;
+				break;
+			}
+
+			job = mali_scheduler_job_pp_physical_get(
+				      &sub_job);
+
+			if (MALI_FALSE == gpu_secure_mode_is_needed) {
+				MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_protected_job(job));
+			} else {
+				MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_protected_job(job));
+			}
+
+			MALI_DEBUG_ASSERT_POINTER(job);
+			MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+			/* Put job + group on list of jobs to start later on */
+
+			groups_to_start[num_jobs_to_start] = group;
+			jobs_to_start[num_jobs_to_start] = job;
+			sub_jobs_to_start[num_jobs_to_start] = sub_job;
+			num_jobs_to_start++;
+
+			/* Move group from idle to working */
+			mali_executor_change_state_pp_physical(group,
+							       &group_list_idle,
+							       &group_list_idle_count,
+							       &group_list_working,
+							       &group_list_working_count);
+
+			num_physical_to_process--;
+			if (0 == num_physical_to_process) {
+				/* Got all we needed */
+				break;
+			}
+		}
+	}
+	}
+
+	/* 4. Deactivate idle PP groups. This must happen before the virtual
+	 * group is activated, to cover the case where the normal queue only
+	 * holds a physical job but the group is still inactive: the job
+	 * start is then deferred until the group has been activated, which
+	 * re-runs the scheduler. If a virtual job has arrived on the high
+	 * priority queue by then, that scheduler run would otherwise do
+	 * nothing, because scheduling had already stopped.
+	 */
+
+	if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group
+			&& (!mali_timeline_has_physical_pp_job()))) {
+		trigger_pm_update = MALI_TRUE;
+	}
+
+	/* 5. Activate virtual group, if needed */
+	if (EXEC_STATE_INACTIVE == virtual_group_state &&
+	    MALI_TRUE == mali_scheduler_job_next_is_virtual()) {
+		struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek();
+		if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job))
+		    || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) {
+			enum mali_group_state state =
+				mali_group_activate(virtual_group);
+			if (MALI_GROUP_STATE_ACTIVE == state) {
+				/* Set virtual group state to idle */
+				virtual_group_state = EXEC_STATE_IDLE;
+			} else {
+				trigger_pm_update = MALI_TRUE;
+			}
+		}
+	}
+
+	/* 6. To power up groups asap, trigger the pm update only when there
+	 * is no need to switch the gpu mode.
+	 */
+
+	is_gpu_secure_mode = _mali_osk_gpu_secure_mode_is_enabled();
+
+	if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == is_gpu_secure_mode)
+	    || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == is_gpu_secure_mode)) {
+		if (MALI_TRUE == trigger_pm_update) {
+			trigger_pm_update = MALI_FALSE;
+			mali_pm_update_async();
+		}
+	}
+
+	/* 7. Assign jobs to idle virtual group (or deactivate if no job) */
+
+	if (EXEC_STATE_IDLE == virtual_group_state) {
+		if (MALI_TRUE == mali_scheduler_job_next_is_virtual()) {
+			struct mali_pp_job *virtual_job = mali_scheduler_job_pp_virtual_peek();
+			if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == mali_pp_job_is_protected_job(virtual_job))
+			    || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == mali_pp_job_is_protected_job(virtual_job))) {
+				virtual_job_to_start =
+					mali_scheduler_job_pp_virtual_get();
+				virtual_group_state = EXEC_STATE_WORKING;
+			}
+		} else if (!mali_timeline_has_virtual_pp_job()) {
+			virtual_group_state = EXEC_STATE_INACTIVE;
+
+			if (mali_group_deactivate(virtual_group)) {
+				trigger_pm_update = MALI_TRUE;
+			}
+		}
+	}
+
+	/* 8.
Assign job to idle GP group (or deactivate if no job) */ + + if (EXEC_STATE_IDLE == gp_group_state && MALI_FALSE == gpu_secure_mode_is_needed) { + if (0 < mali_scheduler_job_gp_count()) { + gp_job_to_start = mali_scheduler_job_gp_get(); + gp_group_state = EXEC_STATE_WORKING; + } else if (!mali_timeline_has_gp_job()) { + gp_group_state = EXEC_STATE_INACTIVE; + if (mali_group_deactivate(gp_group)) { + trigger_pm_update = MALI_TRUE; + } + } + } + + /* 9. We no longer need the schedule/queue lock */ + + mali_scheduler_unlock(); + + /* 10. start jobs */ + if (NULL != virtual_job_to_start) { + MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group)); + mali_group_start_pp_job(virtual_group, + virtual_job_to_start, 0, is_gpu_secure_mode); + } + + for (i = 0; i < num_jobs_to_start; i++) { + MALI_DEBUG_ASSERT(!mali_group_pp_is_active( + groups_to_start[i])); + mali_group_start_pp_job(groups_to_start[i], + jobs_to_start[i], + sub_jobs_to_start[i], is_gpu_secure_mode); + } + + MALI_DEBUG_ASSERT_POINTER(gp_group); + + if (NULL != gp_job_to_start) { + MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group)); + mali_group_start_gp_job(gp_group, gp_job_to_start, is_gpu_secure_mode); + } + + /* 11. Trigger any pending PM updates */ + if (MALI_TRUE == trigger_pm_update) { + mali_pm_update_async(); + } +} + +/* Handler for deferred schedule requests */ +static void mali_executor_wq_schedule(void *arg) +{ + MALI_IGNORE(arg); + mali_executor_lock(); + mali_executor_schedule(); + mali_executor_unlock(); +} + +static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job) +{ + _mali_uk_gp_job_suspended_s *jobres; + _mali_osk_notification_t *notification; + + notification = mali_gp_job_get_oom_notification(job); + + /* + * Remember the id we send to user space, so we have something to + * verify when we get a response + */ + gp_returned_cookie = mali_gp_job_get_id(job); + + jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer; + jobres->user_job_ptr = mali_gp_job_get_user_id(job); + jobres->cookie = gp_returned_cookie; + + mali_session_send_notification(mali_gp_job_get_session(job), + notification); +} +static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group, + mali_bool success) +{ + struct mali_gp_job *job; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + /* Extracts the needed HW status from core and reset */ + job = mali_group_complete_gp(group, success); + + MALI_DEBUG_ASSERT_POINTER(job); + + /* Core is now ready to go into idle list */ + gp_group_state = EXEC_STATE_IDLE; + + /* This will potentially queue more GP and PP jobs */ + mali_timeline_tracker_release(&job->tracker); + + /* Signal PP job */ + mali_gp_job_signal_pp_tracker(job, success); + + return job; +} + +static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group, + mali_bool success) +{ + struct mali_pp_job *job; + u32 sub_job; + mali_bool job_is_done; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + /* Extracts the needed HW status from core and reset */ + job = mali_group_complete_pp(group, success, &sub_job); + + MALI_DEBUG_ASSERT_POINTER(job); + + /* Core is now ready to go into idle list */ + if (mali_group_is_virtual(group)) { + virtual_group_state = EXEC_STATE_IDLE; + } else { + /* Move from working to idle state */ + mali_executor_change_state_pp_physical(group, + &group_list_working, + &group_list_working_count, + &group_list_idle, + &group_list_idle_count); + } + + /* It is the executor module which owns the jobs themselves by now */ + 
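	/*
+	 * A physical PP job may be split into several sub jobs; the job as
+	 * a whole is done, and its timeline tracker released, only once
+	 * the last sub job has been marked completed.
+	 */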
mali_pp_job_mark_sub_job_completed(job, success); + job_is_done = mali_pp_job_is_complete(job); + + if (job_is_done) { + /* This will potentially queue more GP and PP jobs */ + mali_timeline_tracker_release(&job->tracker); + } + + return job; +} + +static void mali_executor_complete_group(struct mali_group *group, + mali_bool success, + struct mali_gp_job **gp_job_done, + struct mali_pp_job **pp_job_done) +{ + struct mali_gp_core *gp_core = mali_group_get_gp_core(group); + struct mali_pp_core *pp_core = mali_group_get_pp_core(group); + struct mali_gp_job *gp_job = NULL; + struct mali_pp_job *pp_job = NULL; + mali_bool pp_job_is_done = MALI_TRUE; + + if (NULL != gp_core) { + gp_job = mali_executor_complete_gp(group, success); + } else { + MALI_DEBUG_ASSERT_POINTER(pp_core); + MALI_IGNORE(pp_core); + pp_job = mali_executor_complete_pp(group, success); + + pp_job_is_done = mali_pp_job_is_complete(pp_job); + } + + if (pause_count > 0) { + /* Execution has been suspended */ + + if (!mali_executor_is_working()) { + /* Last job completed, wake up sleepers */ + _mali_osk_wait_queue_wake_up( + executor_working_wait_queue); + } + } else if (MALI_TRUE == mali_group_disable_requested(group)) { + mali_executor_core_scale_in_group_complete(group); + + mali_executor_schedule(); + } else { + /* try to schedule new jobs */ + mali_executor_schedule(); + } + + if (NULL != gp_job) { + MALI_DEBUG_ASSERT_POINTER(gp_job_done); + *gp_job_done = gp_job; + } else if (pp_job_is_done) { + MALI_DEBUG_ASSERT_POINTER(pp_job); + MALI_DEBUG_ASSERT_POINTER(pp_job_done); + *pp_job_done = pp_job; + } +} + +static void mali_executor_change_state_pp_physical(struct mali_group *group, + _mali_osk_list_t *old_list, + u32 *old_count, + _mali_osk_list_t *new_list, + u32 *new_count) +{ + /* + * It's a bit more complicated to change the state for the physical PP + * groups since their state is determined by the list they are on. 
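+	 * Moving a group between lists *is* the state change, and the
+	 * per-list counters must be kept in sync with it. A typical call,
+	 * as used elsewhere in this file, looks like:
+	 *
+	 *   mali_executor_change_state_pp_physical(group,
+	 *           &group_list_idle, &group_list_idle_count,
+	 *           &group_list_working, &group_list_working_count);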
+ */ +#if defined(DEBUG) + mali_bool found = MALI_FALSE; + struct mali_group *group_iter; + struct mali_group *temp; + u32 old_counted = 0; + u32 new_counted = 0; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(old_list); + MALI_DEBUG_ASSERT_POINTER(old_count); + MALI_DEBUG_ASSERT_POINTER(new_list); + MALI_DEBUG_ASSERT_POINTER(new_count); + + /* + * Verify that group is present on old list, + * and that the count is correct + */ + + _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list, + struct mali_group, executor_list) { + old_counted++; + if (group == group_iter) { + found = MALI_TRUE; + } + } + + _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list, + struct mali_group, executor_list) { + new_counted++; + } + + if (MALI_FALSE == found) { + if (old_list == &group_list_idle) { + MALI_DEBUG_PRINT(1, (" old Group list is idle,")); + } else if (old_list == &group_list_inactive) { + MALI_DEBUG_PRINT(1, (" old Group list is inactive,")); + } else if (old_list == &group_list_working) { + MALI_DEBUG_PRINT(1, (" old Group list is working,")); + } else if (old_list == &group_list_disabled) { + MALI_DEBUG_PRINT(1, (" old Group list is disable,")); + } + + if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) { + MALI_DEBUG_PRINT(1, (" group in working \n")); + } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) { + MALI_DEBUG_PRINT(1, (" group in inactive \n")); + } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) { + MALI_DEBUG_PRINT(1, (" group in idle \n")); + } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) { + MALI_DEBUG_PRINT(1, (" but group in disabled \n")); + } + } + + MALI_DEBUG_ASSERT(MALI_TRUE == found); + MALI_DEBUG_ASSERT(0 < (*old_count)); + MALI_DEBUG_ASSERT((*old_count) == old_counted); + MALI_DEBUG_ASSERT((*new_count) == new_counted); +#endif + + _mali_osk_list_move(&group->executor_list, new_list); + (*old_count)--; + (*new_count)++; +} + +static void mali_executor_set_state_pp_physical(struct mali_group *group, + _mali_osk_list_t *new_list, + u32 *new_count) +{ + _mali_osk_list_add(&group->executor_list, new_list); + (*new_count)++; +} + +static mali_bool mali_executor_group_is_in_state(struct mali_group *group, + enum mali_executor_state_t state) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (gp_group == group) { + if (gp_group_state == state) { + return MALI_TRUE; + } + } else if (virtual_group == group || mali_group_is_in_virtual(group)) { + if (virtual_group_state == state) { + return MALI_TRUE; + } + } else { + /* Physical PP group */ + struct mali_group *group_iter; + struct mali_group *temp; + _mali_osk_list_t *list; + + if (EXEC_STATE_DISABLED == state) { + list = &group_list_disabled; + } else if (EXEC_STATE_INACTIVE == state) { + list = &group_list_inactive; + } else if (EXEC_STATE_IDLE == state) { + list = &group_list_idle; + } else { + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state); + list = &group_list_working; + } + + _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list, + struct mali_group, executor_list) { + if (group_iter == group) { + return MALI_TRUE; + } + } + } + + /* group not in correct state */ + return MALI_FALSE; +} + +static void mali_executor_group_enable_internal(struct mali_group *group) +{ + MALI_DEBUG_ASSERT(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)); + + /* Put into inactive 
state (== "lowest" enabled state) */ + if (group == gp_group) { + MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state); + gp_group_state = EXEC_STATE_INACTIVE; + } else { + mali_executor_change_state_pp_physical(group, + &group_list_disabled, + &group_list_disabled_count, + &group_list_inactive, + &group_list_inactive_count); + + ++num_physical_pp_cores_enabled; + MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id)); + } + + if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) { + MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group)); + + /* Move from inactive to idle */ + if (group == gp_group) { + gp_group_state = EXEC_STATE_IDLE; + } else { + mali_executor_change_state_pp_physical(group, + &group_list_inactive, + &group_list_inactive_count, + &group_list_idle, + &group_list_idle_count); + + if (mali_executor_has_virtual_group()) { + if (mali_executor_physical_rejoin_virtual(group)) { + mali_pm_update_async(); + } + } + } + } else { + mali_pm_update_async(); + } +} + +static void mali_executor_group_disable_internal(struct mali_group *group) +{ + mali_bool working; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)); + + working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING); + if (MALI_TRUE == working) { + /** Group to be disabled once it completes current work, + * when virtual group completes, also check child groups for this flag */ + mali_group_set_disable_request(group, MALI_TRUE); + return; + } + + /* Put into disabled state */ + if (group == gp_group) { + /* GP group */ + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state); + gp_group_state = EXEC_STATE_DISABLED; + } else { + if (mali_group_is_in_virtual(group)) { + /* A child group of virtual group. move the specific group from virtual group */ + MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state); + + mali_executor_set_state_pp_physical(group, + &group_list_disabled, + &group_list_disabled_count); + + mali_group_remove_group(virtual_group, group); + mali_executor_disable_empty_virtual(); + } else { + mali_executor_change_group_status_disabled(group); + } + + --num_physical_pp_cores_enabled; + MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id)); + } + + if (MALI_GROUP_STATE_INACTIVE != group->state) { + if (MALI_TRUE == mali_group_deactivate(group)) { + mali_pm_update_async(); + } + } +} + +static void mali_executor_notify_core_change(u32 num_cores) +{ + mali_bool done = MALI_FALSE; + + if (mali_is_mali450() || mali_is_mali470()) { + return; + } + + /* + * This function gets a bit complicated because we can't hold the session lock while + * allocating notification objects. 
+ */ + while (!done) { + u32 i; + u32 num_sessions_alloc; + u32 num_sessions_with_lock; + u32 used_notification_objects = 0; + _mali_osk_notification_t **notobjs; + + /* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */ + num_sessions_alloc = mali_session_get_count(); + if (0 == num_sessions_alloc) { + /* No sessions to report to */ + return; + } + + notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc); + if (NULL == notobjs) { + MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n")); + /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */ + return; + } + + for (i = 0; i < num_sessions_alloc; i++) { + notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s)); + if (NULL != notobjs[i]) { + _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer; + data->number_of_enabled_cores = num_cores; + } else { + MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i)); + } + } + + mali_session_lock(); + + /* number of sessions will not change while we hold the lock */ + num_sessions_with_lock = mali_session_get_count(); + + if (num_sessions_alloc >= num_sessions_with_lock) { + /* We have allocated enough notification objects for all the sessions atm */ + struct mali_session_data *session, *tmp; + MALI_SESSION_FOREACH(session, tmp, link) { + MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc); + if (NULL != notobjs[used_notification_objects]) { + mali_session_send_notification(session, notobjs[used_notification_objects]); + notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */ + } + used_notification_objects++; + } + done = MALI_TRUE; + } + + mali_session_unlock(); + + /* Delete any remaining/unused notification objects */ + for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) { + if (NULL != notobjs[used_notification_objects]) { + _mali_osk_notification_delete(notobjs[used_notification_objects]); + } + } + + _mali_osk_free(notobjs); + } +} + +static mali_bool mali_executor_core_scaling_is_done(void *data) +{ + u32 i; + u32 num_groups; + mali_bool ret = MALI_TRUE; + + MALI_IGNORE(data); + + mali_executor_lock(); + + num_groups = mali_group_get_glob_num_groups(); + + for (i = 0; i < num_groups; i++) { + struct mali_group *group = mali_group_get_glob_group(i); + + if (NULL != group) { + if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) { + ret = MALI_FALSE; + break; + } + } + } + mali_executor_unlock(); + + return ret; +} + +static void mali_executor_wq_notify_core_change(void *arg) +{ + MALI_IGNORE(arg); + + if (mali_is_mali450() || mali_is_mali470()) { + return; + } + + _mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue, + mali_executor_core_scaling_is_done, NULL); + + mali_executor_notify_core_change(num_physical_pp_cores_enabled); +} + +/** + * Clear all disable request from the _last_ core scaling behavior. 
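+ * Called at the start of a new scaling request, so that stale
+ * disable_requested flags and delayed-enable masks cannot leak into it.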
+ */
+static void mali_executor_core_scaling_reset(void)
+{
+ u32 i;
+ u32 num_groups;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ group->disable_requested = MALI_FALSE;
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ core_scaling_delay_up_mask[i] = 0;
+ }
+}
+
+static void mali_executor_core_scale(unsigned int target_core_nr)
+{
+ int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ int i;
+
+ MALI_DEBUG_ASSERT(0 < target_core_nr);
+ MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);
+
+ mali_executor_lock();
+
+ if (target_core_nr < num_physical_pp_cores_enabled) {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
+ }
+
+ /* When a new core scaling request comes in, drop whatever is still
+ * pending from the last request. This is safe because everything is
+ * protected by a single lock (the executor lock). */
+ mali_executor_core_scaling_reset();
+
+ mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask);
+ mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i];
+ MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i]));
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 > target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_disable_internal(group);
+ target_core_scaling_mask[i]++;
+ if ((0 == target_core_scaling_mask[i])) {
+ break;
+ }
+
+ }
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ /**
+ * If target_core_scaling_mask[i] is greater than 0, we need to
+ * enable some PP cores in the domain whose index is i.
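+ *
+ * Example with hypothetical masks: current = {2, 0} and target = {1, 2}
+ * give a difference of {-1, +2}; one core in domain 0 was disabled by
+ * the loop above, and up to two disabled, non-virtual PP cores in
+ * domain 1 are enabled below.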
+ */
+ if (0 < target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (num_physical_pp_cores_enabled >= target_core_nr) {
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_enable_internal(group);
+ target_core_scaling_mask[i]--;
+
+ if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * At this point some PP cores may still not be enabled, because cores
+ * that first need to be disabled are still in the working state.
+ */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < target_core_scaling_mask[i]) {
+ core_scaling_delay_up_mask[i] = target_core_scaling_mask[i];
+ }
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
+
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group)
+{
+ int num_pp_cores_disabled = 0;
+ int num_pp_cores_to_enable = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group));
+
+ /* Disable the child groups of a virtual group */
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ if (MALI_TRUE == mali_group_disable_requested(child)) {
+ mali_group_set_disable_request(child, MALI_FALSE);
+ mali_executor_group_disable_internal(child);
+ num_pp_cores_disabled++;
+ }
+ }
+ mali_group_set_disable_request(group, MALI_FALSE);
+ } else {
+ mali_executor_group_disable_internal(group);
+ mali_group_set_disable_request(group, MALI_FALSE);
+ if (NULL != mali_group_get_pp_core(group)) {
+ num_pp_cores_disabled++;
+ }
+ }
+
+ num_pp_cores_to_enable = num_pp_cores_disabled;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < core_scaling_delay_up_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (0 == num_pp_cores_to_enable) {
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *disabled_group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) {
+ mali_executor_group_enable_internal(disabled_group);
+ core_scaling_delay_up_mask[i]--;
+ num_pp_cores_to_enable--;
+
+ if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ _mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue);
+}
+
+static void mali_executor_change_group_status_disabled(struct mali_group *group)
+{
+ /* Physical PP group */
+ mali_bool idle;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE);
+ if (MALI_TRUE == idle) {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_disabled,
+ &group_list_disabled_count); + } else { + mali_executor_change_state_pp_physical(group, + &group_list_inactive, + &group_list_inactive_count, + &group_list_disabled, + &group_list_disabled_count); + } +} + +static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group) +{ + mali_bool trigger_pm_update = MALI_FALSE; + + if (group_list_idle_count > 0) { + if (mali_executor_has_virtual_group()) { + + /* Rejoin virtual group on Mali-450 */ + + struct mali_group *group; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, + &group_list_idle, + struct mali_group, executor_list) { + if (mali_executor_physical_rejoin_virtual( + group)) { + trigger_pm_update = MALI_TRUE; + } + } + } else if (deactivate_idle_group) { + struct mali_group *group; + struct mali_group *temp; + + /* Deactivate group on Mali-300/400 */ + + _MALI_OSK_LIST_FOREACHENTRY(group, temp, + &group_list_idle, + struct mali_group, executor_list) { + if (mali_group_deactivate(group)) { + trigger_pm_update = MALI_TRUE; + } + + /* Move from idle to inactive */ + mali_executor_change_state_pp_physical(group, + &group_list_idle, + &group_list_idle_count, + &group_list_inactive, + &group_list_inactive_count); + } + } + } + + return trigger_pm_update; +} + +void mali_executor_running_status_print(void) +{ + struct mali_group *group = NULL; + struct mali_group *temp = NULL; + + MALI_PRINT(("GP running job: %p\n", gp_group->gp_running_job)); + if ((gp_group->gp_core) && (gp_group->is_working)) { + mali_group_dump_status(gp_group); + } + MALI_PRINT(("Physical PP groups in WORKING state (count = %u):\n", group_list_working_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) { + MALI_PRINT(("PP running job: %p, subjob %d \n", group->pp_running_job, group->pp_running_sub_job)); + mali_group_dump_status(group); + } + MALI_PRINT(("Physical PP groups in INACTIVE state (count = %u):\n", group_list_inactive_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + MALI_PRINT(("Physical PP groups in IDLE state (count = %u):\n", group_list_idle_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + MALI_PRINT(("Physical PP groups in DISABLED state (count = %u):\n", group_list_disabled_count)); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) { + MALI_PRINT(("\tPP status %d, SW power: %s\n", group->state, group->power_is_on ? "On" : "Off")); + MALI_PRINT(("\tPP #%d: %s\n", group->pp_core->core_id, group->pp_core->hw_core.description)); + } + + if (mali_executor_has_virtual_group()) { + MALI_PRINT(("Virtual group running job: %p\n", virtual_group->pp_running_job)); + MALI_PRINT(("Virtual group status: %d\n", virtual_group_state)); + MALI_PRINT(("Virtual group->status: %d\n", virtual_group->state)); + MALI_PRINT(("\tSW power: %s\n", virtual_group->power_is_on ? 
"On" : "Off")); + _MALI_OSK_LIST_FOREACHENTRY(group, temp, &virtual_group->group_list, + struct mali_group, group_list) { + int i = 0; + MALI_PRINT(("\tchild group(%s) running job: %p\n", group->pp_core->hw_core.description, group->pp_running_job)); + MALI_PRINT(("\tchild group(%s)->status: %d\n", group->pp_core->hw_core.description, group->state)); + MALI_PRINT(("\tchild group(%s) SW power: %s\n", group->pp_core->hw_core.description, group->power_is_on ? "On" : "Off")); + if (group->pm_domain) { + MALI_PRINT(("\tPower domain: id %u\n", mali_pm_domain_get_id(group->pm_domain))); + MALI_PRINT(("\tMask:0x%04x \n", mali_pm_domain_get_mask(group->pm_domain))); + MALI_PRINT(("\tUse-count:%u \n", mali_pm_domain_get_use_count(group->pm_domain))); + MALI_PRINT(("\tCurrent power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_current_mask()) ? "On" : "Off")); + MALI_PRINT(("\tWanted power status:%s \n", (mali_pm_domain_get_mask(group->pm_domain)& mali_pm_get_wanted_mask()) ? "On" : "Off")); + } + + for (i = 0; i < 2; i++) { + if (NULL != group->l2_cache_core[i]) { + struct mali_pm_domain *domain; + domain = mali_l2_cache_get_pm_domain(group->l2_cache_core[i]); + MALI_PRINT(("\t L2(index %d) group SW power: %s\n", i, group->l2_cache_core[i]->power_is_on ? "On" : "Off")); + if (domain) { + MALI_PRINT(("\tL2 Power domain: id %u\n", mali_pm_domain_get_id(domain))); + MALI_PRINT(("\tL2 Mask:0x%04x \n", mali_pm_domain_get_mask(domain))); + MALI_PRINT(("\tL2 Use-count:%u \n", mali_pm_domain_get_use_count(domain))); + MALI_PRINT(("\tL2 Current power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_current_mask()) ? "On" : "Off")); + MALI_PRINT(("\tL2 Wanted power status:%s \n", (mali_pm_domain_get_mask(domain) & mali_pm_get_wanted_mask()) ? "On" : "Off")); + } + } + } + } + if (EXEC_STATE_WORKING == virtual_group_state) { + mali_group_dump_status(virtual_group); + } + } +} + +void mali_executor_status_dump(void) +{ + mali_executor_lock(); + mali_scheduler_lock(); + + /* print schedule queue status */ + mali_scheduler_gp_pp_job_queue_print(); + + mali_scheduler_unlock(); + mali_executor_unlock(); +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_executor.h b/drivers/gpu/arm/mali400/common/mali_executor.h --- a/drivers/gpu/arm/mali400/common/mali_executor.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_executor.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2012, 2014-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_EXECUTOR_H__ +#define __MALI_EXECUTOR_H__ + +#include "mali_osk.h" +#include "mali_scheduler_types.h" +#include "mali_kernel_common.h" + +typedef enum { + MALI_EXECUTOR_HINT_GP_BOUND = 0 +#define MALI_EXECUTOR_HINT_MAX 1 +} mali_executor_hint; + +extern mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX]; + +/* forward declare struct instead of using include */ +struct mali_session_data; +struct mali_group; +struct mali_pp_core; + +extern _mali_osk_spinlock_irq_t *mali_executor_lock_obj; + +#define MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj); + +_mali_osk_errcode_t mali_executor_initialize(void); +void mali_executor_terminate(void); + +void mali_executor_populate(void); +void mali_executor_depopulate(void); + +void mali_executor_suspend(void); +void mali_executor_resume(void); + +u32 mali_executor_get_num_cores_total(void); +u32 mali_executor_get_num_cores_enabled(void); +struct mali_pp_core *mali_executor_get_virtual_pp(void); +struct mali_group *mali_executor_get_virtual_group(void); + +void mali_executor_zap_all_active(struct mali_session_data *session); + +/** + * Schedule GP and PP according to bitmask. + * + * @param mask A scheduling bitmask. + * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not. + */ +void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule); + +_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half); +_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half); +_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half); +void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups); +void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups); + +void mali_executor_abort_session(struct mali_session_data *session); + +void mali_executor_core_scaling_enable(void); +void mali_executor_core_scaling_disable(void); +mali_bool mali_executor_core_scaling_is_enabled(void); + +void mali_executor_group_enable(struct mali_group *group); +void mali_executor_group_disable(struct mali_group *group); +mali_bool mali_executor_group_is_disabled(struct mali_group *group); + +int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override); + +#if MALI_STATE_TRACKING +u32 mali_executor_dump_state(char *buf, u32 size); +#endif + +MALI_STATIC_INLINE void mali_executor_hint_enable(mali_executor_hint hint) +{ + MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX); + mali_executor_hints[hint] = MALI_TRUE; +} + +MALI_STATIC_INLINE void mali_executor_hint_disable(mali_executor_hint hint) +{ + MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX); + mali_executor_hints[hint] = MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_executor_hint_is_enabled(mali_executor_hint hint) +{ + MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX); + return mali_executor_hints[hint]; +} + +void mali_executor_running_status_print(void); +void mali_executor_status_dump(void); +void mali_executor_lock(void); +void mali_executor_unlock(void); +#endif /* __MALI_EXECUTOR_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_gp.c b/drivers/gpu/arm/mali400/common/mali_gp.c --- a/drivers/gpu/arm/mali400/common/mali_gp.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_gp.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,357 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. 
All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_gp.h" +#include "mali_hw_core.h" +#include "mali_group.h" +#include "mali_osk.h" +#include "regs/mali_gp_regs.h" +#include "mali_kernel_common.h" +#include "mali_kernel_core.h" +#if defined(CONFIG_MALI400_PROFILING) +#include "mali_osk_profiling.h" +#endif + +static struct mali_gp_core *mali_global_gp_core = NULL; + +/* Interrupt handlers */ +static void mali_gp_irq_probe_trigger(void *data); +static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data); + +struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group) +{ + struct mali_gp_core *core = NULL; + + MALI_DEBUG_ASSERT(NULL == mali_global_gp_core); + MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description)); + + core = _mali_osk_malloc(sizeof(struct mali_gp_core)); + if (NULL != core) { + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE)) { + _mali_osk_errcode_t ret; + + ret = mali_gp_reset(core); + + if (_MALI_OSK_ERR_OK == ret) { + ret = mali_group_add_gp_core(group, core); + if (_MALI_OSK_ERR_OK == ret) { + /* Setup IRQ handlers (which will do IRQ probing if needed) */ + core->irq = _mali_osk_irq_init(resource->irq, + mali_group_upper_half_gp, + group, + mali_gp_irq_probe_trigger, + mali_gp_irq_probe_ack, + core, + resource->description); + if (NULL != core->irq) { + MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core)); + mali_global_gp_core = core; + + return core; + } else { + MALI_PRINT_ERROR(("Mali GP: Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description)); + } + mali_group_remove_gp_core(group); + } else { + MALI_PRINT_ERROR(("Mali GP: Failed to add core %s to group\n", core->hw_core.description)); + } + } + mali_hw_core_delete(&core->hw_core); + } + + _mali_osk_free(core); + } else { + MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n")); + } + + return NULL; +} + +void mali_gp_delete(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + + _mali_osk_irq_term(core->irq); + mali_hw_core_delete(&core->hw_core); + mali_global_gp_core = NULL; + _mali_osk_free(core); +} + +void mali_gp_stop_bus(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS); +} + +_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core) +{ + int i; + + MALI_DEBUG_ASSERT_POINTER(core); + + /* Send the stop bus command. 
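+ * Stopping the bus quiesces the core's outstanding memory transactions;
+ * the poll loop below gives it MALI_REG_POLL_COUNT_SLOW register reads
+ * to raise the BUS_STOPPED status bit before the caller gets
+ * _MALI_OSK_ERR_FAULT back.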
*/ + mali_gp_stop_bus(core); + + /* Wait for bus to be stopped */ + for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; i++) { + if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) { + break; + } + } + + if (MALI_REG_POLL_COUNT_SLOW == i) { + MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description)); + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +void mali_gp_hard_reset(struct mali_gp_core *core) +{ + const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT; + const u32 reset_invalid_value = 0xC0FFE000; + const u32 reset_check_value = 0xC01A0000; + const u32 reset_default_value = 0; + int i; + + MALI_DEBUG_ASSERT_POINTER(core); + MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description)); + + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value); + + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET); + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value); + if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) { + break; + } + } + + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n")); + } + + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */ + /* Re-enable interrupts */ + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); + +} + +void mali_gp_reset_async(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + + MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description)); + + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */ + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET); + +} + +_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core) +{ + int i; + u32 rawstat = 0; + + MALI_DEBUG_ASSERT_POINTER(core); + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + rawstat = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT); + if (rawstat & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) { + break; + } + } + + if (i == MALI_REG_POLL_COUNT_FAST) { + MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, rawstat: 0x%08x\n", + core->hw_core.description, rawstat)); + return _MALI_OSK_ERR_FAULT; + } + + /* Re-enable interrupts */ + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core) +{ + mali_gp_reset_async(core); + return mali_gp_reset_wait(core); +} + +void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job) +{ + u32 startcmd = 0; + u32 *frame_registers = mali_gp_job_get_frame_registers(job); + u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job); + u32 counter_src1 = 
mali_gp_job_get_perf_counter_src1(job);
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ if (mali_gp_job_has_vs_job(job)) {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if (mali_gp_job_has_plbu_job(job)) {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ MALI_DEBUG_ASSERT(0 != startcmd);
+
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+ if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core.
+ *
+ * Don't actually run the job if PROFILING_SKIP_GP_JOBS is set; just
+ * force the core to assert the completion interrupt.
+ */
+#if !defined(PROFILING_SKIP_GP_JOBS)
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+#else
+ {
+ u32 bits = 0;
+
+ if (mali_gp_job_has_vs_job(job))
+ bits = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+ if (mali_gp_job_has_plbu_job(job))
+ bits |= MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+
+ mali_hw_core_register_write_relaxed(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, bits);
+ }
+#endif
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+}
+
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+ u32 irq_readout;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+ if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+ _mali_osk_write_mem_barrier();
+ }
+ /*
+ * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+ * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
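+ * In that case the PLBU_OUT_OF_MEM bit is no longer set in RAWSTAT, so
+ * the new heap bounds are silently dropped and no resume command is
+ * issued.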
+ */ +} + +u32 mali_gp_core_get_version(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION); +} + +struct mali_gp_core *mali_gp_get_global_gp_core(void) +{ + return mali_global_gp_core; +} + +/* ------------- interrupt handling below ------------------ */ +static void mali_gp_irq_probe_trigger(void *data) +{ + struct mali_gp_core *core = (struct mali_gp_core *)data; + + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR); + _mali_osk_mem_barrier(); +} + +static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data) +{ + struct mali_gp_core *core = (struct mali_gp_core *)data; + u32 irq_readout; + + irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT); + if (MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR & irq_readout) { + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR); + _mali_osk_mem_barrier(); + return _MALI_OSK_ERR_OK; + } + + return _MALI_OSK_ERR_FAULT; +} + +/* ------ local helper functions below --------- */ +#if MALI_STATE_TRACKING +u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size) +{ + int n = 0; + + n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description); + + return n; +} +#endif + +void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job) +{ + u32 val0 = 0; + u32 val1 = 0; + u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job); + u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job); + + if (MALI_HW_CORE_NO_COUNTER != counter_src0) { + val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE); + mali_gp_job_set_perf_counter_value0(job, val0); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C0, val0); + _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C0, val0); +#endif + + } + + if (MALI_HW_CORE_NO_COUNTER != counter_src1) { + val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE); + mali_gp_job_set_perf_counter_value1(job, val1); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_report_hw_counter(COUNTER_VP_0_C1, val1); + _mali_osk_profiling_record_global_counters(COUNTER_VP_0_C1, val1); +#endif + } +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_gp.h b/drivers/gpu/arm/mali400/common/mali_gp.h --- a/drivers/gpu/arm/mali400/common/mali_gp.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_gp.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_GP_H__ +#define __MALI_GP_H__ + +#include "mali_osk.h" +#include "mali_gp_job.h" +#include "mali_hw_core.h" +#include "regs/mali_gp_regs.h" + +struct mali_group; + +/** + * Definition of the GP core struct + * Used to track a GP core in the system. + */ +struct mali_gp_core { + struct mali_hw_core hw_core; /**< Common for all HW cores */ + _mali_osk_irq_t *irq; /**< IRQ handler */ +}; + +_mali_osk_errcode_t mali_gp_initialize(void); +void mali_gp_terminate(void); + +struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t *resource, struct mali_group *group); +void mali_gp_delete(struct mali_gp_core *core); + +void mali_gp_stop_bus(struct mali_gp_core *core); +_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core); +void mali_gp_reset_async(struct mali_gp_core *core); +_mali_osk_errcode_t mali_gp_reset_wait(struct mali_gp_core *core); +void mali_gp_hard_reset(struct mali_gp_core *core); +_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core); + +void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job); +void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr); + +u32 mali_gp_core_get_version(struct mali_gp_core *core); + +struct mali_gp_core *mali_gp_get_global_gp_core(void); + +#if MALI_STATE_TRACKING +u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size); +#endif + +void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job); + +MALI_STATIC_INLINE const char *mali_gp_core_description(struct mali_gp_core *core) +{ + return core->hw_core.description; +} + +MALI_STATIC_INLINE enum mali_interrupt_result mali_gp_get_interrupt_result(struct mali_gp_core *core) +{ + u32 stat_used = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT) & + MALIGP2_REG_VAL_IRQ_MASK_USED; + + if (0 == stat_used) { + return MALI_INTERRUPT_RESULT_NONE; + } else if ((MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | + MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) == stat_used) { + return MALI_INTERRUPT_RESULT_SUCCESS; + } else if (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST == stat_used) { + return MALI_INTERRUPT_RESULT_SUCCESS_VS; + } else if (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST == stat_used) { + return MALI_INTERRUPT_RESULT_SUCCESS_PLBU; + } else if (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM & stat_used) { + return MALI_INTERRUPT_RESULT_OOM; + } + + return MALI_INTERRUPT_RESULT_ERROR; +} + +MALI_STATIC_INLINE u32 mali_gp_get_rawstat(struct mali_gp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return mali_hw_core_register_read(&core->hw_core, + MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT); +} + +MALI_STATIC_INLINE u32 mali_gp_is_active(struct mali_gp_core *core) +{ + u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS); + return (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE) ? 
MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core) +{ + mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE); +} + +MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, enum mali_interrupt_result exceptions) +{ + /* Enable all interrupts, except those specified in exceptions */ + u32 value; + + if (MALI_INTERRUPT_RESULT_SUCCESS_VS == exceptions) { + /* Enable all used except VS complete */ + value = MALIGP2_REG_VAL_IRQ_MASK_USED & + ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST; + } else { + MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_SUCCESS_PLBU == + exceptions); + /* Enable all used except PLBU complete */ + value = MALIGP2_REG_VAL_IRQ_MASK_USED & + ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST; + } + + mali_hw_core_register_write(&core->hw_core, + MALIGP2_REG_ADDR_MGMT_INT_MASK, + value); +} + +MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core) +{ + return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR); +} + +#endif /* __MALI_GP_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_gp_job.c b/drivers/gpu/arm/mali400/common/mali_gp_job.c --- a/drivers/gpu/arm/mali400/common/mali_gp_job.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_gp_job.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,302 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_memory_virtual.h"
+#include "mali_memory_defer_bind.h"
+
+static u32 gp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+static u32 gp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+static void _mali_gp_del_varying_allocations(struct mali_gp_job *job);
+
+
+static int _mali_gp_add_varying_allocations(struct mali_session_data *session,
+ struct mali_gp_job *job,
+ u32 *alloc,
+ u32 num)
+{
+ int i = 0;
+ struct mali_gp_allocation_node *alloc_node;
+ mali_mem_allocation *mali_alloc = NULL;
+ struct mali_vma_node *mali_vma_node = NULL;
+
+ for (i = 0 ; i < num ; i++) {
+ MALI_DEBUG_ASSERT(alloc[i]);
+ alloc_node = _mali_osk_calloc(1, sizeof(struct mali_gp_allocation_node));
+ if (alloc_node) {
+ INIT_LIST_HEAD(&alloc_node->node);
+ /* Find the mali allocation structure by virtual address */
+ mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, alloc[i], 0);
+
+ if (likely(mali_vma_node)) {
+ mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node);
+ MALI_DEBUG_ASSERT(alloc[i] == mali_vma_node->vm_node.start);
+ } else {
+ MALI_DEBUG_PRINT(1, ("ERROR! _mali_gp_add_varying_allocations: can't find allocation %d by address 0x%x, num=%d\n", i, alloc[i], num));
+ _mali_osk_free(alloc_node);
+ goto fail;
+ }
+ alloc_node->alloc = mali_alloc;
+ /* Add to the GP job's varying allocation list */
+ list_move(&alloc_node->node, &job->varying_alloc);
+ } else
+ goto fail;
+ }
+
+ return 0;
+fail:
+ MALI_DEBUG_PRINT(1, ("ERROR! _mali_gp_add_varying_allocations: failed to alloc memory!\n"));
+ _mali_gp_del_varying_allocations(job);
+ return -1;
+}
+
+
+static void _mali_gp_del_varying_allocations(struct mali_gp_job *job)
+{
+ struct mali_gp_allocation_node *alloc_node, *tmp_node;
+
+ list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
+ list_del(&alloc_node->node);
+ kfree(alloc_node);
+ }
+ INIT_LIST_HEAD(&job->varying_alloc);
+}
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker)
+{
+ struct mali_gp_job *job;
+ u32 perf_counter_flag;
+ u32 __user *memory_list = NULL;
+ struct mali_gp_allocation_node *alloc_node, *tmp_node;
+
+ job = _mali_osk_calloc(1, sizeof(struct mali_gp_job));
+ if (NULL != job) {
+ job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+ if (NULL == job->finished_notification) {
+ goto fail3;
+ }
+
+ job->oom_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+ if (NULL == job->oom_notification) {
+ goto fail2;
+ }
+
+ if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_gp_start_job_s))) {
+ goto fail1;
+ }
+
+ perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+
+ /* Case when no counters came from user space:
+ * pass the debugfs / DS-5 provided global ones to the job object */
+ if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) ||
+ (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) {
+ mali_gp_job_set_perf_counter_src0(job, mali_gp_job_get_gp_counter_src0());
+ mali_gp_job_set_perf_counter_src1(job, mali_gp_job_get_gp_counter_src1());
+ }
+
+ _mali_osk_list_init(&job->list);
+ job->session =
session;
+ job->id = id;
+ job->heap_current_addr = job->uargs.frame_registers[4];
+ job->perf_counter_value0 = 0;
+ job->perf_counter_value1 = 0;
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+
+
+ INIT_LIST_HEAD(&job->varying_alloc);
+ INIT_LIST_HEAD(&job->vary_todo);
+ job->dmem = NULL;
+
+ if (job->uargs.deferred_mem_num > session->allocation_mgr.mali_allocation_num) {
+ MALI_PRINT_ERROR(("Mali GP job: The number of varying buffers to defer-bind is invalid!\n"));
+ goto fail1;
+ }
+
+ /* Add the varying allocation list */
+ if (job->uargs.deferred_mem_num > 0) {
+ /* Copy the varying list from user space */
+ job->varying_list = _mali_osk_calloc(1, sizeof(u32) * job->uargs.deferred_mem_num);
+ if (!job->varying_list) {
+ MALI_PRINT_ERROR(("Mali GP job: allocating varying_list failed, varying_alloc_num = %d!\n", job->uargs.deferred_mem_num));
+ goto fail1;
+ }
+
+ memory_list = (u32 __user *)(uintptr_t)job->uargs.deferred_mem_list;
+
+ if (0 != _mali_osk_copy_from_user(job->varying_list, memory_list, sizeof(u32) * job->uargs.deferred_mem_num)) {
+ MALI_PRINT_ERROR(("Mali GP job: Failed to copy varying list from user space!\n"));
+ goto fail;
+ }
+
+ if (unlikely(_mali_gp_add_varying_allocations(session, job, job->varying_list,
+ job->uargs.deferred_mem_num))) {
+ MALI_PRINT_ERROR(("Mali GP job: _mali_gp_add_varying_allocations failed!\n"));
+ goto fail;
+ }
+
+ /* Do preparation for each allocation */
+ list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
+ if (unlikely(_MALI_OSK_ERR_OK != mali_mem_defer_bind_allocation_prepare(alloc_node->alloc, &job->vary_todo, &job->required_varying_memsize))) {
+ MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind_allocation_prepare failed!\n"));
+ goto fail;
+ }
+ }
+
+ _mali_gp_del_varying_allocations(job);
+
+ /* Bind varyings here to avoid memory latency issues. */
+ {
+ struct mali_defer_mem_block dmem_block;
+
+ INIT_LIST_HEAD(&dmem_block.free_pages);
+ atomic_set(&dmem_block.num_free_pages, 0);
+
+ if (mali_mem_prepare_mem_for_job(job, &dmem_block)) {
+ MALI_PRINT_ERROR(("Mali GP job: mali_mem_prepare_mem_for_job failed!\n"));
+ goto fail;
+ }
+ if (_MALI_OSK_ERR_OK != mali_mem_defer_bind(job, &dmem_block)) {
+ MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind failed! GP %x fail!\n", job));
+ goto fail;
+ }
+ }
+
+ if (job->uargs.varying_memsize > MALI_UK_BIG_VARYING_SIZE) {
+ job->big_job = 1;
+ }
+ }
+ job->pp_tracker = pp_tracker;
+ if (NULL != job->pp_tracker) {
+ /* Take a reference on PP job's tracker that will be released when the GP
+ job is done.
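+ The matching release happens in mali_gp_job_signal_pp_tracker(), which
+ also passes the GP job's success/failure status on to the timeline
+ system.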
*/
+ mali_timeline_system_tracker_get(session->timeline_system, pp_tracker);
+ }
+
+ mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_GP, NULL, job);
+ mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence));
+
+ return job;
+ } else {
+ MALI_PRINT_ERROR(("Mali GP job: _mali_osk_calloc failed!\n"));
+ return NULL;
+ }
+
+
+fail:
+ _mali_osk_free(job->varying_list);
+ /* Handle allocation failure here: free all varying nodes */
+ {
+ struct mali_backend_bind_list *bkn, *bkn_tmp;
+ list_for_each_entry_safe(bkn, bkn_tmp, &job->vary_todo, node) {
+ list_del(&bkn->node);
+ _mali_osk_free(bkn);
+ }
+ }
+fail1:
+ _mali_osk_notification_delete(job->oom_notification);
+fail2:
+ _mali_osk_notification_delete(job->finished_notification);
+fail3:
+ _mali_osk_free(job);
+ return NULL;
+}
+
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+ struct mali_backend_bind_list *bkn, *bkn_tmp;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ _mali_osk_free(job->varying_list);
+
+ /* Free any varying nodes still waiting for defer-bind */
+ list_for_each_entry_safe(bkn, bkn_tmp, &job->vary_todo, node) {
+ list_del(&bkn->node);
+ _mali_osk_free(bkn);
+ }
+
+ mali_mem_defer_dmem_free(job);
+
+ /* De-allocate the pre-allocated OOM notifications */
+ if (NULL != job->oom_notification) {
+ _mali_osk_notification_delete(job->oom_notification);
+ job->oom_notification = NULL;
+ }
+ if (NULL != job->finished_notification) {
+ _mali_osk_notification_delete(job->finished_notification);
+ job->finished_notification = NULL;
+ }
+
+ _mali_osk_free(job);
+}
+
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_gp_job *iter;
+ struct mali_gp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_gp_job, list) {
+
+ /* A span is used to handle job ID wrapping. */
+ bool job_is_after = (mali_gp_job_get_id(job) -
+ mali_gp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN;
+
+ if (job_is_after) {
+ break;
+ }
+ }
+
+ _mali_osk_list_add(&job->list, &iter->list);
+}
+
+u32 mali_gp_job_get_gp_counter_src0(void)
+{
+ return gp_counter_src0;
+}
+
+void mali_gp_job_set_gp_counter_src0(u32 counter)
+{
+ gp_counter_src0 = counter;
+}
+
+u32 mali_gp_job_get_gp_counter_src1(void)
+{
+ return gp_counter_src1;
+}
+
+void mali_gp_job_set_gp_counter_src1(u32 counter)
+{
+ gp_counter_src1 = counter;
+}
+
+mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (NULL != job->pp_tracker) {
+ schedule_mask |= mali_timeline_system_tracker_put(job->session->timeline_system, job->pp_tracker, MALI_FALSE == success);
+ job->pp_tracker = NULL;
+ }
+
+ return schedule_mask;
+}
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_gp_job.h b/drivers/gpu/arm/mali400/common/mali_gp_job.h
--- a/drivers/gpu/arm/mali400/common/mali_gp_job.h 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_gp_job.h 2018-05-06 08:49:49.174695256 +0200
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_timeline.h"
+#include "mali_scheduler_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_timeline.h"
+
+struct mali_defer_mem;
+/**
+ * This structure represents a GP job
+ *
+ * The GP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these subsystems throughout
+ * its lifetime. Different parts of the GP job struct are used by different
+ * subsystems. Accessor functions ensure that the correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
+ */
+struct mali_gp_job {
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
+ _mali_uk_gp_start_job_s uargs; /**< Arguments from user space */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+
+ /*
+ * These members are used by the executor and/or group,
+ * protected by executor lock
+ */
+ _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user.
Hold executor lock when setting,
+ * no lock needed when reading
+ */
+ u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
+ u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
+ u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
+ struct mali_defer_mem *dmem; /**< Used by defer-bind to store dmem info */
+ struct list_head varying_alloc; /**< Holds the list of varying allocations */
+ u32 bind_flag; /**< Flag for defer-bind */
+ u32 *varying_list; /**< Varying memory list that needs defer-bind */
+ struct list_head vary_todo; /**< List of backends that still need defer-bind */
+ u32 required_varying_memsize; /**< Size of varying memory to reallocate */
+ u32 big_job; /**< Set if the GP job has large varying output and may take a long time */
+};
+
+#define MALI_DEFER_BIND_MEMORY_PREPARED (0x1 << 0)
+#define MALI_DEFER_BIND_MEMORY_BINDED (0x1 << 2)
+
+struct mali_gp_allocation_node {
+ struct list_head node;
+ mali_mem_allocation *alloc;
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+u32 mali_gp_job_get_gp_counter_src0(void);
+void mali_gp_job_set_gp_counter_src0(u32 counter);
+u32 mali_gp_job_get_gp_counter_src1(void);
+void mali_gp_job_set_gp_counter_src1(u32 counter);
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_cache_order(struct mali_gp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (NULL == job) ? 0 : job->cache_order;
+}
+
+MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.user_job_ptr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.flush_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->pid;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->tid;
+}
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ?
MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->heap_current_addr; +} + +MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->heap_current_addr = heap_addr; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.perf_counter_flag; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.perf_counter_src0; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.perf_counter_src1; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->perf_counter_value0; +} + +MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->perf_counter_value1; +} + +MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.perf_counter_src0 = src; +} + +MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.perf_counter_src1 = src; +} + +MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->perf_counter_value0 = value; +} + +MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->perf_counter_value1 = value; +} + +void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list); + +MALI_STATIC_INLINE void mali_gp_job_list_move(struct mali_gp_job *job, + _mali_osk_list_t *list) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list)); + _mali_osk_list_move(&job->list, list); +} + +MALI_STATIC_INLINE void mali_gp_job_list_remove(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + _mali_osk_list_delinit(&job->list); +} + +MALI_STATIC_INLINE _mali_osk_notification_t * +mali_gp_job_get_finished_notification(struct mali_gp_job *job) +{ + _mali_osk_notification_t *notification; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(job->finished_notification); + + notification = job->finished_notification; + job->finished_notification = NULL; + + return notification; +} + +MALI_STATIC_INLINE _mali_osk_notification_t *mali_gp_job_get_oom_notification( + struct mali_gp_job *job) +{ + _mali_osk_notification_t *notification; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(job->oom_notification); + + notification = job->oom_notification; + job->oom_notification = NULL; + + return notification; +} + +MALI_STATIC_INLINE void mali_gp_job_set_oom_notification( + struct mali_gp_job *job, + _mali_osk_notification_t *notification) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(NULL == 
job->oom_notification); + job->oom_notification = notification; +} + +MALI_STATIC_INLINE struct mali_timeline_tracker *mali_gp_job_get_tracker( + struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return &(job->tracker); +} + + +MALI_STATIC_INLINE u32 *mali_gp_job_get_timeline_point_ptr( + struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr; +} + + +/** + * Release reference on tracker for PP job that depends on this GP job. + * + * @note If GP job has a reference on tracker, this function MUST be called before the GP job is + * deleted. + * + * @param job GP job that is done. + * @param success MALI_TRUE if job completed successfully, MALI_FALSE if not. + * @return A scheduling bitmask indicating whether scheduling needs to be done. + */ +mali_scheduler_mask mali_gp_job_signal_pp_tracker(struct mali_gp_job *job, mali_bool success); + +#endif /* __MALI_GP_JOB_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_group.c b/drivers/gpu/arm/mali400/common/mali_group.c --- a/drivers/gpu/arm/mali400/common/mali_group.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_group.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,1865 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include "mali_kernel_common.h" +#include "mali_group.h" +#include "mali_osk.h" +#include "mali_l2_cache.h" +#include "mali_gp.h" +#include "mali_pp.h" +#include "mali_mmu.h" +#include "mali_dlbu.h" +#include "mali_broadcast.h" +#include "mali_scheduler.h" +#include "mali_osk_profiling.h" +#include "mali_osk_mali.h" +#include "mali_pm_domain.h" +#include "mali_pm.h" +#include "mali_executor.h" + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) +#include +#include +#endif + +#define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2) + +#if defined(CONFIG_MALI400_PROFILING) +static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num); +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ + +static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, }; +static u32 mali_global_num_groups = 0; + +/* SW timer for job execution */ +int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT; + +/* local helper functions */ +static void mali_group_bottom_half_mmu(void *data); +static void mali_group_bottom_half_gp(void *data); +static void mali_group_bottom_half_pp(void *data); +static void mali_group_timeout(void *data); +static void mali_group_reset_pp(struct mali_group *group); +static void mali_group_reset_mmu(struct mali_group *group); + +static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload); +static void mali_group_recovery_reset(struct mali_group *group); + +struct mali_group *mali_group_create(struct mali_l2_cache_core *core, + struct mali_dlbu_core *dlbu, + struct mali_bcast_unit *bcast, + u32 domain_index) +{ + struct mali_group *group = NULL; + + if (mali_global_num_groups >= 
MALI_MAX_NUMBER_OF_GROUPS) { + MALI_PRINT_ERROR(("Mali group: Too many group objects created\n")); + return NULL; + } + + group = _mali_osk_calloc(1, sizeof(struct mali_group)); + if (NULL != group) { + group->timeout_timer = _mali_osk_timer_init(); + if (NULL != group->timeout_timer) { + _mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group); + + group->l2_cache_core[0] = core; + _mali_osk_list_init(&group->group_list); + _mali_osk_list_init(&group->executor_list); + _mali_osk_list_init(&group->pm_domain_list); + group->bcast_core = bcast; + group->dlbu_core = dlbu; + + /* register this object as a part of the correct power domain */ + if ((NULL != core) || (NULL != dlbu) || (NULL != bcast)) + group->pm_domain = mali_pm_register_group(domain_index, group); + + mali_global_groups[mali_global_num_groups] = group; + mali_global_num_groups++; + + return group; + } + _mali_osk_free(group); + } + + return NULL; +} + +void mali_group_delete(struct mali_group *group) +{ + u32 i; + + MALI_DEBUG_PRINT(4, ("Deleting group %s\n", + mali_group_core_description(group))); + + MALI_DEBUG_ASSERT(NULL == group->parent_group); + MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state))); + + /* Delete the resources that this group owns */ + if (NULL != group->gp_core) { + mali_gp_delete(group->gp_core); + } + + if (NULL != group->pp_core) { + mali_pp_delete(group->pp_core); + } + + if (NULL != group->mmu) { + mali_mmu_delete(group->mmu); + } + + if (mali_group_is_virtual(group)) { + /* Remove all groups from virtual group */ + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + child->parent_group = NULL; + mali_group_delete(child); + } + + mali_dlbu_delete(group->dlbu_core); + + if (NULL != group->bcast_core) { + mali_bcast_unit_delete(group->bcast_core); + } + } + + for (i = 0; i < mali_global_num_groups; i++) { + if (mali_global_groups[i] == group) { + mali_global_groups[i] = NULL; + mali_global_num_groups--; + + if (i != mali_global_num_groups) { + /* We removed a group from the middle of the array -- move the last + * group to the current position to close the gap */ + mali_global_groups[i] = mali_global_groups[mali_global_num_groups]; + mali_global_groups[mali_global_num_groups] = NULL; + } + + break; + } + } + + if (NULL != group->timeout_timer) { + _mali_osk_timer_del(group->timeout_timer); + _mali_osk_timer_term(group->timeout_timer); + } + + if (NULL != group->bottom_half_work_mmu) { + _mali_osk_wq_delete_work(group->bottom_half_work_mmu); + } + + if (NULL != group->bottom_half_work_gp) { + _mali_osk_wq_delete_work(group->bottom_half_work_gp); + } + + if (NULL != group->bottom_half_work_pp) { + _mali_osk_wq_delete_work(group->bottom_half_work_pp); + } + + _mali_osk_free(group); +} + +_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core) +{ + /* This group object now owns the MMU core object */ + group->mmu = mmu_core; + group->bottom_half_work_mmu = _mali_osk_wq_create_work(mali_group_bottom_half_mmu, group); + if (NULL == group->bottom_half_work_mmu) { + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +void mali_group_remove_mmu_core(struct mali_group *group) +{ + /* This group object no longer owns the MMU core object */ + group->mmu = NULL; + if (NULL != group->bottom_half_work_mmu) { + _mali_osk_wq_delete_work(group->bottom_half_work_mmu); 
+	}
+}
+
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core)
+{
+	/* This group object now owns the GP core object */
+	group->gp_core = gp_core;
+	group->bottom_half_work_gp = _mali_osk_wq_create_work(mali_group_bottom_half_gp, group);
+	if (NULL == group->bottom_half_work_gp) {
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_gp_core(struct mali_group *group)
+{
+	/* This group object no longer owns the GP core object */
+	group->gp_core = NULL;
+	if (NULL != group->bottom_half_work_gp) {
+		_mali_osk_wq_delete_work(group->bottom_half_work_gp);
+	}
+}
+
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
+{
+	/* This group object now owns the PP core object */
+	group->pp_core = pp_core;
+	group->bottom_half_work_pp = _mali_osk_wq_create_work(mali_group_bottom_half_pp, group);
+	if (NULL == group->bottom_half_work_pp) {
+		return _MALI_OSK_ERR_FAULT;
+	}
+	return _MALI_OSK_ERR_OK;
+}
+
+void mali_group_remove_pp_core(struct mali_group *group)
+{
+	/* This group object no longer owns the PP core object */
+	group->pp_core = NULL;
+	if (NULL != group->bottom_half_work_pp) {
+		_mali_osk_wq_delete_work(group->bottom_half_work_pp);
+	}
+}
+
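+/*
+ * Take PM domain references for the group itself, for every child of a
+ * virtual group, and for the L2 cache(s) those groups use. If all of the
+ * referenced domains are already powered the group goes straight to
+ * ACTIVE; otherwise it stays in ACTIVATION_PENDING until the power-up
+ * notification arrives through mali_group_power_up().
+ */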
+enum mali_group_state mali_group_activate(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
+			     mali_group_core_description(group)));
+
+	if (MALI_GROUP_STATE_INACTIVE == group->state) {
+		/* Group is inactive, get PM refs in order to power up */
+
+		/*
+		 * We'll take a maximum of 2 power domain references per group:
+		 * one for the group itself, and one for its L2 cache.
+		 */
+		struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+		struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
+		u32 num_domains = 0;
+		mali_bool all_groups_on;
+
+		/* Deal with child groups first */
+		if (mali_group_is_virtual(group)) {
+			/*
+			 * The virtual group might have 0, 1 or 2 L2s in
+			 * its l2_cache_core array, but we ignore these and
+			 * let the child groups take the needed L2 cache ref
+			 * on behalf of the virtual group.
+			 * In other words, the L2 refs are taken in pair with
+			 * the physical group which the L2 is attached to.
+			 */
+			struct mali_group *child;
+			struct mali_group *temp;
+
+			/*
+			 * Child group is inactive, get PM
+			 * refs in order to power up.
+			 */
+			_MALI_OSK_LIST_FOREACHENTRY(child, temp,
+						    &group->group_list,
+						    struct mali_group, group_list) {
+				MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
+						  == child->state);
+
+				child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+				MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+				domains[num_domains] = child->pm_domain;
+				groups[num_domains] = child;
+				num_domains++;
+
+				/*
+				 * Take L2 domain ref for child group.
+				 */
+				MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
+						  > num_domains);
+				domains[num_domains] = mali_l2_cache_get_pm_domain(
+							       child->l2_cache_core[0]);
+				groups[num_domains] = NULL;
+				MALI_DEBUG_ASSERT(NULL ==
+						  child->l2_cache_core[1]);
+				num_domains++;
+			}
+		} else {
+			/* Take L2 domain ref for physical groups. */
+			MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+					  num_domains);
+
+			domains[num_domains] = mali_l2_cache_get_pm_domain(
+						       group->l2_cache_core[0]);
+			groups[num_domains] = NULL;
+			MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+			num_domains++;
+		}
+
+		/* Do the group itself last (its dependencies first) */
+
+		group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+		MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+		domains[num_domains] = group->pm_domain;
+		groups[num_domains] = group;
+		num_domains++;
+
+		all_groups_on = mali_pm_get_domain_refs(domains, groups,
+							num_domains);
+
+		/*
+		 * Complete the activation for the group, whether it is a
+		 * virtual or a physical group.
+		 */
+		if (MALI_TRUE == all_groups_on) {
+			mali_group_set_active(group);
+		}
+	} else if (MALI_GROUP_STATE_ACTIVE == group->state) {
+		/* Already active */
+		MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+	} else {
+		/*
+		 * Activation already pending; group->power_is_on could
+		 * be either true or false. We need to wait for the power-up
+		 * notification anyway.
+		 */
+		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
+				  == group->state);
+	}
+
+	MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
+			     mali_group_core_description(group),
+			     MALI_GROUP_STATE_ACTIVE == group->state ?
+			     "ACTIVE" : "PENDING"));
+
+	return group->state;
+}
+
+mali_bool mali_group_set_active(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
+	MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+
+	MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
+			     mali_group_core_description(group)));
+
+	if (mali_group_is_virtual(group)) {
+		struct mali_group *child;
+		struct mali_group *temp;
+
+		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+					    struct mali_group, group_list) {
+			if (MALI_TRUE != child->power_is_on) {
+				return MALI_FALSE;
+			}
+
+			child->state = MALI_GROUP_STATE_ACTIVE;
+		}
+
+		mali_group_reset(group);
+	}
+
+	/* Go to ACTIVE state */
+	group->state = MALI_GROUP_STATE_ACTIVE;
+
+	return MALI_TRUE;
+}
+
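+/*
+ * Drop the PM domain references taken in mali_group_activate() and mark
+ * the group (and, for a virtual group, all of its children) INACTIVE.
+ * Returns MALI_TRUE when the last reference was dropped, i.e. when the
+ * caller is expected to follow up with a power-down.
+ */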
+mali_bool mali_group_deactivate(struct mali_group *group)
+{
+	struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+	u32 num_domains = 0;
+	mali_bool power_down = MALI_FALSE;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
+
+	MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
+			     mali_group_core_description(group)));
+
+	group->state = MALI_GROUP_STATE_INACTIVE;
+
+	MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+	domains[num_domains] = group->pm_domain;
+	num_domains++;
+
+	if (mali_group_is_virtual(group)) {
+		/* Release refs for all child groups */
+		struct mali_group *child;
+		struct mali_group *temp;
+
+		_MALI_OSK_LIST_FOREACHENTRY(child, temp,
+					    &group->group_list,
+					    struct mali_group, group_list) {
+			child->state = MALI_GROUP_STATE_INACTIVE;
+
+			MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+			domains[num_domains] = child->pm_domain;
+			num_domains++;
+
+			/* Release L2 cache domain for child groups */
+			MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+					  num_domains);
+			domains[num_domains] = mali_l2_cache_get_pm_domain(
+						       child->l2_cache_core[0]);
+			MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
+			num_domains++;
+		}
+
+		/*
+		 * The mali_group_power_down() steps must be done right here
+		 * for a virtual group, because the virtual group itself is
+		 * likely to stay powered on while its child groups are now
+		 * very likely to be powered off (and thus lose their state).
+		 */
+
+		mali_group_clear_session(group);
+		/*
+		 * Disable the broadcast unit (clear its mask).
+		 * This is needed in case the GPU isn't actually
+		 * powered down at this point and groups are
+		 * removed from an inactive virtual group.
+		 * If not, then the broadcast unit will intercept
+		 * their interrupts!
+		 */
+		mali_bcast_disable(group->bcast_core);
+	} else {
+		/* Release L2 cache domain for physical groups */
+		MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+				  num_domains);
+		domains[num_domains] = mali_l2_cache_get_pm_domain(
+					       group->l2_cache_core[0]);
+		MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+		num_domains++;
+	}
+
+	power_down = mali_pm_put_domain_refs(domains, num_domains);
+
+	return power_down;
+}
+
+void mali_group_power_up(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
+			     mali_group_core_description(group)));
+
+	group->power_is_on = MALI_TRUE;
+
+	if (MALI_FALSE == mali_group_is_virtual(group)
+	    && MALI_FALSE == mali_group_is_in_virtual(group)) {
+		mali_group_reset(group);
+	}
+
+	/*
+	 * When we acquire just one physical group from a virtual group,
+	 * we must remove the bcast & dlbu masks from the virtual group and
+	 * reset the bcast and dlbu cores, even though some of the PP cores
+	 * in the virtual group may not be powered on.
+	 */
+	if (MALI_TRUE == mali_group_is_virtual(group)) {
+		mali_bcast_reset(group->bcast_core);
+		mali_dlbu_update_mask(group->dlbu_core);
+	}
+}
+
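+/*
+ * Called once power has actually been (or is about to be) removed from
+ * the group. For a physical group the MMU session is cleared here; for a
+ * virtual group that was already done in mali_group_deactivate().
+ */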
+void mali_group_power_down(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
+			     mali_group_core_description(group)));
+
+	group->power_is_on = MALI_FALSE;
+
+	if (mali_group_is_virtual(group)) {
+		/*
+		 * What we do for physical groups in this function should
+		 * already have been done in mali_group_deactivate()
+		 * for a virtual group.
+		 */
+		MALI_DEBUG_ASSERT(NULL == group->session);
+	} else {
+		mali_group_clear_session(group);
+	}
+}
+
+MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
+{
+	u32 i;
+	struct mali_group *group;
+	struct mali_group *temp;
+
+	MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
+			     mali_group_core_description(vgroup),
+			     vgroup));
+	MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
+	MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
+
+	i = 0;
+	_MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
+		MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
+				     i, mali_group_core_description(group),
+				     group, group->l2_cache_core[0]));
+		i++;
+	}
+})
+
+static void mali_group_dump_core_status(struct mali_group *group)
+{
+	u32 i;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(NULL != group->gp_core || (NULL != group->pp_core && !mali_group_is_virtual(group)));
+
+	if (NULL != group->gp_core) {
+		MALI_PRINT(("Dump Group %s\n", group->gp_core->hw_core.description));
+
+		for (i = 0; i < 0xA8; i += 0x10) {
+			MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->gp_core->hw_core, i),
+				    mali_hw_core_register_read(&group->gp_core->hw_core, i + 4),
+				    mali_hw_core_register_read(&group->gp_core->hw_core, i + 8),
+				    mali_hw_core_register_read(&group->gp_core->hw_core, i + 12)));
+		}
+	} else {
+		MALI_PRINT(("Dump Group %s\n", group->pp_core->hw_core.description));
+
+		for (i = 0; i < 0x5c; i += 0x10) {
+			MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+				    mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+				    mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+				    mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+		}
+
+		/* Ignore some minor registers */
+		for (i = 0x1000; i < 0x1068; i += 0x10) {
+			MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->pp_core->hw_core, i),
+				    mali_hw_core_register_read(&group->pp_core->hw_core, i + 4),
+				    mali_hw_core_register_read(&group->pp_core->hw_core, i + 8),
+				    mali_hw_core_register_read(&group->pp_core->hw_core, i + 12)));
+		}
+	}
+
+	MALI_PRINT(("Dump Group MMU\n"));
+	for (i = 0; i < 0x24; i += 0x10) {
+		MALI_PRINT(("0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i, mali_hw_core_register_read(&group->mmu->hw_core, i),
+			    mali_hw_core_register_read(&group->mmu->hw_core, i + 4),
+			    mali_hw_core_register_read(&group->mmu->hw_core, i + 8),
+			    mali_hw_core_register_read(&group->mmu->hw_core, i + 12)));
+	}
+}
+
+/**
+ * @brief Dump group status
+ */
+void mali_group_dump_status(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+
+	if (mali_group_is_virtual(group)) {
+		struct mali_group *group_c;
+		struct mali_group *temp;
+		_MALI_OSK_LIST_FOREACHENTRY(group_c, temp, &group->group_list, struct mali_group, group_list) {
+			mali_group_dump_core_status(group_c);
+		}
+	} else {
+		mali_group_dump_core_status(group);
+	}
+}
+
+/**
+ * @brief Add child group to virtual group parent
+ */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
+{
+	mali_bool found;
+	u32 i;
+
+	MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
+			     mali_group_core_description(child),
+			     mali_group_core_description(parent)));
+
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
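+
+	/*
+	 * Only a physical child may be added to a virtual parent. From here
+	 * on the parent owns the MMU session, keeps a reference on the
+	 * child's L2 cache and drives the child through its broadcast unit
+	 * and DLBU.
+	 */
+	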
MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); + MALI_DEBUG_ASSERT(!mali_group_is_virtual(child)); + MALI_DEBUG_ASSERT(NULL == child->parent_group); + + _mali_osk_list_addtail(&child->group_list, &parent->group_list); + + child->parent_group = parent; + + MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]); + + MALI_DEBUG_PRINT(4, ("parent->l2_cache_core: [0] = %p, [1] = %p\n", parent->l2_cache_core[0], parent->l2_cache_core[1])); + MALI_DEBUG_PRINT(4, ("child->l2_cache_core: [0] = %p, [1] = %p\n", child->l2_cache_core[0], child->l2_cache_core[1])); + + /* Keep track of the L2 cache cores of child groups */ + found = MALI_FALSE; + for (i = 0; i < 2; i++) { + if (parent->l2_cache_core[i] == child->l2_cache_core[0]) { + MALI_DEBUG_ASSERT(parent->l2_cache_core_ref_count[i] > 0); + parent->l2_cache_core_ref_count[i]++; + found = MALI_TRUE; + } + } + + if (!found) { + /* First time we see this L2 cache, add it to our list */ + i = (NULL == parent->l2_cache_core[0]) ? 0 : 1; + + MALI_DEBUG_PRINT(4, ("First time we see l2_cache %p. Adding to [%d] = %p\n", child->l2_cache_core[0], i, parent->l2_cache_core[i])); + + MALI_DEBUG_ASSERT(NULL == parent->l2_cache_core[i]); + + parent->l2_cache_core[i] = child->l2_cache_core[0]; + parent->l2_cache_core_ref_count[i]++; + } + + /* Update Broadcast Unit and DLBU */ + mali_bcast_add_group(parent->bcast_core, child); + mali_dlbu_add_group(parent->dlbu_core, child); + + if (MALI_TRUE == parent->power_is_on) { + mali_bcast_reset(parent->bcast_core); + mali_dlbu_update_mask(parent->dlbu_core); + } + + if (MALI_TRUE == child->power_is_on) { + if (NULL == parent->session) { + if (NULL != child->session) { + /* + * Parent has no session, so clear + * child session as well. + */ + mali_mmu_activate_empty_page_directory(child->mmu); + } + } else { + if (parent->session == child->session) { + /* We already have same session as parent, + * so a simple zap should be enough. 
+				 */
+				mali_mmu_zap_tlb(child->mmu);
+			} else {
+				/*
+				 * Parent has a different session, so we must
+				 * switch to that session's page table.
+				 */
+				mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+			}
+
+			/* It is the parent which keeps the session from now on */
+			child->session = NULL;
+		}
+	} else {
+		/* should have been cleared when child was powered down */
+		MALI_DEBUG_ASSERT(NULL == child->session);
+	}
+
+	/* Start job on child when parent is active */
+	if (NULL != parent->pp_running_job) {
+		struct mali_pp_job *job = parent->pp_running_job;
+
+		MALI_DEBUG_PRINT(3, ("Group %p joining running job %d on virtual group %p\n",
+				     child, mali_pp_job_get_id(job), parent));
+
+		/* Only allowed to add active child to an active parent */
+		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
+		MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
+
+		mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
+
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+					      MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+					      mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+					      MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+					      mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+		trace_gpu_sched_switch(
+			mali_pp_core_description(child->pp_core),
+			sched_clock(), mali_pp_job_get_tid(job),
+			0, mali_pp_job_get_id(job));
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+		trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+				       mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+#endif
+	}
+
+	MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
+}
+
+/**
+ * @brief Remove child group from virtual group parent
+ */
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
+{
+	u32 i;
+
+	MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
+			     mali_group_core_description(child),
+			     mali_group_core_description(parent)));
+
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
+	MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
+	MALI_DEBUG_ASSERT(parent == child->parent_group);
+
+	/* Update Broadcast Unit and DLBU */
+	mali_bcast_remove_group(parent->bcast_core, child);
+	mali_dlbu_remove_group(parent->dlbu_core, child);
+
+	if (MALI_TRUE == parent->power_is_on) {
+		mali_bcast_reset(parent->bcast_core);
+		mali_dlbu_update_mask(parent->dlbu_core);
+	}
+
+	child->session = parent->session;
+	child->parent_group = NULL;
+
+	_mali_osk_list_delinit(&child->group_list);
+	if (_mali_osk_list_empty(&parent->group_list)) {
+		parent->session = NULL;
+	}
+
+	/* Keep track of the L2 cache cores of child groups */
+	i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ?
0 : 1; + + MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]); + + parent->l2_cache_core_ref_count[i]--; + if (parent->l2_cache_core_ref_count[i] == 0) { + parent->l2_cache_core[i] = NULL; + } + + MALI_DEBUG_CODE(mali_group_print_virtual(parent)); +} + +struct mali_group *mali_group_acquire_group(struct mali_group *parent) +{ + struct mali_group *child = NULL; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(mali_group_is_virtual(parent)); + + if (!_mali_osk_list_empty(&parent->group_list)) { + child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list); + mali_group_remove_group(parent, child); + } + + if (NULL != child) { + if (MALI_GROUP_STATE_ACTIVE != parent->state + && MALI_TRUE == child->power_is_on) { + mali_group_reset(child); + } + } + + return child; +} + +void mali_group_reset(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT(NULL == group->gp_running_job); + MALI_DEBUG_ASSERT(NULL == group->pp_running_job); + + MALI_DEBUG_PRINT(3, ("Group: reset of %s\n", + mali_group_core_description(group))); + + if (NULL != group->dlbu_core) { + mali_dlbu_reset(group->dlbu_core); + } + + if (NULL != group->bcast_core) { + mali_bcast_reset(group->bcast_core); + } + + MALI_DEBUG_ASSERT(NULL != group->mmu); + mali_group_reset_mmu(group); + + if (NULL != group->gp_core) { + MALI_DEBUG_ASSERT(NULL == group->pp_core); + mali_gp_reset(group->gp_core); + } else { + MALI_DEBUG_ASSERT(NULL != group->pp_core); + mali_group_reset_pp(group); + } +} + +void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n", + job, + mali_group_core_description(group))); + + session = mali_gp_job_get_session(job); + + MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]); + mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job)); + + /* Reset GPU and disable gpu secure mode if needed. 
*/ + if (MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) { + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + _mali_osk_gpu_reset_and_secure_mode_disable(); + /* Need to disable the pmu interrupt mask register */ + if (NULL != pmu) { + mali_pmu_reset(pmu); + } + } + + /* Reload mmu page table if needed */ + if (MALI_TRUE == gpu_secure_mode_pre_enabled) { + mali_group_reset(group); + mali_group_activate_page_directory(group, session, MALI_TRUE); + } else { + mali_group_activate_page_directory(group, session, MALI_FALSE); + } + + mali_gp_job_start(group->gp_core, job); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job), 0, 0, 0); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + mali_gp_job_get_pid(job), mali_gp_job_get_tid(job), 0, 0, 0); + +#if defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_gp_job_get_pid(job), 1 /* active */, 1 /* GP */, 0 /* core */, + mali_gp_job_get_frame_builder_id(job), mali_gp_job_get_flush_id(job)); +#endif + +#if defined(CONFIG_MALI400_PROFILING) + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, 0); + } +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_sched_switch(mali_gp_core_description(group->gp_core), + sched_clock(), mali_gp_job_get_tid(job), + 0, mali_gp_job_get_id(job)); +#endif + + group->gp_running_job = job; + group->is_working = MALI_TRUE; + + /* Setup SW timer and record start time */ + group->start_time = _mali_osk_time_tickcount(); + _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); + + MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n", + job, + mali_group_core_description(group), + group->start_time)); +} + +/* Used to set all the registers except frame renderer list address and fragment shader stack address + * It means the caller must set these two registers properly before calling this function + */ +void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n", + job, sub_job + 1, + mali_pp_job_get_sub_job_count(job), + mali_group_core_description(group))); + + session = mali_pp_job_get_session(job); + + if (NULL != group->l2_cache_core[0]) { + mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_pp_job_get_cache_order(job)); + } + + if (NULL != group->l2_cache_core[1]) { + mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job)); + } + + /* Reset GPU and change gpu secure mode if needed. 
*/ + if (MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == _mali_osk_gpu_secure_mode_is_enabled()) { + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + _mali_osk_gpu_reset_and_secure_mode_enable(); + /* Need to disable the pmu interrupt mask register */ + if (NULL != pmu) { + mali_pmu_reset(pmu); + } + } else if (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) { + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + _mali_osk_gpu_reset_and_secure_mode_disable(); + /* Need to disable the pmu interrupt mask register */ + if (NULL != pmu) { + mali_pmu_reset(pmu); + } + } + + /* Reload the mmu page table if needed */ + if ((MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == gpu_secure_mode_pre_enabled) + || (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == gpu_secure_mode_pre_enabled)) { + mali_group_reset(group); + mali_group_activate_page_directory(group, session, MALI_TRUE); + } else { + mali_group_activate_page_directory(group, session, MALI_FALSE); + } + + if (mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + u32 core_num = 0; + + MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job)); + + /* Configure DLBU for the job */ + mali_dlbu_config_job(group->dlbu_core, job); + + /* Write stack address for each child group */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_write_addr_stack(child->pp_core, job); + core_num++; + } + + mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE); + } else { + mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE); + } + + /* if the group is virtual, loop through physical groups which belong to this group + * and call profiling events for its cores as virtual */ + if (MALI_TRUE == mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, + mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); + +#if defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); +#endif + } + +#if defined(CONFIG_MALI400_PROFILING) + if (0 != group->l2_cache_core_ref_count[0]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } + } + if (0 != group->l2_cache_core_ref_count[1]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) { + mali_group_report_l2_cache_counters_per_core(group, 
mali_l2_cache_get_id(group->l2_cache_core[1])); + } + } +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ + + } else { /* group is physical - call profiling events for physical cores */ + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL, + mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0); + +#if defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core), + mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job)); +#endif + +#if defined(CONFIG_MALI400_PROFILING) + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ + } + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) + trace_gpu_sched_switch(mali_pp_core_description(group->pp_core), + sched_clock(), mali_pp_job_get_tid(job), + 0, mali_pp_job_get_id(job)); +#endif + + group->pp_running_job = job; + group->pp_running_sub_job = sub_job; + group->is_working = MALI_TRUE; + + /* Setup SW timer and record start time */ + group->start_time = _mali_osk_time_tickcount(); + _mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime)); + + MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n", + job, sub_job + 1, + mali_pp_job_get_sub_job_count(job), + mali_group_core_description(group), + group->start_time)); + +} + +void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr) +{ + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]); + mali_l2_cache_invalidate(group->l2_cache_core[0]); + + mali_mmu_zap_tlb_without_stall(group->mmu); + + mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + 0, 0, 0, 0, 0); + +#if defined(CONFIG_MALI400_PROFILING) + trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */, + mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job)); +#endif +} + +static void mali_group_reset_mmu(struct mali_group *group) +{ + struct mali_group *child; + struct mali_group *temp; + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (!mali_group_is_virtual(group)) { + /* This is a physical group or an idle virtual group -- simply wait for + * the reset to complete. */ + err = mali_mmu_reset(group->mmu); + MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); + } else { /* virtual group */ + /* Loop through all members of this virtual group and wait + * until they are done resetting. 
+ */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + err = mali_mmu_reset(child->mmu); + MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err); + } + } +} + +static void mali_group_reset_pp(struct mali_group *group) +{ + struct mali_group *child; + struct mali_group *temp; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + mali_pp_reset_async(group->pp_core); + + if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) { + /* This is a physical group or an idle virtual group -- simply wait for + * the reset to complete. */ + mali_pp_reset_wait(group->pp_core); + } else { + /* Loop through all members of this virtual group and wait until they + * are done resetting. + */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_reset_wait(child->pp_core); + } + } +} + +struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job) +{ + struct mali_pp_job *pp_job_to_return; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_POINTER(group->pp_running_job); + MALI_DEBUG_ASSERT_POINTER(sub_job); + MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working); + + /* Stop/clear the timeout timer. */ + _mali_osk_timer_del_async(group->timeout_timer); + + if (NULL != group->pp_running_job) { + + /* Deal with HW counters and profiling */ + + if (MALI_TRUE == mali_group_is_virtual(group)) { + struct mali_group *child; + struct mali_group *temp; + + /* update performance counters from each physical pp core within this virtual group */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core)); + } + +#if defined(CONFIG_MALI400_PROFILING) + /* send profiling data per physical core */ + _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) | + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL, + mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)), + mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)), + mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8), + 0, 0); + + trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), + 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core), + mali_pp_job_get_frame_builder_id(group->pp_running_job), + mali_pp_job_get_flush_id(group->pp_running_job)); + } + if (0 != group->l2_cache_core_ref_count[0]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) { + mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0])); + } + } + if (0 != group->l2_cache_core_ref_count[1]) { + if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) && + (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) { + 
mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+			}
+		}
+
+#endif
+	} else {
+		/* update performance counters for a physical group's pp core */
+		mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+					      MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+					      MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+					      mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+					      mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+					      mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+					      0, 0);
+
+		trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+				       0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+				       mali_pp_job_get_frame_builder_id(group->pp_running_job),
+				       mali_pp_job_get_flush_id(group->pp_running_job));
+
+		if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+		    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+			mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+		}
+#endif
+	}
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+		trace_gpu_sched_switch(
+			mali_pp_core_description(group->pp_core),
+			sched_clock(), 0, 0, 0);
+#endif
+
+	}
+
+	if (success) {
+		/* Only do soft reset for successful jobs, a full recovery
+		 * reset will be done for failed jobs. */
+		mali_pp_reset_async(group->pp_core);
+	}
+
+	pp_job_to_return = group->pp_running_job;
+	group->pp_running_job = NULL;
+	group->is_working = MALI_FALSE;
+	*sub_job = group->pp_running_sub_job;
+
+	if (!success) {
+		MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+		mali_group_recovery_reset(group);
+	} else if (_MALI_OSK_ERR_OK != mali_pp_reset_wait(group->pp_core)) {
+		MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+		mali_group_recovery_reset(group);
+	}
+
+	return pp_job_to_return;
+}
+
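+/*
+ * Collect the results of the GP job that just finished on this group:
+ * update performance counters, record the current PLBU heap address and
+ * hand the job back to the scheduler. A successful job is followed by a
+ * soft reset; a failed job (or a failed reset) triggers a full recovery
+ * reset of the group.
+ */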
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
+{
+	struct mali_gp_job *gp_job_to_return;
+
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+	MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+	MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+	/* Stop/clear the timeout timer. */
+	_mali_osk_timer_del_async(group->timeout_timer);
+
+	if (NULL != group->gp_running_job) {
+		mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+					      mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+					      mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+					      mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+					      0, 0);
+
+		if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+		    (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+			mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+		trace_gpu_sched_switch(
+			mali_gp_core_description(group->gp_core),
+			sched_clock(), 0, 0, 0);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+		trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
+				       mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+		mali_gp_job_set_current_heap_addr(group->gp_running_job,
+						  mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+	}
+
+	if (success) {
+		/* Only do soft reset for successful jobs, a full recovery
+		 * reset will be done for failed jobs. */
+		mali_gp_reset_async(group->gp_core);
+	}
+
+	gp_job_to_return = group->gp_running_job;
+	group->gp_running_job = NULL;
+	group->is_working = MALI_FALSE;
+
+	if (!success) {
+		MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
+		mali_group_recovery_reset(group);
+	} else if (_MALI_OSK_ERR_OK != mali_gp_reset_wait(group->gp_core)) {
+		MALI_PRINT_ERROR(("Mali group: Executing recovery reset due to reset failure\n"));
+		mali_group_recovery_reset(group);
+	}
+
+	return gp_job_to_return;
+}
+
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+	if (mali_global_num_groups > index) {
+		return mali_global_groups[index];
+	}
+
+	return NULL;
+}
+
+u32 mali_group_get_glob_num_groups(void)
+{
+	return mali_global_num_groups;
+}
+
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload)
+{
+	MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
+			     mali_session_get_page_directory(session), session,
+			     mali_group_core_description(group)));
+
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	if (group->session != session || MALI_TRUE == is_reload) {
+		/* Different session than last time, so we need to do some work */
+		MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
+				     session, group->session,
+				     mali_group_core_description(group)));
+		mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+		group->session = session;
+	} else {
+		/* Same session as last time, so no work required */
+		MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n",
+				     session->page_directory,
+				     mali_group_core_description(group)));
+		mali_mmu_zap_tlb_without_stall(group->mmu);
+	}
+}
+
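+/*
+ * Bring the group back to a known-good state after a job failure or a
+ * failed reset: stop the core's bus, flush the MMU and clear any page
+ * fault, hard-reset the core(s) and finally reset the MMU. The session
+ * binding is dropped, so the next job re-activates its page directory.
+ */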
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	/* Stop cores, bus stop */
+	if (NULL != group->pp_core) {
+		mali_pp_stop_bus(group->pp_core);
+	} else {
+		mali_gp_stop_bus(group->gp_core);
+	}
+
+	/* Flush MMU and clear page fault (if any) */
+	mali_mmu_activate_fault_flush_page_directory(group->mmu);
+	mali_mmu_page_fault_done(group->mmu);
+
+	/* Wait for cores to stop bus, then do a hard reset on them */
+	if (NULL != group->pp_core) {
+		if (mali_group_is_virtual(group)) {
+			struct mali_group *child, *temp;
+
+			/* Disable the broadcast unit while we reset the member cores directly. */
+			mali_bcast_disable(group->bcast_core);
+
+			_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+				mali_pp_stop_bus_wait(child->pp_core);
+				mali_pp_hard_reset(child->pp_core);
+			}
+
+			mali_bcast_enable(group->bcast_core);
+		} else {
+			mali_pp_stop_bus_wait(group->pp_core);
+			mali_pp_hard_reset(group->pp_core);
+		}
+	} else {
+		mali_gp_stop_bus_wait(group->gp_core);
+		mali_gp_hard_reset(group->gp_core);
+	}
+
+	/* Reset MMU */
+	err = mali_mmu_reset(group->mmu);
+	MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
+	MALI_IGNORE(err);
+
+	group->session = NULL;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+	int n = 0;
+	int i;
+	struct mali_group *child;
+	struct mali_group *temp;
+
+	if (mali_group_is_virtual(group)) {
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"Virtual PP Group: %p\n", group);
+	} else if (mali_group_is_in_virtual(group)) {
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"Child PP Group: %p\n", group);
+	} else if (NULL != group->pp_core) {
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"Physical PP Group: %p\n", group);
+	} else {
+		MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"GP Group: %p\n", group);
+	}
+
+	switch (group->state) {
+	case MALI_GROUP_STATE_INACTIVE:
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"\tstate: INACTIVE\n");
+		break;
+	case MALI_GROUP_STATE_ACTIVATION_PENDING:
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"\tstate: ACTIVATION_PENDING\n");
+		break;
+	case MALI_GROUP_STATE_ACTIVE:
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"\tstate: ACTIVE\n");
+		break;
+	default:
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"\tstate: UNKNOWN (%d)\n", group->state);
+		MALI_DEBUG_ASSERT(0);
+		break;
+	}
+
+	n += _mali_osk_snprintf(buf + n, size - n,
+				"\tSW power: %s\n",
+				group->power_is_on ? "On" : "Off");
+
+	n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
+
+	for (i = 0; i < 2; i++) {
+		if (NULL != group->l2_cache_core[i]) {
+			struct mali_pm_domain *domain;
+			domain = mali_l2_cache_get_pm_domain(
+					 group->l2_cache_core[i]);
+			n += mali_pm_dump_state_domain(domain,
+						       buf + n, size - n);
+		}
+	}
+
+	if (group->gp_core) {
+		n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"\tGP running job: %p\n", group->gp_running_job);
+	}
+
+	if (group->pp_core) {
+		n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+		n += _mali_osk_snprintf(buf + n, size - n,
+					"\tPP running job: %p, subjob %d\n",
+					group->pp_running_job,
+					group->pp_running_sub_job);
+	}
+
+	_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+				    struct mali_group, group_list) {
+		n += mali_group_dump_state(child, buf + n, size - n);
+	}
+
+	return n;
+}
+#endif
+
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
+{
+	struct mali_group *group = (struct mali_group *)data;
+	_mali_osk_errcode_t ret;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_executor_lock();
+	if (!mali_group_is_working(group)) {
+		/* Not working, so nothing to do */
+		mali_executor_unlock();
+		return _MALI_OSK_ERR_FAULT;
+	}
+#endif
+	if (NULL != group->gp_core) {
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+					      0, 0, /* No pid and tid for interrupt handler */
+					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+					      mali_mmu_get_rawstat(group->mmu), 0);
+	} else {
+		MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+					      0, 0, /* No pid and tid for interrupt handler */
+					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+						      mali_pp_core_get_id(group->pp_core)),
+					      mali_mmu_get_rawstat(group->mmu), 0);
+	}
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_executor_unlock();
+#endif
+#endif
+
+	ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_executor_lock();
+	if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+		/* Group is done and no job is scheduled on it; it has already been powered off. */
+		if (NULL != group->gp_core) {
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+						      0, 0, /* No pid and tid for interrupt handler */
+						      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+						      0xFFFFFFFF, 0);
+		} else {
+			_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+						      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+						      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+						      0, 0, /* No pid and tid for interrupt handler */
+						      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+							      mali_pp_core_get_id(group->pp_core)),
+						      0xFFFFFFFF, 0);
+		}
+
+		mali_executor_unlock();
+		return ret;
+	}
+#endif
+
+	if (NULL != group->gp_core) {
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+					      0, 0, /* No pid and tid for interrupt
handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + + return ret; +} + +static void mali_group_bottom_half_mmu(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } + + mali_executor_interrupt_mmu(group, MALI_FALSE); + + if (NULL != group->gp_core) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0), + mali_mmu_get_rawstat(group->mmu), 0); + } else { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU( + mali_pp_core_get_id(group->pp_core)), + mali_mmu_get_rawstat(group->mmu), 0); + } +} + +_mali_osk_errcode_t mali_group_upper_half_gp(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + _mali_osk_errcode_t ret; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group)) { + /* Not working, so nothing to do */ + mali_executor_unlock(); + return _MALI_OSK_ERR_FAULT; + } +#endif + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), + mali_gp_get_rawstat(group->gp_core), 0); + + MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n", + mali_gp_get_rawstat(group->gp_core), + mali_group_core_description(group))); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + ret = mali_executor_interrupt_gp(group, MALI_TRUE); + +#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS) +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_lock(); + if (!mali_group_is_working(group) && 
(!mali_group_power_is_on(group))) {
+		/* Group is done and no job is scheduled on it; it has already been powered off. */
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+					      0, 0, /* No pid and tid for interrupt handler */
+					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+					      0xFFFFFFFF, 0);
+		mali_executor_unlock();
+		return ret;
+	}
+#endif
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+				      0, 0, /* No pid and tid for interrupt handler */
+				      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+				      mali_gp_get_rawstat(group->gp_core), 0);
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_executor_unlock();
+#endif
+#endif
+	return ret;
+}
+
+static void mali_group_bottom_half_gp(void *data)
+{
+	struct mali_group *group = (struct mali_group *)data;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+	MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+				      0, _mali_osk_get_tid(), /* pid and tid */
+				      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+				      mali_gp_get_rawstat(group->gp_core), 0);
+
+	mali_executor_interrupt_gp(group, MALI_FALSE);
+
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+				      0, _mali_osk_get_tid(), /* pid and tid */
+				      MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+				      mali_gp_get_rawstat(group->gp_core), 0);
+}
+
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
+{
+	struct mali_group *group = (struct mali_group *)data;
+	_mali_osk_errcode_t ret;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+	MALI_DEBUG_ASSERT_POINTER(group->mmu);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_executor_lock();
+	if (!mali_group_is_working(group)) {
+		/* Not working, so nothing to do */
+		mali_executor_unlock();
+		return _MALI_OSK_ERR_FAULT;
+	}
+#endif
+
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+				      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+				      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+				      0, 0, /* No pid and tid for interrupt handler */
+				      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+					      mali_pp_core_get_id(group->pp_core)),
+				      mali_pp_get_rawstat(group->pp_core), 0);
+
+	MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+			     mali_pp_get_rawstat(group->pp_core),
+			     mali_group_core_description(group)));
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_executor_unlock();
+#endif
+#endif
+
+	ret = mali_executor_interrupt_pp(group, MALI_TRUE);
+
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS)
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+	mali_executor_lock();
+	if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+		/* Group is done and no job is scheduled on it; it has already been powered off. */
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+					      MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+					      MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+					      0, 0, /* No pid and tid for interrupt handler */
+					      MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+						      mali_pp_core_get_id(group->pp_core)),
+					      0xFFFFFFFF, 0);
+		mali_executor_unlock();
+		return ret;
+	}
+#endif + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF, + 0, 0, /* No pid and tid for interrupt handler */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + mali_executor_unlock(); +#endif +#endif + return ret; +} + +static void mali_group_bottom_half_pp(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); + + mali_executor_interrupt_pp(group, MALI_FALSE); + + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, + 0, _mali_osk_get_tid(), /* pid and tid */ + MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP( + mali_pp_core_get_id(group->pp_core)), + mali_pp_get_rawstat(group->pp_core), 0); +} + +static void mali_group_timeout(void *data) +{ + struct mali_group *group = (struct mali_group *)data; + MALI_DEBUG_ASSERT_POINTER(group); + + MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n", + mali_group_core_description(group), + _mali_osk_time_tickcount())); + + if (NULL != group->gp_core) { + mali_group_schedule_bottom_half_gp(group); + } else { + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + mali_group_schedule_bottom_half_pp(group); + } +} + +mali_bool mali_group_zap_session(struct mali_group *group, + struct mali_session_data *session) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + if (group->session != session) { + /* not running from this session */ + return MALI_TRUE; /* success */ + } + + if (group->is_working) { + /* The Zap also does the stall and disable_stall */ + mali_bool zap_success = mali_mmu_zap_tlb(group->mmu); + return zap_success; + } else { + /* Just remove the session instead of zapping */ + mali_group_clear_session(group); + return MALI_TRUE; /* success */ + } +} + +#if defined(CONFIG_MALI400_PROFILING) +static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num) +{ + u32 source0 = 0; + u32 value0 = 0; + u32 source1 = 0; + u32 value1 = 0; + u32 profiling_channel = 0; + + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + + switch (core_num) { + case 0: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS; + break; + case 1: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS; + break; + case 2: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS; + break; + default: + profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE | + MALI_PROFILING_EVENT_CHANNEL_GPU | + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS; + break; + } + + if (0 == core_num) { + 
mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } + if (1 == core_num) { + if (1 == mali_l2_cache_get_id(group->l2_cache_core[0])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } else if (1 == mali_l2_cache_get_id(group->l2_cache_core[1])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1); + } + } + if (2 == core_num) { + if (2 == mali_l2_cache_get_id(group->l2_cache_core[0])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[0], &source0, &value0, &source1, &value1); + } else if (2 == mali_l2_cache_get_id(group->l2_cache_core[1])) { + mali_l2_cache_core_get_counter_values(group->l2_cache_core[1], &source0, &value0, &source1, &value1); + } + } + + _mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0); +} +#endif /* #if defined(CONFIG_MALI400_PROFILING) */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_group.h b/drivers/gpu/arm/mali400/common/mali_group.h --- a/drivers/gpu/arm/mali400/common/mali_group.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_group.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,460 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_GROUP_H__ +#define __MALI_GROUP_H__ + +#include "mali_osk.h" +#include "mali_l2_cache.h" +#include "mali_mmu.h" +#include "mali_gp.h" +#include "mali_pp.h" +#include "mali_session.h" +#include "mali_osk_profiling.h" + +/** + * @brief Default max runtime [ms] for a core job - used by timeout timers + */ +#define MALI_MAX_JOB_RUNTIME_DEFAULT 5000 + +extern int mali_max_job_runtime; + +#define MALI_MAX_NUMBER_OF_GROUPS 10 +#define MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 8 + +enum mali_group_state { + MALI_GROUP_STATE_INACTIVE, + MALI_GROUP_STATE_ACTIVATION_PENDING, + MALI_GROUP_STATE_ACTIVE, +}; + +/** + * The structure represents a render group + * A render group is defined by all the cores that share the same Mali MMU + */ + +struct mali_group { + struct mali_mmu_core *mmu; + struct mali_session_data *session; + + enum mali_group_state state; + mali_bool power_is_on; + + mali_bool is_working; + unsigned long start_time; /* in ticks */ + + struct mali_gp_core *gp_core; + struct mali_gp_job *gp_running_job; + + struct mali_pp_core *pp_core; + struct mali_pp_job *pp_running_job; + u32 pp_running_sub_job; + + struct mali_pm_domain *pm_domain; + + struct mali_l2_cache_core *l2_cache_core[2]; + u32 l2_cache_core_ref_count[2]; + + /* Parent virtual group (if any) */ + struct mali_group *parent_group; + + struct mali_dlbu_core *dlbu_core; + struct mali_bcast_unit *bcast_core; + + /* Used for working groups which needs to be disabled */ + mali_bool disable_requested; + + /* Used by group to link child groups (for virtual group) */ + _mali_osk_list_t group_list; + + /* Used by executor module in order to link groups of same state */ + _mali_osk_list_t executor_list; + + /* Used by PM domains to link groups of same domain */ + _mali_osk_list_t pm_domain_list; + + _mali_osk_wq_work_t *bottom_half_work_mmu; + _mali_osk_wq_work_t *bottom_half_work_gp; + _mali_osk_wq_work_t *bottom_half_work_pp; + + _mali_osk_timer_t *timeout_timer; +}; + +/** @brief Create a new Mali group object + * + * @return A pointer to a new group object + */ +struct mali_group *mali_group_create(struct mali_l2_cache_core *core, + struct mali_dlbu_core *dlbu, + struct mali_bcast_unit *bcast, + u32 domain_index); + +void mali_group_dump_status(struct mali_group *group); + +void mali_group_delete(struct mali_group *group); + +_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, + struct mali_mmu_core *mmu_core); +void mali_group_remove_mmu_core(struct mali_group *group); + +_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, + struct mali_gp_core *gp_core); +void mali_group_remove_gp_core(struct mali_group *group); + +_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, + struct mali_pp_core *pp_core); +void mali_group_remove_pp_core(struct mali_group *group); + +MALI_STATIC_INLINE const char *mali_group_core_description( + struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + if (NULL != group->pp_core) { + return mali_pp_core_description(group->pp_core); + } else { + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + return mali_gp_core_description(group->gp_core); + } +} + +MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + return (NULL != group->dlbu_core); +#else + return MALI_FALSE; +#endif +} + +/** @brief Check if a group is a part of a virtual group or not + */ +MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct 
mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470))
+	return (NULL != group->parent_group) ? MALI_TRUE : MALI_FALSE;
+#else
+	return MALI_FALSE;
+#endif
+}
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group,
+ * including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+MALI_STATIC_INLINE struct mali_session_data *mali_group_get_session(
+	struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	return group->session;
+}
+
+MALI_STATIC_INLINE void mali_group_clear_session(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	if (NULL != group->session) {
+		mali_mmu_activate_empty_page_directory(group->mmu);
+		group->session = NULL;
+	}
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group);
+
+/*
+ * Change state from ACTIVATION_PENDING to ACTIVE
+ * For a virtual group, all children need to be ACTIVE first
+ */
+mali_bool mali_group_set_active(struct mali_group *group);
+
+/*
+ * @return MALI_TRUE means one or more domains can now be powered off,
+ * and caller should call either mali_pm_update_async() or
+ * mali_pm_update_sync() in order to do so.
+ */
+mali_bool mali_group_deactivate(struct mali_group *group);
+
+MALI_STATIC_INLINE enum mali_group_state mali_group_get_state(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	return group->state;
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	return group->power_is_on;
+}
+
+void mali_group_power_up(struct mali_group *group);
+void mali_group_power_down(struct mali_group *group);
+
+MALI_STATIC_INLINE void mali_group_set_disable_request(
+	struct mali_group *group, mali_bool disable)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	group->disable_requested = disable;
+
+	/**
+	 * When any child group's disable_request is set to TRUE, the
+	 * disable_request of the parent group must also be set to TRUE.
+	 * Conversely, the disable_request of the parent group may only be
+	 * set to FALSE once the disable_request of every one of its child
+	 * groups is set to FALSE.
+	 */
+	if (NULL != group->parent_group && MALI_TRUE == disable) {
+		group->parent_group->disable_requested = disable;
+	}
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_disable_requested(
+	struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	return group->disable_requested;
+}
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+
+/** @brief Checks if the group is working.
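+ *
+ * For a group that is part of a virtual group, the working state of the
+ * virtual group is returned instead of the group's own flag.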
+ */
+MALI_STATIC_INLINE mali_bool mali_group_is_working(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	if (mali_group_is_in_virtual(group)) {
+		struct mali_group *tmp_group = mali_executor_get_virtual_group();
+		return tmp_group->is_working;
+	}
+	return group->is_working;
+}
+
+MALI_STATIC_INLINE struct mali_gp_job *mali_group_get_running_gp_job(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	return group->gp_running_job;
+}
+
+/** @brief Zap MMU TLB on all groups
+ *
+ * Zap TLB on group if \a session is active.
+ */
+mali_bool mali_group_zap_session(struct mali_group *group,
+				 struct mali_session_data *session);
+
+/** @brief Get pointer to GP core object
+ */
+MALI_STATIC_INLINE struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	return group->gp_core;
+}
+
+/** @brief Get pointer to PP core object
+ */
+MALI_STATIC_INLINE struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	return group->pp_core;
+}
+
+/** @brief Start GP job
+ */
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled);
+
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled);
+
+/** @brief Start a PP job on a virtual group
+*/
+void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
+
+
+/** @brief Start a particular sub job of a PP job on a specific physical PP group
+*/
+void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob);
+
+
+/** @brief Remove all unused groups from the tmp_unused group list so that the group is left in a consistent state.
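+ * (Called when a non-DLBU job completes on the virtual group.)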
+ */
+void mali_group_non_dlbu_job_done_virtual(struct mali_group *group);
+
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_gp(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	return mali_gp_get_interrupt_result(group->gp_core);
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_pp(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	return mali_pp_get_interrupt_result(group->pp_core);
+}
+
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_mmu(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->mmu);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	return mali_mmu_get_interrupt_result(group->mmu);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_gp_is_active(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	return mali_gp_is_active(group->gp_core);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_pp_is_active(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+	return mali_pp_is_active(group->pp_core);
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_has_timed_out(struct mali_group *group)
+{
+	unsigned long time_cost;
+	struct mali_group *tmp_group = group;
+
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	/* If the group is part of a virtual group, the virtual group's start time must be used */
+	if (mali_group_is_in_virtual(group)) {
+		tmp_group = mali_executor_get_virtual_group();
+	}
+
+	time_cost = _mali_osk_time_tickcount() - tmp_group->start_time;
+	if (_mali_osk_time_mstoticks(mali_max_job_runtime) <= time_cost) {
+		/*
+		 * current tick is at or after timeout end time,
+		 * so this is a valid timeout
+		 */
+		return MALI_TRUE;
+	} else {
+		/*
+		 * Not a valid timeout. A HW interrupt probably beat
+		 * us to it, and the timer wasn't properly deleted
+		 * (async deletion used due to atomic context).
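+		 * Returning MALI_FALSE here makes the caller simply
+		 * ignore the stale timer tick.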
+ */ + return MALI_FALSE; + } +} + +MALI_STATIC_INLINE void mali_group_mask_all_interrupts_gp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_gp_mask_all_interrupts(group->gp_core); +} + +MALI_STATIC_INLINE void mali_group_mask_all_interrupts_pp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return mali_pp_mask_all_interrupts(group->pp_core); +} + +MALI_STATIC_INLINE void mali_group_enable_interrupts_gp( + struct mali_group *group, + enum mali_interrupt_result exceptions) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + mali_gp_enable_interrupts(group->gp_core, exceptions); +} + +MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->gp_core); + _mali_osk_wq_schedule_work(group->bottom_half_work_gp); +} + + +MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->pp_core); + _mali_osk_wq_schedule_work(group->bottom_half_work_pp); +} + +MALI_STATIC_INLINE void mali_group_schedule_bottom_half_mmu(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT_POINTER(group->mmu); + _mali_osk_wq_schedule_work(group->bottom_half_work_mmu); +} + +struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job); + +struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success); + +#if defined(CONFIG_MALI400_PROFILING) +MALI_STATIC_INLINE void mali_group_oom(struct mali_group *group) +{ + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND | + MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), + 0, 0, 0, 0, 0); +} +#endif + +struct mali_group *mali_group_get_glob_group(u32 index); +u32 mali_group_get_glob_num_groups(void); + +u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size); + + +_mali_osk_errcode_t mali_group_upper_half_mmu(void *data); +_mali_osk_errcode_t mali_group_upper_half_gp(void *data); +_mali_osk_errcode_t mali_group_upper_half_pp(void *data); + +MALI_STATIC_INLINE mali_bool mali_group_is_empty(struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(group); + MALI_DEBUG_ASSERT(mali_group_is_virtual(group)); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + return _mali_osk_list_empty(&group->group_list); +} + +#endif /* __MALI_GROUP_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_hw_core.c b/drivers/gpu/arm/mali400/common/mali_hw_core.c --- a/drivers/gpu/arm/mali400/common/mali_hw_core.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_hw_core.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_hw_core.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_osk_mali.h" + +_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size) +{ + core->phys_addr = resource->base; + core->phys_offset = resource->base - _mali_osk_resource_base_address(); + core->description = resource->description; + core->size = reg_size; + + MALI_DEBUG_ASSERT(core->phys_offset < core->phys_addr); + + if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description)) { + core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description); + if (NULL != core->mapped_registers) { + return _MALI_OSK_ERR_OK; + } else { + MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr)); + } + _mali_osk_mem_unreqregion(core->phys_addr, core->size); + } else { + MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr)); + } + + return _MALI_OSK_ERR_FAULT; +} + +void mali_hw_core_delete(struct mali_hw_core *core) +{ + if (NULL != core->mapped_registers) { + _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers); + core->mapped_registers = NULL; + } + _mali_osk_mem_unreqregion(core->phys_addr, core->size); +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_hw_core.h b/drivers/gpu/arm/mali400/common/mali_hw_core.h --- a/drivers/gpu/arm/mali400/common/mali_hw_core.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_hw_core.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_HW_CORE_H__ +#define __MALI_HW_CORE_H__ + +#include "mali_osk.h" +#include "mali_kernel_common.h" + +/** + * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU) + * This struct is embedded inside all core specific structs. 
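+ * Embedding it (rather than holding a pointer) lets the register access
+ * helpers below operate on any core type without extra indirection.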
+ */ +struct mali_hw_core { + uintptr_t phys_addr; /**< Physical address of the registers */ + u32 phys_offset; /**< Offset from start of Mali to registers */ + u32 size; /**< Size of registers */ + mali_io_address mapped_registers; /**< Virtual mapping of the registers */ + const char *description; /**< Name of unit (as specified in device configuration) */ +}; + +#define MALI_REG_POLL_COUNT_FAST 1000000 +#define MALI_REG_POLL_COUNT_SLOW 1000000 + +/* + * GP and PP core translate their int_stat/rawstat into one of these + */ +enum mali_interrupt_result { + MALI_INTERRUPT_RESULT_NONE, + MALI_INTERRUPT_RESULT_SUCCESS, + MALI_INTERRUPT_RESULT_SUCCESS_VS, + MALI_INTERRUPT_RESULT_SUCCESS_PLBU, + MALI_INTERRUPT_RESULT_OOM, + MALI_INTERRUPT_RESULT_ERROR +}; + +_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size); +void mali_hw_core_delete(struct mali_hw_core *core); + +/* nexell add */ +#if defined(CONFIG_ARCH_S5P4418) && defined(CONFIG_SECURE_REG_ACCESS) +#define USE_PSCI_REG_READ_WRITE +extern void write_sec_reg(void __iomem *reg, int val); +extern int read_sec_reg(void __iomem *reg); +#endif + +#ifdef USE_PSCI_REG_READ_WRITE +MALI_STATIC_INLINE u32 nx_register_read(u32 phys_addr_page, u32 offset) +{ + void *phys_addr = (void*)(phys_addr_page + offset); + return read_sec_reg(phys_addr); +} + +MALI_STATIC_INLINE void nx_register_write(u32 phys_addr_page, u32 offset, + u32 new_val) +{ + void *phys_addr = (void*)(phys_addr_page + offset); + write_sec_reg(phys_addr, new_val); +} +#endif + +MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address) +{ +#if !defined( USE_PSCI_REG_READ_WRITE ) + u32 read_val; + read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address); + MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n", + core->description, relative_address, read_val)); + return read_val; +#else + return nx_register_read(core->phys_addr, relative_address); +#endif +} + +MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val) +{ + MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n", + core->description, relative_address, new_val)); +#if !defined( USE_PSCI_REG_READ_WRITE ) + _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val); +#else + nx_register_write(core->phys_addr, relative_address, new_val); +#endif +} + +/* Conditionally write a register. + * The register will only be written if the new value is different from the old_value. 
+ * The caller is responsible for keeping its cached copy of the old value in sync. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 new_val, const u32 old_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+	if (old_val != new_val) {
+#if !defined( USE_PSCI_REG_READ_WRITE )
+		_mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+#else
+		nx_register_write(core->phys_addr, relative_address, new_val);
+#endif
+	}
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+	MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+			     core->description, relative_address, new_val));
+#if !defined( USE_PSCI_REG_READ_WRITE )
+	_mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+#else
+	nx_register_write(core->phys_addr, relative_address, new_val);
+#endif
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+	u32 i;
+	MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+			     core->description, relative_address, nr_of_regs));
+
+	/* Do not use burst writes against the registers */
+	for (i = 0; i < nr_of_regs; i++) {
+		mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+	}
+}
+
+/* Conditionally write a set of registers.
+ * Each register is only written when its new value differs from the corresponding
+ * old value; the caller is responsible for keeping the old values in sync. */
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed_conditional(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs, const u32 *old_array)
+{
+	u32 i;
+	MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+			     core->description, relative_address, nr_of_regs));
+
+	/* Do not use burst writes against the registers */
+	for (i = 0; i < nr_of_regs; i++) {
+		if (old_array[i] != write_array[i]) {
+			mali_hw_core_register_write_relaxed(core, relative_address + i * 4, write_array[i]);
+		}
+	}
+}
+
+#endif /* __MALI_HW_CORE_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_kernel_common.h b/drivers/gpu/arm/mali400/common/mali_kernel_common.h
--- a/drivers/gpu/arm/mali400/common/mali_kernel_common.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_kernel_common.h	2018-05-06 08:49:49.174695256 +0200
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+#include "mali_osk.h"
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+#if defined(_DEBUG)
+#define DEBUG
+#endif
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...)           Do not use this function: it will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL.
+ * - MALI_DEBUG_PRINT_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(expr)    If the asserted expr is false, the program will exit.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a zero pointer.
+ * - MALI_DEBUG_CODE( X )       The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg
+ *
+ * Suggested range for the DEBUG-LEVEL is [1:6] where
+ * [1:2] are messages with the highest priority, indicating possible errors.
+ * [3:4] are messages with medium priority, outputting important variables.
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
+
+/**
+* Fundamental error macro. Reports an error code. This is abstracted to allow us to
+* easily switch to a different error reporting method if we want, and also to allow
+* us to search for error returns easily.
+*
+* Note no closing semicolon - this is supplied in typical usage:
+*
+* MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+*/
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than
+ * _MALI_OSK_NO_ERROR, then the value is returned from the enclosing function
+ * as an error code. This effectively acts as a guard clause, and propagates
+ * error values up the call stack. This uses a temporary value to ensure that
+ * the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error,
+ * this error will be returned without evaluating the expression in
+ * MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+	do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+		if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+			MALI_ERROR(_check_no_error_result); \
+	} while(0)
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto.
The label must therefore be local to the function in + * which this macro appears. This is most usually used to execute some clean-up code before + * exiting with a call to ERROR. + * + * Like the other macros, this is a macro to allow us to override the condition if we wish, + * e.g. to force an error during stress testing. + */ +#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0) + +/** + * Explicitly ignore a parameter passed into a function, to suppress compiler warnings. + * Should only be used with parameter names. + */ +#define MALI_IGNORE(x) x=x + +#if defined(CONFIG_MALI_QUIET) +#define MALI_PRINTF(args) +#else +#define MALI_PRINTF(args) _mali_osk_dbgmsg args; +#endif + +#define MALI_PRINT_ERROR(args) do{ \ + MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \ + MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \ + MALI_PRINTF(args); \ + MALI_PRINTF(("\n")); \ + } while(0) + +#define MALI_PRINT(args) do{ \ + pr_info("Mali: "); \ + pr_cont args; \ + } while (0) + +#ifdef DEBUG +#ifndef mali_debug_level +extern int mali_debug_level; +#endif + +#define MALI_DEBUG_CODE(code) code +#define MALI_DEBUG_PRINT(level, args) do { \ + if((level) <= mali_debug_level)\ + {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \ + } while (0) + +#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args) + +#define MALI_DEBUG_PRINT_IF(level,condition,args) \ + if((condition)&&((level) <= mali_debug_level))\ + {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } + +#define MALI_DEBUG_PRINT_ELSE(level, args)\ + else if((level) <= mali_debug_level)\ + { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } + +/** + * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint + * to be entered (see _mali_osk_break() ). An alternative would be to call + * _mali_osk_abort(), on OSs that support it. + */ +#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0) +#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0) +#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0) + +#else /* DEBUG */ + +#define MALI_DEBUG_CODE(code) +#define MALI_DEBUG_PRINT(string,args) do {} while(0) +#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0) +#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0) +#define MALI_DEBUG_PRINT_ELSE(level,condition,args) do {} while(0) +#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0) +#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0) +#define MALI_DEBUG_ASSERT(condition) do {} while(0) + +#endif /* DEBUG */ + +/** + * variables from user space cannot be dereferenced from kernel space; tagging them + * with __user allows the GCC compiler to generate a warning. Other compilers may + * not support this so we define it here as an empty macro if the compiler doesn't + * define it. + */ +#ifndef __user +#define __user +#endif + +#endif /* __MALI_KERNEL_COMMON_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_kernel_core.c b/drivers/gpu/arm/mali400/common/mali_kernel_core.c --- a/drivers/gpu/arm/mali400/common/mali_kernel_core.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_kernel_core.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,1339 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_ukk.h" +#include "mali_kernel_core.h" +#include "mali_memory.h" +#include "mali_mem_validation.h" +#include "mali_mmu.h" +#include "mali_mmu_page_directory.h" +#include "mali_dlbu.h" +#include "mali_broadcast.h" +#include "mali_gp.h" +#include "mali_pp.h" +#include "mali_executor.h" +#include "mali_pp_job.h" +#include "mali_group.h" +#include "mali_pm.h" +#include "mali_pmu.h" +#include "mali_scheduler.h" +#include "mali_kernel_utilization.h" +#include "mali_l2_cache.h" +#include "mali_timeline.h" +#include "mali_soft_job.h" +#include "mali_pm_domain.h" +#if defined(CONFIG_MALI400_PROFILING) +#include "mali_osk_profiling.h" +#endif +#if defined(CONFIG_MALI400_INTERNAL_PROFILING) +#include "mali_profiling_internal.h" +#endif +#include "mali_control_timer.h" +#include "mali_dvfs_policy.h" +#include +#include +#if defined(CONFIG_MALI_DMA_BUF_FENCE) +#include +#endif + +#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff + +/* Mali GPU memory. Real values come from module parameter or from device specific data */ +unsigned int mali_dedicated_mem_start = 0; +unsigned int mali_dedicated_mem_size = 0; + +/* Default shared memory size is set to 4G. */ +unsigned int mali_shared_mem_size = MALI_SHARED_MEMORY_DEFAULT_SIZE; + +/* Frame buffer memory to be accessible by Mali GPU */ +int mali_fb_start = 0; +int mali_fb_size = 0; + +/* Mali max job runtime */ +extern int mali_max_job_runtime; + +/** Start profiling from module load? */ +int mali_boot_profiling = 0; + +/** Limits for the number of PP cores behind each L2 cache. 
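+ * The defaults (0xFF) effectively mean "no limit"; after probing, the values
+ * are trimmed down to the number of PP cores actually found.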
*/ +int mali_max_pp_cores_group_1 = 0xFF; +int mali_max_pp_cores_group_2 = 0xFF; + +int mali_inited_pp_cores_group_1 = 0; +int mali_inited_pp_cores_group_2 = 0; + +static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN; +static uintptr_t global_gpu_base_address = 0; +static u32 global_gpu_major_version = 0; +static u32 global_gpu_minor_version = 0; + +mali_bool mali_gpu_class_is_mali450 = MALI_FALSE; +mali_bool mali_gpu_class_is_mali470 = MALI_FALSE; + +static _mali_osk_errcode_t mali_set_global_gpu_base_address(void) +{ + _mali_osk_errcode_t err = _MALI_OSK_ERR_OK; + + global_gpu_base_address = _mali_osk_resource_base_address(); + if (0 == global_gpu_base_address) { + err = _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + return err; +} + +static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp) +{ + switch (resource_pp->base - global_gpu_base_address) { + case 0x08000: + case 0x20000: /* fall-through for aliased mapping */ + return 0x01; + case 0x0A000: + case 0x22000: /* fall-through for aliased mapping */ + return 0x02; + case 0x0C000: + case 0x24000: /* fall-through for aliased mapping */ + return 0x04; + case 0x0E000: + case 0x26000: /* fall-through for aliased mapping */ + return 0x08; + case 0x28000: + return 0x10; + case 0x2A000: + return 0x20; + case 0x2C000: + return 0x40; + case 0x2E000: + return 0x80; + default: + return 0; + } +} + +static _mali_osk_errcode_t mali_parse_product_info(void) +{ + _mali_osk_resource_t first_pp_resource; + + /* Find the first PP core resource (again) */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PP0, &first_pp_resource)) { + /* Create a dummy PP object for this core so that we can read the version register */ + struct mali_group *group = mali_group_create(NULL, NULL, NULL, MALI_DOMAIN_INDEX_PP0); + if (NULL != group) { + struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource)); + if (NULL != pp_core) { + u32 pp_version; + + pp_version = mali_pp_core_get_version(pp_core); + + mali_group_delete(group); + + global_gpu_major_version = (pp_version >> 8) & 0xFF; + global_gpu_minor_version = pp_version & 0xFF; + + switch (pp_version >> 16) { + case MALI200_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI200; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + MALI_PRINT_ERROR(("Mali-200 is not supported by this driver.\n")); + _mali_osk_abort(); + break; + case MALI300_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI300; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + break; + case MALI400_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI400; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + break; + case MALI450_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI450; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + break; + case MALI470_PP_PRODUCT_ID: + global_product_id = _MALI_PRODUCT_ID_MALI470; + MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-470 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version)); + break; + default: + MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version)); + return _MALI_OSK_ERR_FAULT; + } + + return _MALI_OSK_ERR_OK; + } else { + MALI_PRINT_ERROR(("Failed to create 
initial PP object\n")); + } + } else { + MALI_PRINT_ERROR(("Failed to create initial group object\n")); + } + } else { + MALI_PRINT_ERROR(("First PP core not specified in config file\n")); + } + + return _MALI_OSK_ERR_FAULT; +} + +static void mali_delete_groups(void) +{ + struct mali_group *group; + + group = mali_group_get_glob_group(0); + while (NULL != group) { + mali_group_delete(group); + group = mali_group_get_glob_group(0); + } + + MALI_DEBUG_ASSERT(0 == mali_group_get_glob_num_groups()); +} + +static void mali_delete_l2_cache_cores(void) +{ + struct mali_l2_cache_core *l2; + + l2 = mali_l2_cache_core_get_glob_l2_core(0); + while (NULL != l2) { + mali_l2_cache_delete(l2); + l2 = mali_l2_cache_core_get_glob_l2_core(0); + } + + MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores()); +} + +static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource, u32 domain_index) +{ + struct mali_l2_cache_core *l2_cache = NULL; + + if (NULL != resource) { + + MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description)); + + l2_cache = mali_l2_cache_create(resource, domain_index); + if (NULL == l2_cache) { + MALI_PRINT_ERROR(("Failed to create L2 cache object\n")); + return NULL; + } + } + MALI_DEBUG_PRINT(3, ("Created L2 cache core object\n")); + + return l2_cache; +} + +static _mali_osk_errcode_t mali_parse_config_l2_cache(void) +{ + struct mali_l2_cache_core *l2_cache = NULL; + + if (mali_is_mali400()) { + _mali_osk_resource_t l2_resource; + if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(MALI400_OFFSET_L2_CACHE0, &l2_resource)) { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n")); + return _MALI_OSK_ERR_FAULT; + } + + l2_cache = mali_create_l2_cache_core(&l2_resource, MALI_DOMAIN_INDEX_L20); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } else if (mali_is_mali450()) { + /* + * L2 for GP at 0x10000 + * L2 for PP0-3 at 0x01000 + * L2 for PP4-7 at 0x11000 (optional) + */ + + _mali_osk_resource_t l2_gp_resource; + _mali_osk_resource_t l2_pp_grp0_resource; + _mali_osk_resource_t l2_pp_grp1_resource; + + /* Make cluster for GP's L2 */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE0, &l2_gp_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n")); + l2_cache = mali_create_l2_cache_core(&l2_gp_resource, MALI_DOMAIN_INDEX_L20); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } else { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n")); + return _MALI_OSK_ERR_FAULT; + } + + /* Find corresponding l2 domain */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE1, &l2_pp_grp0_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n")); + l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource, MALI_DOMAIN_INDEX_L21); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } else { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n")); + return _MALI_OSK_ERR_FAULT; + } + + /* Second PP core group is optional, don't fail if we don't find it */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE2, &l2_pp_grp1_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n")); + l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource, MALI_DOMAIN_INDEX_L22); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } + } else if (mali_is_mali470()) { + _mali_osk_resource_t 
l2c1_resource; + + /* Make cluster for L2C1 */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI470_OFFSET_L2_CACHE1, &l2c1_resource)) { + MALI_DEBUG_PRINT(3, ("Creating Mali-470 L2 cache 1\n")); + l2_cache = mali_create_l2_cache_core(&l2c1_resource, MALI_DOMAIN_INDEX_L21); + if (NULL == l2_cache) { + return _MALI_OSK_ERR_FAULT; + } + } else { + MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for L2C1\n")); + return _MALI_OSK_ERR_FAULT; + } + } + + return _MALI_OSK_ERR_OK; +} + +static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache, + _mali_osk_resource_t *resource_mmu, + _mali_osk_resource_t *resource_gp, + _mali_osk_resource_t *resource_pp, + u32 domain_index) +{ + struct mali_mmu_core *mmu; + struct mali_group *group; + + MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description)); + + /* Create the group object */ + group = mali_group_create(cache, NULL, NULL, domain_index); + if (NULL == group) { + MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description)); + return NULL; + } + + /* Create the MMU object inside group */ + mmu = mali_mmu_create(resource_mmu, group, MALI_FALSE); + if (NULL == mmu) { + MALI_PRINT_ERROR(("Failed to create MMU object\n")); + mali_group_delete(group); + return NULL; + } + + if (NULL != resource_gp) { + /* Create the GP core object inside this group */ + struct mali_gp_core *gp_core = mali_gp_create(resource_gp, group); + if (NULL == gp_core) { + /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */ + MALI_PRINT_ERROR(("Failed to create GP object\n")); + mali_group_delete(group); + return NULL; + } + } + + if (NULL != resource_pp) { + struct mali_pp_core *pp_core; + + /* Create the PP core object inside this group */ + pp_core = mali_pp_create(resource_pp, group, MALI_FALSE, mali_get_bcast_id(resource_pp)); + if (NULL == pp_core) { + /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */ + MALI_PRINT_ERROR(("Failed to create PP object\n")); + mali_group_delete(group); + return NULL; + } + } + + return group; +} + +static _mali_osk_errcode_t mali_create_virtual_group(_mali_osk_resource_t *resource_mmu_pp_bcast, + _mali_osk_resource_t *resource_pp_bcast, + _mali_osk_resource_t *resource_dlbu, + _mali_osk_resource_t *resource_bcast) +{ + struct mali_mmu_core *mmu_pp_bcast_core; + struct mali_pp_core *pp_bcast_core; + struct mali_dlbu_core *dlbu_core; + struct mali_bcast_unit *bcast_core; + struct mali_group *group; + + MALI_DEBUG_PRINT(2, ("Starting new virtual group for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description)); + + /* Create the DLBU core object */ + dlbu_core = mali_dlbu_create(resource_dlbu); + if (NULL == dlbu_core) { + MALI_PRINT_ERROR(("Failed to create DLBU object \n")); + return _MALI_OSK_ERR_FAULT; + } + + /* Create the Broadcast unit core */ + bcast_core = mali_bcast_unit_create(resource_bcast); + if (NULL == bcast_core) { + MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n")); + mali_dlbu_delete(dlbu_core); + return _MALI_OSK_ERR_FAULT; + } + + /* Create the group object */ +#if defined(DEBUG) + /* Get a physical PP group to temporarily add to broadcast unit. IRQ + * verification needs a physical group in the broadcast unit to test + * the broadcast unit interrupt line. 
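+	 * (Debug builds only; the temporary group is removed from the
+	 * broadcast unit again right below, without updating the HW.)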
*/ + { + struct mali_group *phys_group = NULL; + int i; + for (i = 0; i < mali_group_get_glob_num_groups(); i++) { + phys_group = mali_group_get_glob_group(i); + if (NULL != mali_group_get_pp_core(phys_group)) break; + } + MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(phys_group)); + + /* Add the group temporarily to the broadcast, and update the + * broadcast HW. Since the HW is not updated when removing the + * group the IRQ check will work when the virtual PP is created + * later. + * + * When the virtual group gets populated, the actually used + * groups will be added to the broadcast unit and the HW will + * be updated. + */ + mali_bcast_add_group(bcast_core, phys_group); + mali_bcast_reset(bcast_core); + mali_bcast_remove_group(bcast_core, phys_group); + } +#endif /* DEBUG */ + group = mali_group_create(NULL, dlbu_core, bcast_core, MALI_DOMAIN_INDEX_DUMMY); + if (NULL == group) { + MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description)); + mali_bcast_unit_delete(bcast_core); + mali_dlbu_delete(dlbu_core); + return _MALI_OSK_ERR_FAULT; + } + + /* Create the MMU object inside group */ + mmu_pp_bcast_core = mali_mmu_create(resource_mmu_pp_bcast, group, MALI_TRUE); + if (NULL == mmu_pp_bcast_core) { + MALI_PRINT_ERROR(("Failed to create MMU PP broadcast object\n")); + mali_group_delete(group); + return _MALI_OSK_ERR_FAULT; + } + + /* Create the PP core object inside this group */ + pp_bcast_core = mali_pp_create(resource_pp_bcast, group, MALI_TRUE, 0); + if (NULL == pp_bcast_core) { + /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */ + MALI_PRINT_ERROR(("Failed to create PP object\n")); + mali_group_delete(group); + return _MALI_OSK_ERR_FAULT; + } + + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_parse_config_groups(void) +{ + struct mali_group *group; + int cluster_id_gp = 0; + int cluster_id_pp_grp0 = 0; + int cluster_id_pp_grp1 = 0; + int i; + + _mali_osk_resource_t resource_gp; + _mali_osk_resource_t resource_gp_mmu; + _mali_osk_resource_t resource_pp[8]; + _mali_osk_resource_t resource_pp_mmu[8]; + _mali_osk_resource_t resource_pp_mmu_bcast; + _mali_osk_resource_t resource_pp_bcast; + _mali_osk_resource_t resource_dlbu; + _mali_osk_resource_t resource_bcast; + _mali_osk_errcode_t resource_gp_found; + _mali_osk_errcode_t resource_gp_mmu_found; + _mali_osk_errcode_t resource_pp_found[8]; + _mali_osk_errcode_t resource_pp_mmu_found[8]; + _mali_osk_errcode_t resource_pp_mmu_bcast_found; + _mali_osk_errcode_t resource_pp_bcast_found; + _mali_osk_errcode_t resource_dlbu_found; + _mali_osk_errcode_t resource_bcast_found; + + if (!(mali_is_mali400() || mali_is_mali450() || mali_is_mali470())) { + /* No known HW core */ + return _MALI_OSK_ERR_FAULT; + } + + if (MALI_MAX_JOB_RUNTIME_DEFAULT == mali_max_job_runtime) { + /* Group settings are not overridden by module parameters, so use device settings */ + _mali_osk_device_data data = { 0, }; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Use device specific settings (if defined) */ + if (0 != data.max_job_runtime) { + mali_max_job_runtime = data.max_job_runtime; + } + } + } + + if (mali_is_mali450()) { + /* Mali-450 have separate L2s for GP, and PP core group(s) */ + cluster_id_pp_grp0 = 1; + cluster_id_pp_grp1 = 2; + } + + resource_gp_found = _mali_osk_resource_find(MALI_OFFSET_GP, &resource_gp); + resource_gp_mmu_found = _mali_osk_resource_find(MALI_OFFSET_GP_MMU, 
&resource_gp_mmu); + resource_pp_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0, &(resource_pp[0])); + resource_pp_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1, &(resource_pp[1])); + resource_pp_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2, &(resource_pp[2])); + resource_pp_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3, &(resource_pp[3])); + resource_pp_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4, &(resource_pp[4])); + resource_pp_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5, &(resource_pp[5])); + resource_pp_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6, &(resource_pp[6])); + resource_pp_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7, &(resource_pp[7])); + resource_pp_mmu_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0_MMU, &(resource_pp_mmu[0])); + resource_pp_mmu_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1_MMU, &(resource_pp_mmu[1])); + resource_pp_mmu_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2_MMU, &(resource_pp_mmu[2])); + resource_pp_mmu_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3_MMU, &(resource_pp_mmu[3])); + resource_pp_mmu_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4_MMU, &(resource_pp_mmu[4])); + resource_pp_mmu_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5_MMU, &(resource_pp_mmu[5])); + resource_pp_mmu_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6_MMU, &(resource_pp_mmu[6])); + resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7])); + + + if (mali_is_mali450() || mali_is_mali470()) { + resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast); + resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu); + resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast); + resource_pp_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST, &resource_pp_bcast); + + if (_MALI_OSK_ERR_OK != resource_bcast_found || + _MALI_OSK_ERR_OK != resource_dlbu_found || + _MALI_OSK_ERR_OK != resource_pp_mmu_bcast_found || + _MALI_OSK_ERR_OK != resource_pp_bcast_found) { + /* Missing mandatory core(s) for Mali-450 or Mali-470 */ + MALI_DEBUG_PRINT(2, ("Missing mandatory resources, Mali-450 needs DLBU, Broadcast unit, virtual PP core and virtual MMU\n")); + return _MALI_OSK_ERR_FAULT; + } + } + + if (_MALI_OSK_ERR_OK != resource_gp_found || + _MALI_OSK_ERR_OK != resource_gp_mmu_found || + _MALI_OSK_ERR_OK != resource_pp_found[0] || + _MALI_OSK_ERR_OK != resource_pp_mmu_found[0]) { + /* Missing mandatory core(s) */ + MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU\n")); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores()); + group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL, MALI_DOMAIN_INDEX_GP); + if (NULL == group) { + return _MALI_OSK_ERR_FAULT; + } + + /* Create group for first (and mandatory) PP core */ + MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */ + group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0], MALI_DOMAIN_INDEX_PP0); + if (NULL == group) { + return _MALI_OSK_ERR_FAULT; + } + + mali_inited_pp_cores_group_1++; + + /* Create groups for rest of the cores in the first PP core group */ + for (i = 1; i < 4; i++) { /* First half of 
the PP cores belong to first core group */ + if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) { + if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) { + group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i); + if (NULL == group) { + return _MALI_OSK_ERR_FAULT; + } + + mali_inited_pp_cores_group_1++; + } + } + } + + /* Create groups for cores in the second PP core group */ + for (i = 4; i < 8; i++) { /* Second half of the PP cores belong to second core group */ + if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) { + if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) { + MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */ + group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i); + if (NULL == group) { + return _MALI_OSK_ERR_FAULT; + } + + mali_inited_pp_cores_group_2++; + } + } + } + + if (mali_is_mali450() || mali_is_mali470()) { + _mali_osk_errcode_t err = mali_create_virtual_group(&resource_pp_mmu_bcast, &resource_pp_bcast, &resource_dlbu, &resource_bcast); + if (_MALI_OSK_ERR_OK != err) { + return err; + } + } + + mali_max_pp_cores_group_1 = mali_inited_pp_cores_group_1; + mali_max_pp_cores_group_2 = mali_inited_pp_cores_group_2; + MALI_DEBUG_PRINT(2, ("%d+%d PP cores initialized\n", mali_inited_pp_cores_group_1, mali_inited_pp_cores_group_2)); + + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_check_shared_interrupts(void) +{ +#if !defined(CONFIG_MALI_SHARED_INTERRUPTS) + if (MALI_TRUE == _mali_osk_shared_interrupts()) { + MALI_PRINT_ERROR(("Shared interrupts detected, but driver support is not enabled\n")); + return _MALI_OSK_ERR_FAULT; + } +#endif /* !defined(CONFIG_MALI_SHARED_INTERRUPTS) */ + + /* It is OK to compile support for shared interrupts even if Mali is not using it. */ + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_parse_config_pmu(void) +{ + _mali_osk_resource_t resource_pmu; + + MALI_DEBUG_ASSERT(0 != global_gpu_base_address); + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PMU, &resource_pmu)) { + struct mali_pmu_core *pmu; + + pmu = mali_pmu_create(&resource_pmu); + if (NULL == pmu) { + MALI_PRINT_ERROR(("Failed to create PMU\n")); + return _MALI_OSK_ERR_FAULT; + } + } + + /* It's ok if the PMU doesn't exist */ + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_parse_config_memory(void) +{ + _mali_osk_device_data data = { 0, }; + _mali_osk_errcode_t ret; + + /* The priority of setting the value of mali_shared_mem_size, + * mali_dedicated_mem_start and mali_dedicated_mem_size: + * 1. module parameter; + * 2. platform data; + * 3. 
default value; + **/ + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Memory settings are not overridden by module parameters, so use device settings */ + if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size) { + /* Use device specific settings (if defined) */ + mali_dedicated_mem_start = data.dedicated_mem_start; + mali_dedicated_mem_size = data.dedicated_mem_size; + } + + if (MALI_SHARED_MEMORY_DEFAULT_SIZE == mali_shared_mem_size && + 0 != data.shared_mem_size) { + mali_shared_mem_size = data.shared_mem_size; + } + } + + if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) { + MALI_DEBUG_PRINT(2, ("Mali memory settings (dedicated: 0x%08X@0x%08X)\n", + mali_dedicated_mem_size, mali_dedicated_mem_start)); + + /* Dedicated memory */ + ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to register dedicated memory\n")); + mali_memory_terminate(); + return ret; + } + } + + if (0 < mali_shared_mem_size) { + MALI_DEBUG_PRINT(2, ("Mali memory settings (shared: 0x%08X)\n", mali_shared_mem_size)); + + /* Shared OS memory */ + ret = mali_memory_core_resource_os_memory(mali_shared_mem_size); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to register shared OS memory\n")); + mali_memory_terminate(); + return ret; + } + } + + if (0 == mali_fb_start && 0 == mali_fb_size) { + /* Frame buffer settings are not overridden by module parameters, so use device settings */ + _mali_osk_device_data data = { 0, }; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Use device specific settings (if defined) */ + mali_fb_start = data.fb_start; + mali_fb_size = data.fb_size; + } + + MALI_DEBUG_PRINT(2, ("Using device defined frame buffer settings (0x%08X@0x%08X)\n", + mali_fb_size, mali_fb_start)); + } else { + MALI_DEBUG_PRINT(2, ("Using module defined frame buffer settings (0x%08X@0x%08X)\n", + mali_fb_size, mali_fb_start)); + } + + if (0 != mali_fb_size) { + /* Register frame buffer */ + ret = mali_mem_validation_add_range(mali_fb_start, mali_fb_size); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to register frame buffer memory region\n")); + mali_memory_terminate(); + return ret; + } + } + + return _MALI_OSK_ERR_OK; +} + +static void mali_detect_gpu_class(void) +{ + if (_mali_osk_identify_gpu_resource() == 0x450) + mali_gpu_class_is_mali450 = MALI_TRUE; + + if (_mali_osk_identify_gpu_resource() == 0x470) + mali_gpu_class_is_mali470 = MALI_TRUE; +} + +static _mali_osk_errcode_t mali_init_hw_reset(void) +{ +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + _mali_osk_resource_t resource_bcast; + + /* Ensure broadcast unit is in a good state before we start creating + * groups and cores. 
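+	 * The broadcast unit object created below exists only for this
+	 * reset; it is deleted again immediately afterwards.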
+ */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast)) { + struct mali_bcast_unit *bcast_core; + + bcast_core = mali_bcast_unit_create(&resource_bcast); + if (NULL == bcast_core) { + MALI_PRINT_ERROR(("Failed to create Broadcast unit object!\n")); + return _MALI_OSK_ERR_FAULT; + } + mali_bcast_unit_delete(bcast_core); + } +#endif /* (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) */ + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_initialize_subsystems(void) +{ + _mali_osk_errcode_t err; + +#ifdef CONFIG_MALI_DT + err = _mali_osk_resource_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } +#endif + + mali_pp_job_initialize(); + + mali_timeline_initialize(); + + err = mali_session_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /*Try to init gpu secure mode */ + _mali_osk_gpu_secure_mode_init(); + +#if defined(CONFIG_MALI400_PROFILING) + err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE); + if (_MALI_OSK_ERR_OK != err) { + /* No biggie if we weren't able to initialize the profiling */ + MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n")); + } +#endif + + err = mali_memory_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + err = mali_executor_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + err = mali_scheduler_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Configure memory early, needed by mali_mmu_initialize. */ + err = mali_parse_config_memory(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + err = mali_set_global_gpu_base_address(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Detect GPU class (uses L2 cache count) */ + mali_detect_gpu_class(); + + err = mali_check_shared_interrupts(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Initialize the MALI PMU (will not touch HW!) */ + err = mali_parse_config_pmu(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Initialize the power management module */ + err = mali_pm_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Make sure the entire GPU stays on for the rest of this function */ + mali_pm_init_begin(); + + /* Ensure HW is in a good state before starting to access cores. */ + err = mali_init_hw_reset(); + if (_MALI_OSK_ERR_OK != err) { + mali_terminate_subsystems(); + return err; + } + + /* Detect which Mali GPU we are dealing with */ + err = mali_parse_product_info(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + /* The global_product_id is now populated with the correct Mali GPU */ + + /* Start configuring the actual Mali hardware. 
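The order below matters: MMU first, then the DLBU on Mali-450/470, then the L2 caches, and finally the groups that tie GP/PP cores, MMUs and L2s together.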
*/ + + err = mali_mmu_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + if (mali_is_mali450() || mali_is_mali470()) { + err = mali_dlbu_initialize(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + } + + err = mali_parse_config_l2_cache(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + err = mali_parse_config_groups(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + /* Move groups into executor */ + mali_executor_populate(); + + /* Need call after all group has assigned a domain */ + mali_pm_power_cost_setup(); + + /* Initialize the GPU timer */ + err = mali_control_timer_init(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + + /* Initialize the GPU utilization tracking */ + err = mali_utilization_init(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } + +#if defined(CONFIG_MALI_DVFS) + err = mali_dvfs_policy_init(); + if (_MALI_OSK_ERR_OK != err) { + mali_pm_init_end(); + mali_terminate_subsystems(); + return err; + } +#endif + + /* Allowing the system to be turned off */ + mali_pm_init_end(); + + return _MALI_OSK_ERR_OK; /* all ok */ +} + +void mali_terminate_subsystems(void) +{ + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + + MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n")); + + mali_utilization_term(); + mali_control_timer_term(); + + mali_executor_depopulate(); + mali_delete_groups(); /* Delete groups not added to executor */ + mali_executor_terminate(); + + mali_scheduler_terminate(); + mali_pp_job_terminate(); + mali_delete_l2_cache_cores(); + mali_mmu_terminate(); + + if (mali_is_mali450() || mali_is_mali470()) { + mali_dlbu_terminate(); + } + + mali_pm_terminate(); + + if (NULL != pmu) { + mali_pmu_delete(pmu); + } + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_term(); +#endif + + _mali_osk_gpu_secure_mode_deinit(); + + mali_memory_terminate(); + + mali_session_terminate(); + + mali_timeline_terminate(); + + global_gpu_base_address = 0; +} + +_mali_product_id_t mali_kernel_core_get_product_id(void) +{ + return global_product_id; +} + +u32 mali_kernel_core_get_gpu_major_version(void) +{ + return global_gpu_major_version; +} + +u32 mali_kernel_core_get_gpu_minor_version(void) +{ + return global_gpu_minor_version; +} + +_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + /* check compatability */ + if (args->version == _MALI_UK_API_VERSION) { + args->compatible = 1; + } else { + args->compatible = 0; + } + + args->version = _MALI_UK_API_VERSION; /* report our version */ + + /* success regardless of being compatible or not */ + MALI_SUCCESS; +} + +_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args) +{ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + /* check compatability */ + if (args->version == _MALI_UK_API_VERSION) { + args->compatible = 1; + } else { + args->compatible = 0; + } + + args->version = _MALI_UK_API_VERSION; /* report our version */ + + /* success regardless of being compatible or not */ + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t 
_mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
+{
+	_mali_osk_errcode_t err;
+	_mali_osk_notification_t *notification;
+	_mali_osk_notification_queue_t *queue;
+	struct mali_session_data *session;
+
+	/* check input */
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+	session = (struct mali_session_data *)(uintptr_t)args->ctx;
+	queue = session->ioctl_queue;
+
+	/* if the queue does not exist we're currently shutting down */
+	if (NULL == queue) {
+		MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+		args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+		return _MALI_OSK_ERR_OK;
+	}
+
+	/* receive a notification, might sleep */
+	err = _mali_osk_notification_queue_receive(queue, &notification);
+	if (_MALI_OSK_ERR_OK != err) {
+		MALI_ERROR(err); /* errcode returned, pass on to caller */
+	}
+
+	/* copy the buffer to the user */
+	args->type = (_mali_uk_notification_type)notification->notification_type;
+	_mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+	/* finished with the notification */
+	_mali_osk_notification_delete(notification);
+
+	return _MALI_OSK_ERR_OK; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
+{
+	_mali_osk_notification_t *notification;
+	_mali_osk_notification_queue_t *queue;
+	struct mali_session_data *session;
+
+	/* check input */
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+	session = (struct mali_session_data *)(uintptr_t)args->ctx;
+	queue = session->ioctl_queue;
+
+	/* if the queue does not exist we're currently shutting down */
+	if (NULL == queue) {
+		MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. 
Asking userspace to stop querying\n")); + return _MALI_OSK_ERR_OK; + } + + notification = _mali_osk_notification_create(args->type, 0); + if (NULL == notification) { + MALI_PRINT_ERROR(("Failed to create notification object\n")); + return _MALI_OSK_ERR_NOMEM; + } + + _mali_osk_notification_queue_send(queue, notification); + + return _MALI_OSK_ERR_OK; /* all ok */ +} + +_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args) +{ + wait_queue_head_t *queue; + + /* check input */ + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + queue = mali_session_get_wait_queue(); + + /* check pending big job number, might sleep if larger than MAX allowed number */ + if (wait_event_interruptible(*queue, MALI_MAX_PENDING_BIG_JOB > mali_scheduler_job_gp_big_job_count())) { + return _MALI_OSK_ERR_RESTARTSYSCALL; + } + + return _MALI_OSK_ERR_OK; /* all ok */ +} + + +_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + session = (struct mali_session_data *)(uintptr_t)args->ctx; + + if (!session->use_high_priority_job_queue) { + session->use_high_priority_job_queue = MALI_TRUE; + MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid())); + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_open(void **context) +{ + u32 i; + struct mali_session_data *session; + + /* allocated struct to track this session */ + session = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data)); + MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_NOMEM); + + MALI_DEBUG_PRINT(3, ("Session starting\n")); + + /* create a response queue for this session */ + session->ioctl_queue = _mali_osk_notification_queue_init(); + if (NULL == session->ioctl_queue) { + goto err; + } + + /*create a wait queue for this session */ + session->wait_queue = _mali_osk_wait_queue_init(); + if (NULL == session->wait_queue) { + goto err_wait_queue; + } + + session->page_directory = mali_mmu_pagedir_alloc(); + if (NULL == session->page_directory) { + goto err_mmu; + } + + if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE)) { + MALI_PRINT_ERROR(("Failed to map DLBU page into session\n")); + goto err_mmu; + } + + if (0 != mali_dlbu_phys_addr) { + mali_mmu_pagedir_update(session->page_directory, MALI_DLBU_VIRT_ADDR, mali_dlbu_phys_addr, + _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT); + } + + if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session)) { + goto err_session; + } + + /* Create soft system. */ + session->soft_job_system = mali_soft_job_system_create(session); + if (NULL == session->soft_job_system) { + goto err_soft; + } + + /* Initialize the dma fence context.*/ +#if defined(CONFIG_MALI_DMA_BUF_FENCE) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) + session->fence_context = dma_fence_context_alloc(1); + _mali_osk_atomic_init(&session->fence_seqno, 0); +#else + MALI_PRINT_ERROR(("The kernel version not support dma fence!\n")); + goto err_time_line; +#endif +#endif + + /* Create timeline system. 
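It tracks this session's job dependencies, e.g. trackers waiting on external sync fences.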
*/ + session->timeline_system = mali_timeline_system_create(session); + if (NULL == session->timeline_system) { + goto err_time_line; + } + +#if defined(CONFIG_MALI_DVFS) + _mali_osk_atomic_init(&session->number_of_window_jobs, 0); +#endif + + _mali_osk_atomic_init(&session->number_of_pp_jobs, 0); + + session->use_high_priority_job_queue = MALI_FALSE; + + /* Initialize list of PP jobs on this session. */ + _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_list); + + /* Initialize the pp_job_fb_lookup_list array used to quickly lookup jobs from a given frame builder */ + for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i) { + _MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]); + } + + session->pid = _mali_osk_get_pid(); + session->comm = _mali_osk_get_comm(); + session->max_mali_mem_allocated_size = 0; + for (i = 0; i < MALI_MEM_TYPE_MAX; i ++) { + atomic_set(&session->mali_mem_array[i], 0); + } + atomic_set(&session->mali_mem_allocated_pages, 0); + *context = (void *)session; + + /* Add session to the list of all sessions. */ + mali_session_add(session); + + MALI_DEBUG_PRINT(3, ("Session started\n")); + return _MALI_OSK_ERR_OK; + +err_time_line: + mali_soft_job_system_destroy(session->soft_job_system); +err_soft: + mali_memory_session_end(session); +err_session: + mali_mmu_pagedir_free(session->page_directory); +err_mmu: + _mali_osk_wait_queue_term(session->wait_queue); +err_wait_queue: + _mali_osk_notification_queue_term(session->ioctl_queue); +err: + _mali_osk_free(session); + MALI_ERROR(_MALI_OSK_ERR_NOMEM); + +} + +#if defined(DEBUG) +/* parameter used for debug */ +extern u32 num_pm_runtime_resume; +extern u32 num_pm_updates; +extern u32 num_pm_updates_up; +extern u32 num_pm_updates_down; +#endif + +_mali_osk_errcode_t _mali_ukk_close(void **context) +{ + struct mali_session_data *session; + MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS); + session = (struct mali_session_data *)*context; + + MALI_DEBUG_PRINT(3, ("Session ending\n")); + + MALI_DEBUG_ASSERT_POINTER(session->soft_job_system); + MALI_DEBUG_ASSERT_POINTER(session->timeline_system); + + /* Remove session from list of all sessions. */ + mali_session_remove(session); + + /* This flag is used to prevent queueing of jobs due to activation. */ + session->is_aborting = MALI_TRUE; + + /* Stop the soft job timer. */ + mali_timeline_system_stop_timer(session->timeline_system); + + /* Abort queued jobs */ + mali_scheduler_abort_session(session); + + /* Abort executing jobs */ + mali_executor_abort_session(session); + + /* Abort the soft job system. */ + mali_soft_job_system_abort(session->soft_job_system); + + /* Force execution of all pending bottom half processing for GP and PP. */ + _mali_osk_wq_flush(); + + /* The session PP list should now be empty. */ + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_list)); + + /* At this point the GP and PP scheduler no longer has any jobs queued or running from this + * session, and all soft jobs in the soft job system has been destroyed. */ + + /* Any trackers left in the timeline system are directly or indirectly waiting on external + * sync fences. Cancel all sync fence waiters to trigger activation of all remaining + * trackers. This call will sleep until all timelines are empty. */ + mali_timeline_system_abort(session->timeline_system); + + /* Flush pending work. + * Needed to make sure all bottom half processing related to this + * session has been completed, before we free internal data structures. + */ + _mali_osk_wq_flush(); + + /* Destroy timeline system. 
*/
+	mali_timeline_system_destroy(session->timeline_system);
+	session->timeline_system = NULL;
+
+	/* Destroy soft job system. */
+	mali_soft_job_system_destroy(session->soft_job_system);
+	session->soft_job_system = NULL;
+
+	/* Wait for the session's PP job list to become empty. */
+	_mali_osk_wait_queue_wait_event(session->wait_queue, mali_session_pp_job_is_empty, (void *) session);
+
+	/* Free remaining memory allocated to this session */
+	mali_memory_session_end(session);
+
+#if defined(CONFIG_MALI_DVFS)
+	_mali_osk_atomic_term(&session->number_of_window_jobs);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+	_mali_osk_profiling_stop_sampling(session->pid);
+#endif
+
+	/* Free session data structures */
+	mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
+	mali_mmu_pagedir_free(session->page_directory);
+	_mali_osk_wait_queue_term(session->wait_queue);
+	_mali_osk_notification_queue_term(session->ioctl_queue);
+	_mali_osk_free(session);
+
+	*context = NULL;
+
+	MALI_DEBUG_PRINT(3, ("Session has ended\n"));
+
+#if defined(DEBUG)
+	MALI_DEBUG_PRINT(3, ("Stats: # runtime resumes: %u\n", num_pm_runtime_resume));
+	MALI_DEBUG_PRINT(3, ("       # PM updates: .... %u (up %u, down %u)\n", num_pm_updates, num_pm_updates_up, num_pm_updates_down));
+
+	num_pm_runtime_resume = 0;
+	num_pm_updates = 0;
+	num_pm_updates_up = 0;
+	num_pm_updates_down = 0;
+#endif
+
+	return _MALI_OSK_ERR_OK;
+}
+
+#if MALI_STATE_TRACKING
+u32 _mali_kernel_core_dump_state(char *buf, u32 size)
+{
+	int n = 0; /* Number of bytes written to buf */
+
+	n += mali_scheduler_dump_state(buf + n, size - n);
+	n += mali_executor_dump_state(buf + n, size - n);
+
+	return n;
+}
+#endif
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_kernel_core.h b/drivers/gpu/arm/mali400/common/mali_kernel_core.h
--- a/drivers/gpu/arm/mali400/common/mali_kernel_core.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_kernel_core.h	2018-05-06 08:49:49.174695256 +0200
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +#ifndef __MALI_KERNEL_CORE_H__ +#define __MALI_KERNEL_CORE_H__ + +#include "mali_osk.h" + +typedef enum { + _MALI_PRODUCT_ID_UNKNOWN, + _MALI_PRODUCT_ID_MALI200, + _MALI_PRODUCT_ID_MALI300, + _MALI_PRODUCT_ID_MALI400, + _MALI_PRODUCT_ID_MALI450, + _MALI_PRODUCT_ID_MALI470, +} _mali_product_id_t; + +extern mali_bool mali_gpu_class_is_mali450; +extern mali_bool mali_gpu_class_is_mali470; + +_mali_osk_errcode_t mali_initialize_subsystems(void); + +void mali_terminate_subsystems(void); + +_mali_product_id_t mali_kernel_core_get_product_id(void); + +u32 mali_kernel_core_get_gpu_major_version(void); + +u32 mali_kernel_core_get_gpu_minor_version(void); + +u32 _mali_kernel_core_dump_state(char *buf, u32 size); + +MALI_STATIC_INLINE mali_bool mali_is_mali470(void) +{ + return mali_gpu_class_is_mali470; +} + +MALI_STATIC_INLINE mali_bool mali_is_mali450(void) +{ + return mali_gpu_class_is_mali450; +} + +MALI_STATIC_INLINE mali_bool mali_is_mali400(void) +{ + if (mali_gpu_class_is_mali450 || mali_gpu_class_is_mali470) + return MALI_FALSE; + + return MALI_TRUE; +} +#endif /* __MALI_KERNEL_CORE_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_kernel_utilization.c b/drivers/gpu/arm/mali400/common/mali_kernel_utilization.c --- a/drivers/gpu/arm/mali400/common/mali_kernel_utilization.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_kernel_utilization.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,440 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_utilization.h" +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_scheduler.h" + +#include "mali_executor.h" +#include "mali_dvfs_policy.h" +#include "mali_control_timer.h" + +/* Thresholds for GP bound detection. 
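Utilization is reported in parts of 256: the workload counts as GP bound when GP utilization is above 240/256 (roughly 94%) while PP utilization is still below 250/256 (roughly 98%).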
*/ +#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240 +#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250 + +static _mali_osk_spinlock_irq_t *utilization_data_lock; + +static u32 num_running_gp_cores = 0; +static u32 num_running_pp_cores = 0; + +static u64 work_start_time_gpu = 0; +static u64 work_start_time_gp = 0; +static u64 work_start_time_pp = 0; +static u64 accumulated_work_time_gpu = 0; +static u64 accumulated_work_time_gp = 0; +static u64 accumulated_work_time_pp = 0; + +static u32 last_utilization_gpu = 0 ; +static u32 last_utilization_gp = 0 ; +static u32 last_utilization_pp = 0 ; + +void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL; + +/* Define the first timer control timer timeout in milliseconds */ +static u32 mali_control_first_timeout = 100; +static struct mali_gpu_utilization_data mali_util_data = {0, }; + +struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer) +{ + u64 time_now; + u32 leading_zeroes; + u32 shift_val; + u32 work_normalized_gpu; + u32 work_normalized_gp; + u32 work_normalized_pp; + u32 period_normalized; + u32 utilization_gpu; + u32 utilization_gp; + u32 utilization_pp; + + mali_utilization_data_lock(); + + time_now = _mali_osk_time_get_ns(); + + *time_period = time_now - *start_time; + + if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) { + mali_control_timer_pause(); + /* + * No work done for this period + * - No need to reschedule timer + * - Report zero usage + */ + last_utilization_gpu = 0; + last_utilization_gp = 0; + last_utilization_pp = 0; + + mali_util_data.utilization_gpu = last_utilization_gpu; + mali_util_data.utilization_gp = last_utilization_gp; + mali_util_data.utilization_pp = last_utilization_pp; + + mali_utilization_data_unlock(); + + *need_add_timer = MALI_FALSE; + + mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND); + + MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu)); + MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp)); + MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp)); + + return &mali_util_data; + } + + /* If we are currently busy, update working period up to now */ + if (work_start_time_gpu != 0) { + accumulated_work_time_gpu += (time_now - work_start_time_gpu); + work_start_time_gpu = time_now; + + /* GP and/or PP will also be busy if the GPU is busy at this point */ + + if (work_start_time_gp != 0) { + accumulated_work_time_gp += (time_now - work_start_time_gp); + work_start_time_gp = time_now; + } + + if (work_start_time_pp != 0) { + accumulated_work_time_pp += (time_now - work_start_time_pp); + work_start_time_pp = time_now; + } + } + + /* + * We have two 64-bit values, a dividend and a divisor. + * To avoid dependencies to a 64-bit divider, we shift down the two values + * equally first. + * We shift the dividend up and possibly the divisor down, making the result X in 256. 
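+	 * Worked example (illustrative numbers): for a 300 ms period the
+	 * top 32 bits of time_period are zero, so shift_val is 0 and no
+	 * precision is lost. 300000000 ns is larger than 0x00FFFFFF, so
+	 * the divisor is shifted down: 300000000 >> 8 = 1171875. With
+	 * 150 ms of busy time, 150000000 / 1171875 = 128, i.e. 50% of 256.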
+	 */
+
+	/* Shift the 64-bit values down so they fit inside a 32-bit integer */
+	leading_zeroes = _mali_osk_clz((u32)(*time_period >> 32));
+	shift_val = 32 - leading_zeroes;
+	work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
+	work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
+	work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
+	period_normalized = (u32)(*time_period >> shift_val);
+
+	/*
+	 * We report usage in parts of 256, so we must shift the dividend
+	 * up or the divisor down by 8 bits. (We could use a combination
+	 * of both, but one shift is simpler and accurate enough.)
+	 */
+	if (period_normalized > 0x00FFFFFF) {
+		/* The divisor is so big that it is safe to shift it down */
+		period_normalized >>= 8;
+	} else {
+		/*
+		 * The divisor is so small that we can shift up the dividend without losing any data.
+		 * (dividend is always smaller than the divisor)
+		 */
+		work_normalized_gpu <<= 8;
+		work_normalized_gp <<= 8;
+		work_normalized_pp <<= 8;
+	}
+
+	utilization_gpu = work_normalized_gpu / period_normalized;
+	utilization_gp = work_normalized_gp / period_normalized;
+	utilization_pp = work_normalized_pp / period_normalized;
+
+	last_utilization_gpu = utilization_gpu;
+	last_utilization_gp = utilization_gp;
+	last_utilization_pp = utilization_pp;
+
+	if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
+	    (MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
+		mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
+	} else {
+		mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
+	}
+
+	/* starting a new period */
+	accumulated_work_time_gpu = 0;
+	accumulated_work_time_gp = 0;
+	accumulated_work_time_pp = 0;
+
+	*start_time = time_now;
+
+	mali_util_data.utilization_gp = last_utilization_gp;
+	mali_util_data.utilization_gpu = last_utilization_gpu;
+	mali_util_data.utilization_pp = last_utilization_pp;
+
+	mali_utilization_data_unlock();
+
+	*need_add_timer = MALI_TRUE;
+
+	MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+	MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+	MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+	return &mali_util_data;
+}
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+#if USING_GPU_UTILIZATION
+	_mali_osk_device_data data;
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+		if (NULL != data.utilization_callback) {
+			mali_utilization_callback = data.utilization_callback;
+			MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed\n"));
+		}
+	}
+#endif /* USING_GPU_UTILIZATION */
+
+	if (NULL == mali_utilization_callback) {
+		MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No platform utilization handler installed\n"));
+	}
+
+	utilization_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+	if (NULL == utilization_data_lock) {
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	num_running_gp_cores = 0;
+	num_running_pp_cores = 0;
+
+	return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_term(void)
+{
+	if (NULL != utilization_data_lock) {
+		_mali_osk_spinlock_irq_term(utilization_data_lock);
+	}
+}
+
+void mali_utilization_gp_start(void)
+{
+	mali_utilization_data_lock();
+
+	++num_running_gp_cores;
+	if (1 == num_running_gp_cores) {
+		u64 time_now = _mali_osk_time_get_ns();
+
+		/* First GP core started, consider GP busy from now and onwards */
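+		/*
+		 * Busy-window bookkeeping sketch (illustrative, assuming two
+		 * overlapping GP jobs A and B):
+		 *
+		 *   A:  start......end
+		 *   B:       start.........end
+		 *   GP: [A start ........ B end] = one busy window
+		 *
+		 * The window opens on the 0->1 transition of
+		 * num_running_gp_cores and closes on 1->0, so overlapping
+		 * jobs are only counted once.
+		 */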
+		work_start_time_gp = time_now;
+
+		if (0 == num_running_pp_cores) {
+			mali_bool is_resume = MALI_FALSE;
+			/*
+			 * There are no PP cores running, so this is also the point
+			 * at which we consider the GPU to be busy as well.
+			 */
+			work_start_time_gpu = time_now;
+
+			is_resume = mali_control_timer_resume(time_now);
+
+			mali_utilization_data_unlock();
+
+			if (is_resume) {
+				/* Apply new-period policy for performance reasons */
+#if defined(CONFIG_MALI_DVFS)
+				/* Clear session->number_of_window_jobs, prepare parameter for DVFS */
+				mali_session_max_window_num();
+				if (0 == last_utilization_gpu) {
+					/*
+					 * mali_dev_pause() is called when setting the clock, and
+					 * changing the clock always jumps to the highest step,
+					 * even when clocking down. That is wasteful, so only
+					 * start a new DVFS period when the last utilization was
+					 * zero, i.e. when the timer was stopped and the GPU is
+					 * being started again.
+					 */
+					mali_dvfs_policy_new_period();
+				}
+#endif
+				/*
+				 * Use a short interval for the first timeout, for power
+				 * reasons: the new period starts at full power, and if the
+				 * job load is light and finishes within 10 ms, staying at
+				 * high frequency for the rest of a long period would waste
+				 * energy.
+				 */
+				mali_control_timer_add(mali_control_first_timeout);
+			}
+		} else {
+			mali_utilization_data_unlock();
+		}
+
+	} else {
+		/* Nothing to do */
+		mali_utilization_data_unlock();
+	}
+}
+
+void mali_utilization_pp_start(void)
+{
+	mali_utilization_data_lock();
+
+	++num_running_pp_cores;
+	if (1 == num_running_pp_cores) {
+		u64 time_now = _mali_osk_time_get_ns();
+
+		/* First PP core started, consider PP busy from now and onwards */
+		work_start_time_pp = time_now;
+
+		if (0 == num_running_gp_cores) {
+			mali_bool is_resume = MALI_FALSE;
+			/*
+			 * There are no GP cores running, so this is also the point
+			 * at which we consider the GPU to be busy as well.
+			 */
+			work_start_time_gpu = time_now;
+
+			/* Start a new period if stopped */
+			is_resume = mali_control_timer_resume(time_now);
+
+			mali_utilization_data_unlock();
+
+			if (is_resume) {
+#if defined(CONFIG_MALI_DVFS)
+				/* Clear session->number_of_window_jobs, prepare parameter for DVFS */
+				mali_session_max_window_num();
+				if (0 == last_utilization_gpu) {
+					/*
+					 * mali_dev_pause() is called when setting the clock, and
+					 * changing the clock always jumps to the highest step,
+					 * even when clocking down. That is wasteful, so only
+					 * start a new DVFS period when the last utilization was
+					 * zero, i.e. when the timer was stopped and the GPU is
+					 * being started again.
+					 */
+					mali_dvfs_policy_new_period();
+				}
+#endif
+
+				/*
+				 * Use a short interval for the first timeout, for power
+				 * reasons: the new period starts at full power, and if the
+				 * job load is light and finishes within 10 ms, staying at
+				 * high frequency for the rest of a long period would waste
+				 * energy.
+				 */
+				mali_control_timer_add(mali_control_first_timeout);
+			}
+		} else {
+			mali_utilization_data_unlock();
+		}
+	} else {
+		/* Nothing to do */
+		mali_utilization_data_unlock();
+	}
+}
+
+void mali_utilization_gp_end(void)
+{
+	mali_utilization_data_lock();
+
+	--num_running_gp_cores;
+	if (0 == num_running_gp_cores) {
+		u64 time_now = _mali_osk_time_get_ns();
+
+		/* Last GP core ended, consider GP idle from now and onwards */
+		accumulated_work_time_gp += (time_now - work_start_time_gp);
+		work_start_time_gp = 0;
+
+		if (0 == num_running_pp_cores) {
+			/*
+			 * There are no PP cores running, so this is also the point
+			 * at which we consider the GPU to be idle as well.
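+			 * The closed busy window is added to
+			 * accumulated_work_time_gpu, which
+			 * mali_utilization_calculate() drains and resets at the
+			 * end of each sampling period.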
+ */ + accumulated_work_time_gpu += (time_now - work_start_time_gpu); + work_start_time_gpu = 0; + } + } + + mali_utilization_data_unlock(); +} + +void mali_utilization_pp_end(void) +{ + mali_utilization_data_lock(); + + --num_running_pp_cores; + if (0 == num_running_pp_cores) { + u64 time_now = _mali_osk_time_get_ns(); + + /* Last PP core ended, consider PP idle from now and onwards */ + accumulated_work_time_pp += (time_now - work_start_time_pp); + work_start_time_pp = 0; + + if (0 == num_running_gp_cores) { + /* + * There are no GP cores running, so this is also the point + * at which we consider the GPU to be idle as well. + */ + accumulated_work_time_gpu += (time_now - work_start_time_gpu); + work_start_time_gpu = 0; + } + } + + mali_utilization_data_unlock(); +} + +mali_bool mali_utilization_enabled(void) +{ +#if defined(CONFIG_MALI_DVFS) + return mali_dvfs_policy_enabled(); +#else + return (NULL != mali_utilization_callback); +#endif /* defined(CONFIG_MALI_DVFS) */ +} + +void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data) +{ + MALI_DEBUG_ASSERT_POINTER(mali_utilization_callback); + + mali_utilization_callback(util_data); +} + +void mali_utilization_reset(void) +{ + accumulated_work_time_gpu = 0; + accumulated_work_time_gp = 0; + accumulated_work_time_pp = 0; + + last_utilization_gpu = 0; + last_utilization_gp = 0; + last_utilization_pp = 0; +} + +void mali_utilization_data_lock(void) +{ + _mali_osk_spinlock_irq_lock(utilization_data_lock); +} + +void mali_utilization_data_unlock(void) +{ + _mali_osk_spinlock_irq_unlock(utilization_data_lock); +} + +void mali_utilization_data_assert_locked(void) +{ + MALI_DEBUG_ASSERT_LOCK_HELD(utilization_data_lock); +} + +u32 _mali_ukk_utilization_gp_pp(void) +{ + return last_utilization_gpu; +} + +u32 _mali_ukk_utilization_gp(void) +{ + return last_utilization_gp; +} + +u32 _mali_ukk_utilization_pp(void) +{ + return last_utilization_pp; +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_kernel_utilization.h b/drivers/gpu/arm/mali400/common/mali_kernel_utilization.h --- a/drivers/gpu/arm/mali400/common/mali_kernel_utilization.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_kernel_utilization.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_KERNEL_UTILIZATION_H__ +#define __MALI_KERNEL_UTILIZATION_H__ + +#include +#include "mali_osk.h" + +/** + * Initialize/start the Mali GPU utilization metrics reporting. + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. 
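+ *
+ * Typical flow (as used in this driver): call this once at driver
+ * load, bracket every job with the gp/pp start/end hooks below, and
+ * let the control timer drive mali_utilization_calculate() once per
+ * sampling period.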
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Check if Mali utilization is enabled
+ */
+mali_bool mali_utilization_enabled(void);
+
+/**
+ * Should be called when a GP job is about to start executing
+ */
+void mali_utilization_gp_start(void);
+
+/**
+ * Should be called when a GP job has completed executing
+ */
+void mali_utilization_gp_end(void);
+
+/**
+ * Should be called when a PP job is about to start executing
+ */
+void mali_utilization_pp_start(void);
+
+/**
+ * Should be called when a PP job has completed executing
+ */
+void mali_utilization_pp_end(void);
+
+/**
+ * Should be called to calculate the GPU utilization
+ */
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period, mali_bool *need_add_timer);
+
+_mali_osk_spinlock_irq_t *mali_utilization_get_lock(void);
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data);
+
+void mali_utilization_data_lock(void);
+
+void mali_utilization_data_unlock(void);
+
+void mali_utilization_data_assert_locked(void);
+
+void mali_utilization_reset(void);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_kernel_vsync.c b/drivers/gpu/arm/mali400/common/mali_kernel_vsync.c
--- a/drivers/gpu/arm/mali400/common/mali_kernel_vsync.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_kernel_vsync.c	2018-05-06 08:49:49.174695256 +0200
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#include "mali_osk_profiling.h"
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+	_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+	MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+	/*
+	 * Manually generate user space events in kernel space.
+	 * This saves user space from calling kernel space twice in this case.
+	 * We just need to remember to add pid and tid manually.
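+	 * A BEGIN_WAIT event is logged as a software-channel SUSPEND and
+	 * an END_WAIT as the matching RESUME, as emitted below.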
+ */ + if (event == _MALI_UK_VSYNC_EVENT_BEGIN_WAIT) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC, + _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0); + } + + if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) { + _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | + MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC, + _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0); + } + + + MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event)); + MALI_SUCCESS; +} + diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_l2_cache.c b/drivers/gpu/arm/mali400/common/mali_l2_cache.c --- a/drivers/gpu/arm/mali400/common/mali_l2_cache.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_l2_cache.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,534 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_l2_cache.h" +#include "mali_hw_core.h" +#include "mali_scheduler.h" +#include "mali_pm.h" +#include "mali_pm_domain.h" + +/** + * Size of the Mali L2 cache registers in bytes + */ +#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30 + +/** + * Mali L2 cache register numbers + * Used in the register read/write routines. 
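+ * Offsets are relative to the core's mapped base address.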
+ * See the hardware documentation for more information about each register + */ +typedef enum mali_l2_cache_register { + MALI400_L2_CACHE_REGISTER_SIZE = 0x0004, + MALI400_L2_CACHE_REGISTER_STATUS = 0x0008, + /*unused = 0x000C */ + MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, + MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014, + MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, + MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, + MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024, + MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C, +} mali_l2_cache_register; + +/** + * Mali L2 cache commands + * These are the commands that can be sent to the Mali L2 cache unit + */ +typedef enum mali_l2_cache_command { + MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, +} mali_l2_cache_command; + +/** + * Mali L2 cache commands + * These are the commands that can be sent to the Mali L2 cache unit + */ +typedef enum mali_l2_cache_enable { + MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /* Default */ + MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, + MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, +} mali_l2_cache_enable; + +/** + * Mali L2 cache status bits + */ +typedef enum mali_l2_cache_status { + MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, + MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, +} mali_l2_cache_status; + +#define MALI400_L2_MAX_READS_NOT_SET -1 + +static struct mali_l2_cache_core * + mali_global_l2s[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, }; +static u32 mali_global_num_l2s = 0; + +int mali_l2_max_reads = MALI400_L2_MAX_READS_NOT_SET; + + +/* Local helper functions */ + +static void mali_l2_cache_reset(struct mali_l2_cache_core *cache); + +static _mali_osk_errcode_t mali_l2_cache_send_command( + struct mali_l2_cache_core *cache, u32 reg, u32 val); + +static void mali_l2_cache_lock(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + _mali_osk_spinlock_irq_lock(cache->lock); +} + +static void mali_l2_cache_unlock(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + _mali_osk_spinlock_irq_unlock(cache->lock); +} + +/* Implementation of the L2 cache interface */ + +struct mali_l2_cache_core *mali_l2_cache_create( + _mali_osk_resource_t *resource, u32 domain_index) +{ + struct mali_l2_cache_core *cache = NULL; +#if defined(DEBUG) + u32 cache_size; +#endif + + MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", + resource->description)); + + if (mali_global_num_l2s >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) { + MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 caches\n")); + return NULL; + } + + cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core)); + if (NULL == cache) { + MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n")); + return NULL; + } + + cache->core_id = mali_global_num_l2s; + cache->counter_src0 = MALI_HW_CORE_NO_COUNTER; + cache->counter_src1 = MALI_HW_CORE_NO_COUNTER; + cache->counter_value0_base = 0; + cache->counter_value1_base = 0; + cache->pm_domain = NULL; + cache->power_is_on = MALI_FALSE; + cache->last_invalidated_id = 0; + + if (_MALI_OSK_ERR_OK != mali_hw_core_create(&cache->hw_core, + resource, MALI400_L2_CACHE_REGISTERS_SIZE)) { + _mali_osk_free(cache); + return NULL; + } + +#if defined(DEBUG) + cache_size = mali_hw_core_register_read(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_SIZE); + MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n", + resource->description, + 1 << 
(((cache_size >> 16) & 0xff) - 10), + 1 << ((cache_size >> 8) & 0xff), + 1 << (cache_size & 0xff), + 1 << ((cache_size >> 24) & 0xff))); +#endif + + cache->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_L2); + if (NULL == cache->lock) { + MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", + cache->hw_core.description)); + mali_hw_core_delete(&cache->hw_core); + _mali_osk_free(cache); + return NULL; + } + + /* register with correct power domain */ + cache->pm_domain = mali_pm_register_l2_cache( + domain_index, cache); + + mali_global_l2s[mali_global_num_l2s] = cache; + mali_global_num_l2s++; + + return cache; +} + +void mali_l2_cache_delete(struct mali_l2_cache_core *cache) +{ + u32 i; + for (i = 0; i < mali_global_num_l2s; i++) { + if (mali_global_l2s[i] != cache) { + continue; + } + + mali_global_l2s[i] = NULL; + mali_global_num_l2s--; + + if (i == mali_global_num_l2s) { + /* Removed last element, nothing more to do */ + break; + } + + /* + * We removed a l2 cache from the middle of the array, + * so move the last l2 cache to current position + */ + mali_global_l2s[i] = mali_global_l2s[mali_global_num_l2s]; + mali_global_l2s[mali_global_num_l2s] = NULL; + + /* All good */ + break; + } + + _mali_osk_spinlock_irq_term(cache->lock); + mali_hw_core_delete(&cache->hw_core); + _mali_osk_free(cache); +} + +void mali_l2_cache_power_up(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + + mali_l2_cache_lock(cache); + + mali_l2_cache_reset(cache); + + if ((1 << MALI_DOMAIN_INDEX_DUMMY) != cache->pm_domain->pmu_mask) + MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on); + cache->power_is_on = MALI_TRUE; + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_power_down(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + + mali_l2_cache_lock(cache); + + MALI_DEBUG_ASSERT(MALI_TRUE == cache->power_is_on); + + /* + * The HW counters will start from zero again when we resume, + * but we should report counters as always increasing. + * Take a copy of the HW values now in order to add this to + * the values we report after being powered up. + * + * The physical power off of the L2 cache might be outside our + * own control (e.g. runtime PM). That is why we must manually + * set set the counter value to zero as well. 
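+	 * Example (assumed numbers): if PERFCNT_VAL0 reads 1000 at
+	 * power-down, counter_value0_base becomes 1000; after power-up
+	 * the hardware counts from zero again and
+	 * mali_l2_cache_core_get_counter_values() adds the base back in,
+	 * so callers still see a monotonically increasing value.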
+ */ + + if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) { + cache->counter_value0_base += mali_hw_core_register_read( + &cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0); + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0); + } + + if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) { + cache->counter_value1_base += mali_hw_core_register_read( + &cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1); + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0); + } + + + cache->power_is_on = MALI_FALSE; + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_core_set_counter_src( + struct mali_l2_cache_core *cache, u32 source_id, u32 counter) +{ + u32 reg_offset_src; + u32 reg_offset_val; + + MALI_DEBUG_ASSERT_POINTER(cache); + MALI_DEBUG_ASSERT(source_id >= 0 && source_id <= 1); + + mali_l2_cache_lock(cache); + + if (0 == source_id) { + /* start counting from 0 */ + cache->counter_value0_base = 0; + cache->counter_src0 = counter; + reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0; + reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0; + } else { + /* start counting from 0 */ + cache->counter_value1_base = 0; + cache->counter_src1 = counter; + reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1; + reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1; + } + + if (cache->power_is_on) { + u32 hw_src; + + if (MALI_HW_CORE_NO_COUNTER != counter) { + hw_src = counter; + } else { + hw_src = 0; /* disable value for HW */ + } + + /* Set counter src */ + mali_hw_core_register_write(&cache->hw_core, + reg_offset_src, hw_src); + + /* Make sure the HW starts counting from 0 again */ + mali_hw_core_register_write(&cache->hw_core, + reg_offset_val, 0); + } + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_core_get_counter_values( + struct mali_l2_cache_core *cache, + u32 *src0, u32 *value0, u32 *src1, u32 *value1) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + MALI_DEBUG_ASSERT(NULL != src0); + MALI_DEBUG_ASSERT(NULL != value0); + MALI_DEBUG_ASSERT(NULL != src1); + MALI_DEBUG_ASSERT(NULL != value1); + + mali_l2_cache_lock(cache); + + *src0 = cache->counter_src0; + *src1 = cache->counter_src1; + + if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) { + if (MALI_TRUE == cache->power_is_on) { + *value0 = mali_hw_core_register_read(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0); + } else { + *value0 = 0; + } + + /* Add base offset value (in case we have been power off) */ + *value0 += cache->counter_value0_base; + } + + if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) { + if (MALI_TRUE == cache->power_is_on) { + *value1 = mali_hw_core_register_read(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1); + } else { + *value1 = 0; + } + + /* Add base offset value (in case we have been power off) */ + *value1 += cache->counter_value1_base; + } + + mali_l2_cache_unlock(cache); +} + +struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index) +{ + if (mali_global_num_l2s > index) { + return mali_global_l2s[index]; + } + + return NULL; +} + +u32 mali_l2_cache_core_get_glob_num_l2_cores(void) +{ + return mali_global_num_l2s; +} + +void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + + if (NULL == cache) { + return; + } + + mali_l2_cache_lock(cache); + + cache->last_invalidated_id = mali_scheduler_get_new_cache_order(); + mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, + 
MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_invalidate_conditional( + struct mali_l2_cache_core *cache, u32 id) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + + if (NULL == cache) { + return; + } + + /* + * If the last cache invalidation was done by a job with a higher id we + * don't have to flush. Since user space will store jobs w/ their + * corresponding memory in sequence (first job #0, then job #1, ...), + * we don't have to flush for job n-1 if job n has already invalidated + * the cache since we know for sure that job n-1's memory was already + * written when job n was started. + */ + + mali_l2_cache_lock(cache); + + if (((s32)id) > ((s32)cache->last_invalidated_id)) { + /* Set latest invalidated id to current "point in time" */ + cache->last_invalidated_id = + mali_scheduler_get_new_cache_order(); + mali_l2_cache_send_command(cache, + MALI400_L2_CACHE_REGISTER_COMMAND, + MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + } + + mali_l2_cache_unlock(cache); +} + +void mali_l2_cache_invalidate_all(void) +{ + u32 i; + for (i = 0; i < mali_global_num_l2s; i++) { + struct mali_l2_cache_core *cache = mali_global_l2s[i]; + _mali_osk_errcode_t ret; + + MALI_DEBUG_ASSERT_POINTER(cache); + + mali_l2_cache_lock(cache); + + if (MALI_TRUE != cache->power_is_on) { + mali_l2_cache_unlock(cache); + continue; + } + + cache->last_invalidated_id = + mali_scheduler_get_new_cache_order(); + + ret = mali_l2_cache_send_command(cache, + MALI400_L2_CACHE_REGISTER_COMMAND, + MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to invalidate cache\n")); + } + + mali_l2_cache_unlock(cache); + } +} + +void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages) +{ + u32 i; + for (i = 0; i < mali_global_num_l2s; i++) { + struct mali_l2_cache_core *cache = mali_global_l2s[i]; + u32 j; + + MALI_DEBUG_ASSERT_POINTER(cache); + + mali_l2_cache_lock(cache); + + if (MALI_TRUE != cache->power_is_on) { + mali_l2_cache_unlock(cache); + continue; + } + + for (j = 0; j < num_pages; j++) { + _mali_osk_errcode_t ret; + + ret = mali_l2_cache_send_command(cache, + MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, + pages[j]); + if (_MALI_OSK_ERR_OK != ret) { + MALI_PRINT_ERROR(("Failed to invalidate cache (page)\n")); + } + } + + mali_l2_cache_unlock(cache); + } +} + +/* -------- local helper functions below -------- */ + +static void mali_l2_cache_reset(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock); + + /* Invalidate cache (just to keep it in a known state at startup) */ + mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, + MALI400_L2_CACHE_COMMAND_CLEAR_ALL); + + /* Enable cache */ + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_ENABLE, + (u32)MALI400_L2_CACHE_ENABLE_ACCESS | + (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE); + + if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) { + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_MAX_READS, + (u32)mali_l2_max_reads); + } + + /* Restart any performance counters (if enabled) */ + if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) { + + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, + cache->counter_src0); + } + + if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) { + mali_hw_core_register_write(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, + cache->counter_src1); + } +} + +static _mali_osk_errcode_t 
mali_l2_cache_send_command( + struct mali_l2_cache_core *cache, u32 reg, u32 val) +{ + int i = 0; + const int loop_count = 100000; + + MALI_DEBUG_ASSERT_POINTER(cache); + MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock); + + /* + * First, wait for L2 cache command handler to go idle. + * (Commands received while processing another command will be ignored) + */ + for (i = 0; i < loop_count; i++) { + if (!(mali_hw_core_register_read(&cache->hw_core, + MALI400_L2_CACHE_REGISTER_STATUS) & + (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) { + break; + } + } + + if (i == loop_count) { + MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n")); + return _MALI_OSK_ERR_FAULT; + } + + /* then issue the command */ + mali_hw_core_register_write(&cache->hw_core, reg, val); + + return _MALI_OSK_ERR_OK; +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_l2_cache.h b/drivers/gpu/arm/mali400/common/mali_l2_cache.h --- a/drivers/gpu/arm/mali400/common/mali_l2_cache.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_l2_cache.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_KERNEL_L2_CACHE_H__ +#define __MALI_KERNEL_L2_CACHE_H__ + +#include "mali_osk.h" +#include "mali_hw_core.h" + +#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3 +/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 MP4) */ +#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5 + +/** + * Definition of the L2 cache core struct + * Used to track a L2 cache unit in the system. 
+ * Contains information about the mapping of the registers + */ +struct mali_l2_cache_core { + /* Common HW core functionality */ + struct mali_hw_core hw_core; + + /* Synchronize L2 cache access */ + _mali_osk_spinlock_irq_t *lock; + + /* Unique core ID */ + u32 core_id; + + /* The power domain this L2 cache belongs to */ + struct mali_pm_domain *pm_domain; + + /* MALI_TRUE if power is on for this L2 cache */ + mali_bool power_is_on; + + /* A "timestamp" to avoid unnecessary flushes */ + u32 last_invalidated_id; + + /* Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */ + u32 counter_src0; + + /* Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */ + u32 counter_src1; + + /* + * Performance counter 0 value base/offset + * (allows accumulative reporting even after power off) + */ + u32 counter_value0_base; + + /* + * Performance counter 0 value base/offset + * (allows accumulative reporting even after power off) + */ + u32 counter_value1_base; + + /* Used by PM domains to link L2 caches of same domain */ + _mali_osk_list_t pm_domain_list; +}; + +_mali_osk_errcode_t mali_l2_cache_initialize(void); +void mali_l2_cache_terminate(void); + +struct mali_l2_cache_core *mali_l2_cache_create( + _mali_osk_resource_t *resource, u32 domain_index); +void mali_l2_cache_delete(struct mali_l2_cache_core *cache); + +MALI_STATIC_INLINE u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + return cache->core_id; +} + +MALI_STATIC_INLINE struct mali_pm_domain *mali_l2_cache_get_pm_domain( + struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + return cache->pm_domain; +} + +void mali_l2_cache_power_up(struct mali_l2_cache_core *cache); +void mali_l2_cache_power_down(struct mali_l2_cache_core *cache); + +void mali_l2_cache_core_set_counter_src( + struct mali_l2_cache_core *cache, u32 source_id, u32 counter); + +MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src0( + struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + return cache->counter_src0; +} + +MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src1( + struct mali_l2_cache_core *cache) +{ + MALI_DEBUG_ASSERT_POINTER(cache); + return cache->counter_src1; +} + +void mali_l2_cache_core_get_counter_values( + struct mali_l2_cache_core *cache, + u32 *src0, u32 *value0, u32 *src1, u32 *value1); + +struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index); +u32 mali_l2_cache_core_get_glob_num_l2_cores(void); + +struct mali_group *mali_l2_cache_get_group( + struct mali_l2_cache_core *cache, u32 index); + +void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache); +void mali_l2_cache_invalidate_conditional( + struct mali_l2_cache_core *cache, u32 id); + +void mali_l2_cache_invalidate_all(void); +void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages); + +#endif /* __MALI_KERNEL_L2_CACHE_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_mem_validation.c b/drivers/gpu/arm/mali400/common/mali_mem_validation.c --- a/drivers/gpu/arm/mali400/common/mali_mem_validation.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_mem_validation.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_mem_validation.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" + +#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF + +typedef struct { + u32 phys_base; /**< Mali physical base of the memory, page aligned */ + u32 size; /**< size in bytes of the memory, multiple of page size */ +} _mali_mem_validation_t; + +static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR }; + +_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size) +{ + /* Check that no other MEM_VALIDATION resources exist */ + if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) { + MALI_PRINT_ERROR(("Failed to add frame buffer memory; another range is already specified\n")); + return _MALI_OSK_ERR_FAULT; + } + + /* Check restrictions on page alignment */ + if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) || + (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) { + MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n")); + return _MALI_OSK_ERR_FAULT; + } + + mali_mem_validator.phys_base = start; + mali_mem_validator.size = size; + MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n", + mali_mem_validator.phys_base, mali_mem_validator.size)); + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size) +{ + if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */ + if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) && + (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) { + if ((phys_addr >= mali_mem_validator.phys_base) && + ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) && + (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) && + ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) { + MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1))); + return _MALI_OSK_ERR_OK; + } + } + } + + MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size)); + + return _MALI_OSK_ERR_FAULT; +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_mem_validation.h b/drivers/gpu/arm/mali400/common/mali_mem_validation.h --- a/drivers/gpu/arm/mali400/common/mali_mem_validation.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_mem_validation.h 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2011-2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_MEM_VALIDATION_H__ +#define __MALI_MEM_VALIDATION_H__ + +#include "mali_osk.h" + +_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size); +_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size); + +#endif /* __MALI_MEM_VALIDATION_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_mmu.c b/drivers/gpu/arm/mali400/common/mali_mmu.c --- a/drivers/gpu/arm/mali400/common/mali_mmu.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_mmu.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,433 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_ukk.h" + +#include "mali_mmu.h" +#include "mali_hw_core.h" +#include "mali_group.h" +#include "mali_mmu_page_directory.h" + +/** + * Size of the MMU registers in bytes + */ +#define MALI_MMU_REGISTERS_SIZE 0x24 + +/** + * MMU commands + * These are the commands that can be sent + * to the MMU unit. + */ +typedef enum mali_mmu_command { + MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */ + MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */ + MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */ + MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */ + MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */ + MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */ + MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */ +} mali_mmu_command; + +static void mali_mmu_probe_trigger(void *data); +static _mali_osk_errcode_t mali_mmu_probe_ack(void *data); + +MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu); + +/* page fault queue flush helper pages + * note that the mapping pointers are currently unused outside of the initialization functions */ +static mali_dma_addr mali_page_fault_flush_page_directory = MALI_INVALID_PAGE; +static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL; +static mali_dma_addr mali_page_fault_flush_page_table = MALI_INVALID_PAGE; +static mali_io_address mali_page_fault_flush_page_table_mapping = NULL; +static mali_dma_addr mali_page_fault_flush_data_page = MALI_INVALID_PAGE; +static mali_io_address mali_page_fault_flush_data_page_mapping = NULL; + +/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */ +static mali_dma_addr mali_empty_page_directory_phys = MALI_INVALID_PAGE; +static mali_io_address mali_empty_page_directory_virt = NULL; + + +_mali_osk_errcode_t mali_mmu_initialize(void) +{ + /* allocate the helper pages */ + mali_empty_page_directory_phys = mali_allocate_empty_page(&mali_empty_page_directory_virt); + if (0 == mali_empty_page_directory_phys) { + MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate empty page directory.\n")); + mali_empty_page_directory_phys = MALI_INVALID_PAGE; + return _MALI_OSK_ERR_NOMEM; + } + + 
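+	/*
+	 * Next, build the dedicated fault-flush pages. A sketch of the chain
+	 * that mali_create_fault_flush_pages() sets up (illustrative; the
+	 * actual fill loops are in mali_mmu_page_directory.c below):
+	 *
+	 *   page_directory[i] = page_table_phys | MALI_MMU_FLAGS_PRESENT;
+	 *   page_table[j]     = data_page_phys  | MALI_MMU_FLAGS_DEFAULT;
+	 *   data_page[k]      = 0;
+	 *
+	 * With every entry populated, any Mali virtual address resolves to
+	 * the single data page, so a faulting core can be drained without
+	 * touching real session memory.
+	 */
+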
if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory, + &mali_page_fault_flush_page_directory_mapping, + &mali_page_fault_flush_page_table, + &mali_page_fault_flush_page_table_mapping, + &mali_page_fault_flush_data_page, + &mali_page_fault_flush_data_page_mapping)) { + MALI_DEBUG_PRINT_ERROR(("Mali MMU: Could not allocate fault flush pages\n")); + mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt); + mali_empty_page_directory_phys = MALI_INVALID_PAGE; + mali_empty_page_directory_virt = NULL; + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +void mali_mmu_terminate(void) +{ + MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n")); + + /* Free global helper pages */ + mali_free_empty_page(mali_empty_page_directory_phys, mali_empty_page_directory_virt); + mali_empty_page_directory_phys = MALI_INVALID_PAGE; + mali_empty_page_directory_virt = NULL; + + /* Free the page fault flush pages */ + mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory, + &mali_page_fault_flush_page_directory_mapping, + &mali_page_fault_flush_page_table, + &mali_page_fault_flush_page_table_mapping, + &mali_page_fault_flush_data_page, + &mali_page_fault_flush_data_page_mapping); +} + +struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual) +{ + struct mali_mmu_core *mmu = NULL; + + MALI_DEBUG_ASSERT_POINTER(resource); + + MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description)); + + mmu = _mali_osk_calloc(1, sizeof(struct mali_mmu_core)); + if (NULL != mmu) { + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE)) { + if (_MALI_OSK_ERR_OK == mali_group_add_mmu_core(group, mmu)) { + if (is_virtual) { + /* Skip reset and IRQ setup for virtual MMU */ + return mmu; + } + + if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu)) { + /* Setup IRQ handlers (which will do IRQ probing if needed) */ + mmu->irq = _mali_osk_irq_init(resource->irq, + mali_group_upper_half_mmu, + group, + mali_mmu_probe_trigger, + mali_mmu_probe_ack, + mmu, + resource->description); + if (NULL != mmu->irq) { + return mmu; + } else { + MALI_PRINT_ERROR(("Mali MMU: Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description)); + } + } + mali_group_remove_mmu_core(group); + } else { + MALI_PRINT_ERROR(("Mali MMU: Failed to add core %s to group\n", mmu->hw_core.description)); + } + mali_hw_core_delete(&mmu->hw_core); + } + + _mali_osk_free(mmu); + } else { + MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n")); + } + + return NULL; +} + +void mali_mmu_delete(struct mali_mmu_core *mmu) +{ + if (NULL != mmu->irq) { + _mali_osk_irq_term(mmu->irq); + } + + mali_hw_core_delete(&mmu->hw_core); + _mali_osk_free(mmu); +} + +static void mali_mmu_enable_paging(struct mali_mmu_core *mmu) +{ + int i; + + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING); + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) { + if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED) { + break; + } + } + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); + } +} + +/** + * Issues the enable stall command to the MMU and waits for HW to complete the request + * @param mmu The MMU to enable paging for + * @return MALI_TRUE 
if HW stall was successfully engaged, otherwise MALI_FALSE (request timed out)
+ */
+static mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+	int i;
+	u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+	if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+		MALI_DEBUG_PRINT(4, ("MMU stall is implicit when paging is not enabled.\n"));
+		return MALI_TRUE;
+	}
+
+	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+		MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+		return MALI_FALSE;
+	}
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+		mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+		if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+			break;
+		}
+		if ((mmu_status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) && (0 == (mmu_status & MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE))) {
+			break;
+		}
+		if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+			break;
+		}
+	}
+	if (MALI_REG_POLL_COUNT_FAST == i) {
+		MALI_DEBUG_PRINT(2, ("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+		return MALI_FALSE;
+	}
+
+	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+		MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it has a pagefault.\n"));
+		return MALI_FALSE;
+	}
+
+	return MALI_TRUE;
+}
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to disable the stall on
+ */
+static void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+	int i;
+	u32 mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+	if (0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+		MALI_DEBUG_PRINT(3, ("MMU disable stall skipped since paging was not enabled.\n"));
+		return;
+	}
+	if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+		MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+		return;
+	}
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+		u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+		if (0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE)) {
+			break;
+		}
+		if (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
+			break;
+		}
+		if (0 == (status & MALI_MMU_STATUS_BIT_PAGING_ENABLED)) {
+			break;
+		}
+	}
+	if (MALI_REG_POLL_COUNT_FAST == i)
+		MALI_DEBUG_PRINT(1, ("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+	MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+	int i;
+
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+	MALI_DEBUG_ASSERT(0xCAFEB000 == mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR));
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+	for (i = 0; i < MALI_REG_POLL_COUNT_FAST; ++i) {
+		if
(mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0) { + break; + } + } + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS))); + return _MALI_OSK_ERR_FAULT; + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu) +{ + _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; + mali_bool stall_success; + MALI_DEBUG_ASSERT_POINTER(mmu); + + stall_success = mali_mmu_enable_stall(mmu); + if (!stall_success) { + err = _MALI_OSK_ERR_BUSY; + } + + MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description)); + + if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) { + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR); + /* no session is active, so just activate the empty page directory */ + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory_phys); + mali_mmu_enable_paging(mmu); + err = _MALI_OSK_ERR_OK; + } + mali_mmu_disable_stall(mmu); + + return err; +} + +mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu) +{ + mali_bool stall_success = mali_mmu_enable_stall(mmu); + + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE); + + if (MALI_FALSE == stall_success) { + /* False means that it is in Pagefault state. Not possible to disable_stall then */ + return MALI_FALSE; + } + + mali_mmu_disable_stall(mmu); + return MALI_TRUE; +} + +void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu) +{ + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE); +} + + +void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address) +{ + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address)); +} + +static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory) +{ + /* The MMU must be in stalled or page fault mode, for this writing to work */ + MALI_DEBUG_ASSERT(0 != (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) + & (MALI_MMU_STATUS_BIT_STALL_ACTIVE | MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE))); + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory); + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE); + +} + +void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir) +{ + mali_bool stall_success; + MALI_DEBUG_ASSERT_POINTER(mmu); + + MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description)); + + stall_success = mali_mmu_enable_stall(mmu); + MALI_DEBUG_ASSERT(stall_success); + MALI_IGNORE(stall_success); + mali_mmu_activate_address_space(mmu, pagedir->page_directory); + mali_mmu_disable_stall(mmu); +} + +void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu) +{ + mali_bool stall_success; + + MALI_DEBUG_ASSERT_POINTER(mmu); + MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description)); + + stall_success = mali_mmu_enable_stall(mmu); + + /* This function can only be called when the core is idle, so it could not fail. 
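+	 * (Stalling an idle MMU is always expected to succeed; see
+	 * mali_mmu_enable_stall() above.)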
*/
+	MALI_DEBUG_ASSERT(stall_success);
+	MALI_IGNORE(stall_success);
+
+	mali_mmu_activate_address_space(mmu, mali_empty_page_directory_phys);
+	mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu)
+{
+	mali_bool stall_success;
+	MALI_DEBUG_ASSERT_POINTER(mmu);
+
+	MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+	stall_success = mali_mmu_enable_stall(mmu);
+	/* This function is expected to fail the stalling, since the MMU might be in page fault mode when it is called */
+	mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+	if (MALI_TRUE == stall_success) mali_mmu_disable_stall(mmu);
+}
+
+/* Called during IRQ probing, when we want the MMU to raise an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+	mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Called when the IRQ probe wants the MMU to acknowledge an interrupt from the HW */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+	struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+	u32 int_stat;
+
+	int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+	MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+	if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT) {
+		MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+	} else {
+		MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+	}
+
+	if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR) {
+		MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+		mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+	} else {
+		MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+	}
+
+	if ((int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+	    (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
+		return _MALI_OSK_ERR_OK;
+	}
+
+	return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+	MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_mmu.h b/drivers/gpu/arm/mali400/common/mali_mmu.h
--- a/drivers/gpu/arm/mali400/common/mali_mmu.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_mmu.h	2018-05-06 08:49:49.174695256 +0200
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_hw_core.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+	MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+	MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+	MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+	MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+	MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x0010, /**< Used to invalidate the mapping of a single page from the MMU */
+	MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+	MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+	MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+	MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt {
+	MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+	MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+typedef enum mali_mmu_status_bits {
+	MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+	MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+	MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+	MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+	MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+	MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+	MALI_MMU_STATUS_BIT_STALL_NOT_ACTIVE = 1 << 31,
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the MMU struct
+ * Used to track an MMU unit in the system.
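+ * (one instance is created per physical MMU resource by mali_mmu_create())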
+ * Contains information about the mapping of the registers + */ +struct mali_mmu_core { + struct mali_hw_core hw_core; /**< Common for all HW cores */ + _mali_osk_irq_t *irq; /**< IRQ handler */ +}; + +_mali_osk_errcode_t mali_mmu_initialize(void); + +void mali_mmu_terminate(void); + +struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual); +void mali_mmu_delete(struct mali_mmu_core *mmu); + +_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu); +mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu); +void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu); +void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address); + +void mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir); +void mali_mmu_activate_empty_page_directory(struct mali_mmu_core *mmu); +void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core *mmu); + +void mali_mmu_page_fault_done(struct mali_mmu_core *mmu); + +MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(struct mali_mmu_core *mmu) +{ + u32 rawstat_used = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT); + if (0 == rawstat_used) { + return MALI_INTERRUPT_RESULT_NONE; + } + + return MALI_INTERRUPT_RESULT_ERROR; +} + + +MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu) +{ + return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS); +} + +MALI_STATIC_INLINE u32 mali_mmu_get_rawstat(struct mali_mmu_core *mmu) +{ + return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT); +} + +MALI_STATIC_INLINE void mali_mmu_mask_all_interrupts(struct mali_mmu_core *mmu) +{ + mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0); +} + +MALI_STATIC_INLINE u32 mali_mmu_get_status(struct mali_mmu_core *mmu) +{ + return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS); +} + +MALI_STATIC_INLINE u32 mali_mmu_get_page_fault_addr(struct mali_mmu_core *mmu) +{ + return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR); +} + +#endif /* __MALI_MMU_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_mmu_page_directory.c b/drivers/gpu/arm/mali400/common/mali_mmu_page_directory.c --- a/drivers/gpu/arm/mali400/common/mali_mmu_page_directory.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_mmu_page_directory.c 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,495 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_ukk.h" +#include "mali_uk_types.h" +#include "mali_mmu_page_directory.h" +#include "mali_memory.h" +#include "mali_l2_cache.h" + +static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data); + +u32 mali_allocate_empty_page(mali_io_address *virt_addr) +{ + _mali_osk_errcode_t err; + mali_io_address mapping; + mali_dma_addr address; + + if (_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping)) { + /* Allocation failed */ + MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to get table page for empty pgdir\n")); + return 0; + } + + MALI_DEBUG_ASSERT_POINTER(mapping); + + err = fill_page(mapping, 0); + if (_MALI_OSK_ERR_OK != err) { + mali_mmu_release_table_page(address, mapping); + MALI_DEBUG_PRINT(2, ("Mali MMU: Failed to zero page\n")); + return 0; + } + + *virt_addr = mapping; + return address; +} + +void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr) +{ + if (MALI_INVALID_PAGE != address) { + mali_mmu_release_table_page(address, virt_addr); + } +} + +_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory, + mali_io_address *page_directory_mapping, + mali_dma_addr *page_table, mali_io_address *page_table_mapping, + mali_dma_addr *data_page, mali_io_address *data_page_mapping) +{ + _mali_osk_errcode_t err; + + err = mali_mmu_get_table_page(data_page, data_page_mapping); + if (_MALI_OSK_ERR_OK == err) { + err = mali_mmu_get_table_page(page_table, page_table_mapping); + if (_MALI_OSK_ERR_OK == err) { + err = mali_mmu_get_table_page(page_directory, page_directory_mapping); + if (_MALI_OSK_ERR_OK == err) { + fill_page(*data_page_mapping, 0); + fill_page(*page_table_mapping, *data_page | MALI_MMU_FLAGS_DEFAULT); + fill_page(*page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT); + MALI_SUCCESS; + } + mali_mmu_release_table_page(*page_table, *page_table_mapping); + *page_table = MALI_INVALID_PAGE; + } + mali_mmu_release_table_page(*data_page, *data_page_mapping); + *data_page = MALI_INVALID_PAGE; + } + return err; +} + +void mali_destroy_fault_flush_pages( + mali_dma_addr *page_directory, mali_io_address *page_directory_mapping, + mali_dma_addr *page_table, mali_io_address *page_table_mapping, + mali_dma_addr *data_page, mali_io_address *data_page_mapping) +{ + if (MALI_INVALID_PAGE != *page_directory) { + mali_mmu_release_table_page(*page_directory, *page_directory_mapping); + *page_directory = MALI_INVALID_PAGE; + *page_directory_mapping = NULL; + } + + if (MALI_INVALID_PAGE != *page_table) { + mali_mmu_release_table_page(*page_table, *page_table_mapping); + *page_table = MALI_INVALID_PAGE; + *page_table_mapping = NULL; + } + + if (MALI_INVALID_PAGE != *data_page) { + mali_mmu_release_table_page(*data_page, *data_page_mapping); + *data_page = MALI_INVALID_PAGE; + *data_page_mapping = NULL; + } +} + +static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data) +{ + int i; + MALI_DEBUG_ASSERT_POINTER(mapping); + + for (i = 0; i < MALI_MMU_PAGE_SIZE / 4; i++) { + _mali_osk_mem_iowrite32_relaxed(mapping, i * sizeof(u32), data); + } + _mali_osk_mem_barrier(); + MALI_SUCCESS; +} + +_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size) +{ + const int first_pde = MALI_MMU_PDE_ENTRY(mali_address); + const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1); + _mali_osk_errcode_t err; + mali_io_address pde_mapping; + mali_dma_addr pde_phys; + int i, page_count; + u32 start_address; + if 
(last_pde < first_pde) + return _MALI_OSK_ERR_INVALID_ARGS; + + for (i = first_pde; i <= last_pde; i++) { + if (0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, + i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)) { + /* Page table not present */ + MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]); + MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]); + + err = mali_mmu_get_table_page(&pde_phys, &pde_mapping); + if (_MALI_OSK_ERR_OK != err) { + MALI_PRINT_ERROR(("Failed to allocate page table page.\n")); + return err; + } + pagedir->page_entries_mapped[i] = pde_mapping; + + /* Update PDE, mark as present */ + _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), + pde_phys | MALI_MMU_FLAGS_PRESENT); + + MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]); + } + + if (first_pde == last_pde) { + pagedir->page_entries_usage_count[i] += size / MALI_MMU_PAGE_SIZE; + } else if (i == first_pde) { + start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE; + page_count = (start_address + MALI_MMU_VIRTUAL_PAGE_SIZE - mali_address) / MALI_MMU_PAGE_SIZE; + pagedir->page_entries_usage_count[i] += page_count; + } else if (i == last_pde) { + start_address = i * MALI_MMU_VIRTUAL_PAGE_SIZE; + page_count = (mali_address + size - start_address) / MALI_MMU_PAGE_SIZE; + pagedir->page_entries_usage_count[i] += page_count; + } else { + pagedir->page_entries_usage_count[i] = 1024; + } + } + _mali_osk_write_mem_barrier(); + + return _MALI_OSK_ERR_OK; +} + +MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size) +{ + int i; + const int first_pte = MALI_MMU_PTE_ENTRY(mali_address); + const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1); + + for (i = first_pte; i <= last_pte; i++) { + _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0); + } +} + +static u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index) +{ + return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, + index * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK); +} + + +_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size) +{ + const int first_pde = MALI_MMU_PDE_ENTRY(mali_address); + const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1); + u32 left = size; + int i; + mali_bool pd_changed = MALI_FALSE; + u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */ + u32 num_pages_inv = 0; + mali_bool invalidate_all = MALI_FALSE; /* safety mechanism in case page_entries_usage_count is unreliable */ + + /* For all page directory entries in range. 
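+	 * Each PDE covers MALI_MMU_VIRTUAL_PAGE_SIZE (4 MiB) of Mali virtual
+	 * address space; the per-PDE usage count decides when a whole page
+	 * table can be released.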
*/ + for (i = first_pde; i <= last_pde; i++) { + u32 size_in_pde, offset; + + MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]); + MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]); + + /* Offset into page table, 0 if mali_address is 4MiB aligned */ + offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1)); + if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset) { + size_in_pde = left; + } else { + size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset; + } + + pagedir->page_entries_usage_count[i] -= size_in_pde / MALI_MMU_PAGE_SIZE; + + /* If entire page table is unused, free it */ + if (0 == pagedir->page_entries_usage_count[i]) { + u32 page_phys; + void *page_virt; + MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n")); + /* last reference removed, no need to zero out each PTE */ + + page_phys = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i * sizeof(u32))); + page_virt = pagedir->page_entries_mapped[i]; + pagedir->page_entries_mapped[i] = NULL; + _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0); + + mali_mmu_release_table_page(page_phys, page_virt); + pd_changed = MALI_TRUE; + } else { + MALI_DEBUG_ASSERT(num_pages_inv < 2); + if (num_pages_inv < 2) { + pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i); + num_pages_inv++; + } else { + invalidate_all = MALI_TRUE; + } + + /* If part of the page table is still in use, zero the relevant PTEs */ + mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde); + } + + left -= size_in_pde; + mali_address += size_in_pde; + } + _mali_osk_write_mem_barrier(); + + /* L2 pages invalidation */ + if (MALI_TRUE == pd_changed) { + MALI_DEBUG_ASSERT(num_pages_inv < 3); + if (num_pages_inv < 3) { + pages_to_invalidate[num_pages_inv] = pagedir->page_directory; + num_pages_inv++; + } else { + invalidate_all = MALI_TRUE; + } + } + + if (invalidate_all) { + mali_l2_cache_invalidate_all(); + } else { + mali_l2_cache_invalidate_all_pages(pages_to_invalidate, num_pages_inv); + } + + MALI_SUCCESS; +} + +struct mali_page_directory *mali_mmu_pagedir_alloc(void) +{ + struct mali_page_directory *pagedir; + _mali_osk_errcode_t err; + mali_dma_addr phys; + + pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory)); + if (NULL == pagedir) { + return NULL; + } + + err = mali_mmu_get_table_page(&phys, &pagedir->page_directory_mapped); + if (_MALI_OSK_ERR_OK != err) { + _mali_osk_free(pagedir); + return NULL; + } + + pagedir->page_directory = (u32)phys; + + /* Zero page directory */ + fill_page(pagedir->page_directory_mapped, 0); + + return pagedir; +} + +void mali_mmu_pagedir_free(struct mali_page_directory *pagedir) +{ + const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]); + int i; + + /* Free referenced page tables and zero PDEs. */ + for (i = 0; i < num_page_table_entries; i++) { + if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32( + pagedir->page_directory_mapped, + sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT)) { + mali_dma_addr phys = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, + i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK; + _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0); + mali_mmu_release_table_page(phys, pagedir->page_entries_mapped[i]); + } + } + _mali_osk_write_mem_barrier(); + + /* Free the page directory page. 
*/ + mali_mmu_release_table_page(pagedir->page_directory, pagedir->page_directory_mapped); + + _mali_osk_free(pagedir); +} + + +void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, + mali_dma_addr phys_address, u32 size, u32 permission_bits) +{ + u32 end_address = mali_address + size; + u32 mali_phys = (u32)phys_address; + + /* Map physical pages into MMU page tables */ + for (; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, mali_phys += MALI_MMU_PAGE_SIZE) { + MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]); + _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)], + MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32), + mali_phys | permission_bits); + } +} + +void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr) +{ +#if defined(DEBUG) + u32 pde_index, pte_index; + u32 pde, pte; + + pde_index = MALI_MMU_PDE_ENTRY(fault_addr); + pte_index = MALI_MMU_PTE_ENTRY(fault_addr); + + + pde = _mali_osk_mem_ioread32(pagedir->page_directory_mapped, + pde_index * sizeof(u32)); + + + if (pde & MALI_MMU_FLAGS_PRESENT) { + u32 pte_addr = MALI_MMU_ENTRY_ADDRESS(pde); + + pte = _mali_osk_mem_ioread32(pagedir->page_entries_mapped[pde_index], + pte_index * sizeof(u32)); + + MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table present: %08x\n" + "\t\tPTE: %08x, page %08x is %s\n", + fault_addr, pte_addr, pte, + MALI_MMU_ENTRY_ADDRESS(pte), + pte & MALI_MMU_FLAGS_DEFAULT ? "rw" : "not present")); + } else { + MALI_DEBUG_PRINT(2, ("\tMMU: %08x: Page table not present: %08x\n", + fault_addr, pde)); + } +#else + MALI_IGNORE(pagedir); + MALI_IGNORE(fault_addr); +#endif +} + +/* For instrumented */ +struct dump_info { + u32 buffer_left; + u32 register_writes_size; + u32 page_table_dump_size; + u32 *buffer; +}; + +static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info) +{ + if (NULL != info) { + info->register_writes_size += sizeof(u32) * 2; /* two 32-bit words */ + + if (NULL != info->buffer) { + /* check that we have enough space */ + if (info->buffer_left < sizeof(u32) * 2) MALI_ERROR(_MALI_OSK_ERR_NOMEM); + + *info->buffer = where; + info->buffer++; + + *info->buffer = what; + info->buffer++; + + info->buffer_left -= sizeof(u32) * 2; + } + } + + MALI_SUCCESS; +} + +static _mali_osk_errcode_t mali_mmu_dump_page(mali_io_address page, u32 phys_addr, struct dump_info *info) +{ + if (NULL != info) { + /* 4096 for the page and 4 bytes for the address */ + const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4; + const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE; + const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4; + + info->page_table_dump_size += dump_size_in_bytes; + + if (NULL != info->buffer) { + if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM); + + *info->buffer = phys_addr; + info->buffer++; + + _mali_osk_memcpy(info->buffer, page, page_size_in_bytes); + info->buffer += page_size_in_elements; + + info->buffer_left -= dump_size_in_bytes; + } + } + + MALI_SUCCESS; +} + +static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info *info) +{ + MALI_DEBUG_ASSERT_POINTER(pagedir); + MALI_DEBUG_ASSERT_POINTER(info); + + if (NULL != pagedir->page_directory_mapped) { + int i; + + MALI_CHECK_NO_ERROR( + mali_mmu_dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info) + ); + + for (i = 0; i < 1024; i++) { + if (NULL != 
pagedir->page_entries_mapped[i]) { + MALI_CHECK_NO_ERROR( + mali_mmu_dump_page(pagedir->page_entries_mapped[i], + _mali_osk_mem_ioread32(pagedir->page_directory_mapped, + i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info) + ); + } + } + } + + MALI_SUCCESS; +} + +static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info *info) +{ + MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory, + "set the page directory address", info)); + MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info)); + MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info)); + MALI_SUCCESS; +} + +_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args) +{ + struct dump_info info = { 0, 0, 0, NULL }; + struct mali_session_data *session_data; + + session_data = (struct mali_session_data *)(uintptr_t)(args->ctx); + MALI_DEBUG_ASSERT_POINTER(session_data); + MALI_DEBUG_ASSERT_POINTER(args); + + MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info)); + MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info)); + args->size = info.register_writes_size + info.page_table_dump_size; + MALI_SUCCESS; +} + +_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args) +{ + struct dump_info info = { 0, 0, 0, NULL }; + struct mali_session_data *session_data; + + MALI_DEBUG_ASSERT_POINTER(args); + + session_data = (struct mali_session_data *)(uintptr_t)(args->ctx); + MALI_DEBUG_ASSERT_POINTER(session_data); + + info.buffer_left = args->size; + info.buffer = (u32 *)(uintptr_t)args->buffer; + + args->register_writes = (uintptr_t)info.buffer; + MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info)); + + args->page_table_dump = (uintptr_t)info.buffer; + MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info)); + + args->register_writes_size = info.register_writes_size; + args->page_table_dump_size = info.page_table_dump_size; + + MALI_SUCCESS; +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_mmu_page_directory.h b/drivers/gpu/arm/mali400/common/mali_mmu_page_directory.h --- a/drivers/gpu/arm/mali400/common/mali_mmu_page_directory.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_mmu_page_directory.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from a PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * MMU page directory/table entry flags
+ */
+typedef enum mali_mmu_entry_flags {
+	MALI_MMU_FLAGS_PRESENT = 0x01,
+	MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+	MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+	MALI_MMU_FLAGS_OVERRIDE_CACHE = 0x8,
+	MALI_MMU_FLAGS_WRITE_CACHEABLE = 0x10,
+	MALI_MMU_FLAGS_WRITE_ALLOCATE = 0x20,
+	MALI_MMU_FLAGS_WRITE_BUFFERABLE = 0x40,
+	MALI_MMU_FLAGS_READ_CACHEABLE = 0x80,
+	MALI_MMU_FLAGS_READ_ALLOCATE = 0x100,
+	MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+	MALI_MMU_FLAGS_PRESENT | \
+	MALI_MMU_FLAGS_READ_PERMISSION | \
+	MALI_MMU_FLAGS_WRITE_PERMISSION | \
+	MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+	MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+	MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+	MALI_MMU_FLAGS_READ_CACHEABLE | \
+	MALI_MMU_FLAGS_READ_ALLOCATE )
+
+#define MALI_MMU_FLAGS_DEFAULT ( \
+	MALI_MMU_FLAGS_PRESENT | \
+	MALI_MMU_FLAGS_READ_PERMISSION | \
+	MALI_MMU_FLAGS_WRITE_PERMISSION )
+
+
+struct mali_page_directory {
+	u32 page_directory; /**< Physical address of the memory session's page directory */
+	mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+	mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exist in the page directory, mapped into the kernel's address space */
+	u32 page_entries_usage_count[1024]; /**< Tracks the usage count of the page table pages, so they can be released when the last reference is dropped */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned.
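+ *
+ * A typical lifecycle, as an illustrative sketch (not a contract stated by
+ * this header):
+ *
+ *   mali_mmu_pagedir_map(pagedir, mali_addr, size);
+ *   mali_mmu_pagedir_update(pagedir, mali_addr, phys_addr, size,
+ *                           MALI_MMU_FLAGS_DEFAULT);
+ *   ...
+ *   mali_mmu_pagedir_unmap(pagedir, mali_addr, size);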
*/ +void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, + mali_dma_addr phys_address, u32 size, u32 permission_bits); + +u32 mali_allocate_empty_page(mali_io_address *virtual); +void mali_free_empty_page(mali_dma_addr address, mali_io_address virt_addr); +_mali_osk_errcode_t mali_create_fault_flush_pages(mali_dma_addr *page_directory, + mali_io_address *page_directory_mapping, + mali_dma_addr *page_table, mali_io_address *page_table_mapping, + mali_dma_addr *data_page, mali_io_address *data_page_mapping); +void mali_destroy_fault_flush_pages( + mali_dma_addr *page_directory, mali_io_address *page_directory_mapping, + mali_dma_addr *page_table, mali_io_address *page_table_mapping, + mali_dma_addr *data_page, mali_io_address *data_page_mapping); + +struct mali_page_directory *mali_mmu_pagedir_alloc(void); +void mali_mmu_pagedir_free(struct mali_page_directory *pagedir); + +void mali_mmu_pagedir_diag(struct mali_page_directory *pagedir, u32 fault_addr); + +#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_osk_bitops.h b/drivers/gpu/arm/mali400/common/mali_osk_bitops.h --- a/drivers/gpu/arm/mali400/common/mali_osk_bitops.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_osk_bitops.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit(u32 bit, u32 *addr)
+{
+	MALI_DEBUG_ASSERT(bit < 32);
+	MALI_DEBUG_ASSERT(NULL != addr);
+
+	(*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit(u32 bit, u32 *addr)
+{
+	MALI_DEBUG_ASSERT(bit < 32);
+	MALI_DEBUG_ASSERT(NULL != addr);
+
+	(*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit(u32 bit, u32 value)
+{
+	MALI_DEBUG_ASSERT(bit < 32);
+	return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit(u32 value)
+{
+	u32 inverted;
+	u32 negated;
+	u32 isolated;
+	u32 leading_zeros;
+
+	/* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+	inverted = ~value; /* zzz...z1000...0 */
+	/* Using count_trailing_zeros on inverted value -
+	 * See ARM System Developers Guide for details of count_trailing_zeros */
+
+	/* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+	negated = (u32)-inverted; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+	/* negated = xxx...x1000...0 */
+
+	isolated = negated & inverted; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+	/* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+	 * Note that the output is zero if value was all 1s */
+
+	leading_zeros = _mali_osk_clz(isolated);
+
+	return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit(u32 nr, u32 *addr)
+{
+	addr += nr >> 5; /* find the correct word */
+	nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+	_mali_internal_clear_bit(nr, addr);
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit(u32 nr, u32 *addr)
+{
+	addr += nr >> 5; /* find the correct word */
+	nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+	_mali_internal_set_bit(nr, addr);
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
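+ *
+ * Example (illustrative): with u32 words[2] = { 0x0, 0x2 },
+ * _mali_osk_test_bit(33, words) inspects bit 1 of words[1] and returns
+ * the non-zero value 0x2.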
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit(u32 nr, u32 *addr)
+{
+	addr += nr >> 5; /* find the correct word */
+	nr = nr & ((1 << 5) - 1); /* The bit number within the word */
+
+	return _mali_internal_test_bit(nr, *addr);
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit(const u32 *addr, u32 maxbit)
+{
+	u32 total;
+
+	for (total = 0; total < maxbit; total += 32, ++addr) {
+		int result;
+		result = _mali_internal_find_first_zero_bit(*addr);
+
+		/* non-negative signifies the bit was found */
+		if (result >= 0) {
+			total += (u32)result;
+			break;
+		}
+	}
+
+	/* Now check if we reached maxbit or above */
+	if (total >= maxbit) {
+		total = maxbit;
+	}
+
+	return total; /* either the found bit nr, or maxbit if not found */
+}
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_osk.h b/drivers/gpu/arm/mali400/common/mali_osk.h
--- a/drivers/gpu/arm/mali400/common/mali_osk.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_osk.h	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,1389 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#include <linux/seq_file.h>
+#include "mali_osk_types.h"
+#include "mali_osk_specific.h" /* include any per-os specifics */
+#include "mali_osk_locks.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @addtogroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner((_mali_osk_lock_debug_t *)l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+#else
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) do {} while(0)
+#endif
+
+#define _mali_osk_ctxprintf seq_printf
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ *   ptr == _MALI_OSK_CONTAINER_OF(&ptr->member, type, member)
+ *
+ * when ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
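+ *
+ * For example (an illustrative sketch; 'wrapper', 'node' and 'p' are
+ * made-up names):
+ *
+ *   struct wrapper { u32 id; _mali_osk_list_t node; };
+ *   _mali_osk_list_t *p = ...;
+ *   struct wrapper *w = _MALI_OSK_CONTAINER_OF(p, struct wrapper, node);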
+ * + * @note no type or memory checking occurs to ensure that a wrapper structure + * does in fact exist, and that it is being recovered with respect to the + * correct member. + * + * @param ptr the pointer to the member that is contained within the larger + * structure + * @param type the type of the structure that contains the member + * @param member the name of the member in the structure that ptr points to. + * @return a pointer to a \a type object which contains \a member, as pointed + * to by \a ptr. + */ +#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \ + ((type *)( ((char *)ptr) - offsetof(type,member) )) + +/** @addtogroup _mali_osk_wq + * @{ */ + +/** @brief Initialize work queues (for deferred work) + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_wq_init(void); + +/** @brief Terminate work queues (for deferred work) + */ +void _mali_osk_wq_term(void); + +/** @brief Create work in the work queue + * + * Creates a work object which can be scheduled in the work queue. When + * scheduled, \a handler will be called with \a data as the argument. + * + * Refer to \ref _mali_osk_wq_schedule_work() for details on how work + * is scheduled in the queue. + * + * The returned pointer must be freed with \ref _mali_osk_wq_delete_work() + * when no longer needed. + */ +_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data); + +/** @brief A high priority version of \a _mali_osk_wq_create_work() + * + * Creates a work object which can be scheduled in the high priority work queue. + * + * This is unfortunately needed to get low latency scheduling of the Mali cores. Normally we would + * schedule the next job in hw_irq or tasklet, but often we can't since we need to synchronously map + * and unmap shared memory when a job is connected to external fences (timelines). And this requires + * taking a mutex. + * + * We do signal a lot of other (low priority) work also as part of the job being finished, and if we + * don't set this Mali scheduling thread as high priority, we see that the CPU scheduler often runs + * random things instead of starting the next GPU job when the GPU is idle. So setting the gpu + * scheduler to high priority does give a visually more responsive system. + * + * Start the high priority work with: \a _mali_osk_wq_schedule_work_high_pri() + */ +_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data); + +/** @brief Delete a work object + * + * This will flush the work queue to ensure that the work handler will not + * be called after deletion. + */ +void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work); + +/** @brief Delete a work object + * + * This will NOT flush the work queue, so only call this if you are sure that the work handler will + * not be called after deletion. + */ +void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work); + +/** @brief Cause a queued, deferred call of the work handler + * + * _mali_osk_wq_schedule_work provides a mechanism for enqueuing deferred calls + * to the work handler. After calling \ref _mali_osk_wq_schedule_work(), the + * work handler will be scheduled to run at some point in the future. + * + * Typically this is called by the IRQ upper-half to defer further processing of + * IRQ-related work to the IRQ bottom-half handler. This is necessary for work + * that cannot be done in an IRQ context by the IRQ upper-half handler. 
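+ * (Taking a mutex or synchronously mapping memory are typical examples of
+ * such work.)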
+ * Timer callbacks also use this mechanism, because they are treated as though
+ * they operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_wq_schedule_work() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_wq_schedule_work() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the work handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_wq_schedule_work()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The work handler callback (of type _mali_osk_wq_work_handler_t) locks
+ *   a mutex
+ * - And, at the same time, the caller of _mali_osk_wq_schedule_work() also
+ *   holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_wq_schedule_work() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * work will be lost.
+ *
+ * The queue that _mali_osk_wq_schedule_work implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_wq_schedule_work
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a kernel-process context). The reader is a single thread that
+ * handles all OSK-registered work.
+ *
+ * @param work a pointer to the _mali_osk_wq_work_t object corresponding to the
+ * work to begin processing.
+ */
+void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work);
+
+/** @brief Cause a queued, deferred call of the high priority work handler
+ *
+ * Function is the same as \a _mali_osk_wq_schedule_work() with the only
+ * difference that it runs at a high (real-time) priority on the system.
+ *
+ * Should only be used as a substitute for doing the same work in interrupt
+ * context.
+ *
+ * This is allowed to sleep, but the work should be small since it will block
+ * all other applications.
+ */
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work);
+
+/** @brief Flush the work queue
+ *
+ * This will flush the OSK work queue, ensuring all work in the queue has
+ * completed before returning.
+ *
+ * Since this blocks on the completion of work in the work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any registered work handler. To do so may cause a deadlock.
+ *
+ */
+void _mali_osk_wq_flush(void);
+
+/** @brief Create work in the delayed work queue
+ *
+ * Creates a work object which can be scheduled in the work queue. When
+ * scheduled, a timer will be started and the \a handler will be called with
+ * \a data as the argument when the timer expires.
+ *
+ * Refer to \ref _mali_osk_wq_delayed_schedule_work() for details on how work
+ * is scheduled in the queue.
+ *
+ * The returned pointer must be freed with \ref _mali_osk_wq_delayed_delete_work_nonflush()
+ * when no longer needed.
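+ *
+ * Example (illustrative; my_handler, my_data and delay_in_jiffies are
+ * placeholders):
+ *
+ *   _mali_osk_wq_delayed_work_t *work =
+ *           _mali_osk_wq_delayed_create_work(my_handler, my_data);
+ *   _mali_osk_wq_delayed_schedule_work(work, delay_in_jiffies);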
+ */
+_mali_osk_wq_delayed_work_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data);
+
+/** @brief Delete a work object
+ *
+ * This will NOT flush the work queue, so only call this if you are sure that the work handler will
+ * not be called after deletion.
+ */
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work without waiting for it to finish
+ *
+ * Note that the \a work callback function may still be running on return from
+ * _mali_osk_wq_delayed_cancel_work_async().
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Cancel a delayed work and wait for it to finish
+ *
+ * When this function returns, the \a work was either cancelled or it finished running.
+ *
+ * @param work The delayed work to be cancelled
+ */
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work);
+
+/** @brief Put \a work task in global workqueue after delay
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ *
+ * If \a work was already on a queue, this function will return without doing anything.
+ *
+ * @param work job to be done
+ * @param delay number of jiffies to wait or 0 for immediate execution
+ */
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay);
+
+/** @} */ /* end group _mali_osk_wq */
+
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * Registers an interrupt handler \a uhandler for the given IRQ number \a irqnum.
+ * \a data will be passed as argument to the handler when an interrupt occurs.
+ *
+ * If \a irqnum is -1, _mali_osk_irq_init will probe for the IRQ number using
+ * the supplied \a trigger_func and \a ack_func. These functions will also
+ * receive \a data as their argument.
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and
+ * trigger_func and ack_func must be non-NULL.
+ * @param uhandler The interrupt handler, corresponding to an ISR handler for
+ * the resource
+ * @param int_data resource specific data, which will be passed to uhandler
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param probe_data resource-specific data, which will be passed to
+ * (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description);
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then wait for any
+ * currently executing IRQ handlers to complete.
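+ *
+ * A typical init/term pairing, sketched with hypothetical names (no IRQ
+ * probing, so the trigger/ack functions are NULL):
+ * @code
+ *     _mali_osk_irq_t *irq = _mali_osk_irq_init(irq_number, my_uhandler, my_data,
+ *                                               NULL, NULL, NULL, "my_mali_core");
+ *     if (NULL != irq) {
+ *         // ... interrupts are delivered to my_uhandler ...
+ *         _mali_osk_irq_term(irq);
+ *     }
+ * @endcode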
+ *
+ * @note If work is deferred to an IRQ bottom-half handler through
+ * \ref _mali_osk_wq_schedule_work(), be sure to flush any remaining work
+ * with \ref _mali_osk_wq_flush() or (implicitly) with \ref _mali_osk_wq_delete_work()
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term(_mali_osk_irq_t *irq);
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom);
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom);
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter */
+u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom);
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter with.
+ */
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom);
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term(_mali_osk_atomic_t *atom);
+
+/** @brief Assign a new value to an atomic counter, and return the old value
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the new value to assign to the atomic counter
+ * @return the old value of the atomic counter
+ */
+u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val);
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
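+ *
+ * Illustrative sketch (the record type and count are hypothetical):
+ * @code
+ *     struct my_record *table = _mali_osk_calloc(16, sizeof(struct my_record));
+ *     if (NULL != table) {
+ *         // all 16 records start out zero-initialized
+ *         _mali_osk_free(table);
+ *     }
+ * @endcode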
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc(u32 n, u32 size);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free(void *ptr);
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but supports bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_vfree().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_vfree().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc(u32 size);
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree(void *ptr);
+
+/** @brief Copies memory.
+ *
+ * Copies \a len bytes from the buffer pointed to by the parameter \a src
+ * directly to the buffer pointed to by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
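+ *
+ * Illustrative sketch (buffers are hypothetical and must not overlap):
+ * @code
+ *     u8 dst[8];
+ *     const u8 src[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ *     _mali_osk_memcpy(dst, src, sizeof(dst));
+ * @endcode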
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy(void *dst, const void *src, u32 len);
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value.
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset(void *s, u32 c, u32 n);
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSs bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * avoid triggering the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated(u32 max_allocated);
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier(void);
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier(void);
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K.
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address mapping);
+
+/** @brief Allocate and map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion(u32 *phys, u32 size);
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion(u32 phys, u32 size, mali_io_address mapping);
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
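+ *
+ * Sketch of the expected request/map/access/unmap/unrequest sequence for a
+ * register bank (the base address and size are hypothetical):
+ * @code
+ *     uintptr_t phys = 0xC0000000; // hypothetical register bank base
+ *     if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(phys, 0x1000, "my regs")) {
+ *         mali_io_address io = _mali_osk_mem_mapioregion(phys, 0x1000, "my regs");
+ *         if (NULL != io) {
+ *             u32 version = _mali_osk_mem_ioread32(io, 0x0);
+ *             // ... use version ...
+ *             _mali_osk_mem_unmapioregion(phys, 0x1000, io);
+ *         }
+ *         _mali_osk_mem_unreqregion(phys, 0x1000);
+ *     }
+ * @endcode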
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description);
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion(uintptr_t phys, u32 size);
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32(volatile mali_io_address mapping, u32 offset);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param addr Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val);
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with a write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32(volatile mali_io_address mapping, u32 offset, u32 val);
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall(void);
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OSs do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size);
+
+/** @brief Safely copy as much data as possible from src to dest
+ *
+ * Do not crash if src or dest isn't available.
+ *
+ * @param dest Destination buffer (limited to user space mapped Mali memory)
+ * @param src Source buffer
+ * @param size Number of bytes to copy
+ * @return Number of bytes actually copied
+ */
+u32 _mali_osk_mem_write_safe(void *dest, const void *src, u32 size);
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems want to send to user space have to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type-specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size);
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete(_mali_osk_notification_t *object);
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for user
+ * space delivery, and from which queued messages can be retrieved.
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void);
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue);
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
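+ *
+ * Sketch of the send side (the event type and payload are hypothetical):
+ * @code
+ *     _mali_osk_notification_t *n =
+ *         _mali_osk_notification_create(MY_EVENT_TYPE, sizeof(u32));
+ *     if (NULL != n) {
+ *         *(u32 *)n->result_buffer = 42; // type-specific payload
+ *         _mali_osk_notification_queue_send(queue, n);
+ *         // ownership passes to the queue; the receiver deletes the object
+ *         // after _mali_osk_notification_queue_receive()
+ *     }
+ * @endcode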
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object);
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @brief Dequeue a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result);
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which is 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
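+ *
+ * Minimal sketch (names are hypothetical; _mali_osk_timer_callback_t is
+ * assumed to take a single void * argument):
+ * @code
+ *     _mali_osk_timer_t *tim = _mali_osk_timer_init();
+ *     _mali_osk_timer_setcallback(tim, my_timeout_cb, my_data);
+ *     _mali_osk_timer_add(tim, _mali_osk_time_mstoticks(500));
+ *     // ... on teardown:
+ *     _mali_osk_timer_del(tim);
+ *     _mali_osk_timer_term(tim);
+ * @endcode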
+ */
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
+
+/** @brief Modify a timer
+ *
+ * Set the relative time at which a timer will expire, and start it if it is
+ * stopped. If \a ticks_to_expire is 0 the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at \a ticks_to_expire from the time of the call, at
+ * which point, the callback function will be invoked with the
+ * callback-specific data, as set by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param ticks_to_expire the \em relative time in ticks at which this timer
+ * should trigger.
+ *
+ */
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the work-queue by the timer (with
+ * \ref _mali_osk_wq_schedule_work()) may still run. The timer callback and
+ * work handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del(_mali_osk_timer_t *tim);
+
+/** @brief Stop a timer.
+ *
+ * Stop the timer. When the function returns, the timer's callback may still be
+ * running on any CPU core.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ */
+void _mali_osk_timer_del_async(_mali_osk_timer_t *tim);
+
+/** @brief Check if a timer is pending.
+ *
+ * Check if a timer is active.
+ *
+ * @param tim the timer to check
+ * @return MALI_TRUE if the timer is active, MALI_FALSE if it is not active
+ */
+mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim);
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data);
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term(_mali_osk_timer_t *tim);
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time uses the OS's representation of time, which is
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after or at the same time as tickb
+ *
+ * Systems where ticks can wrap must handle that.
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return MALI_TRUE if ticka represents a time that occurs at or after tickb.
+ */
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb);
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+unsigned long _mali_osk_time_mstoticks(u32 ms);
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms(unsigned long ticks);
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+unsigned long _mali_osk_time_tickcount(void);
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay(u32 usecs);
+
+/** @brief Return time in nanoseconds, since any given reference.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_time_get_ns(void);
+
+/** @brief Return time in nanoseconds, since boot time.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_boot_time_get_ns(void);
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros in
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz(u32 val);
+
+/** @brief Find the last (most-significant) bit set
+ *
+ * @param val 32-bit word to find the last set bit in
+ * @return the position of the last bit set.
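+ *
+ * Illustrative sketch, assuming Linux-style fls()/clz() semantics where
+ * fls(x) == 32 - clz(x) for non-zero x:
+ * @code
+ *     u32 v = 0x10;                  // only bit 4 set
+ *     u32 lead = _mali_osk_clz(v);   // expected: 27
+ *     u32 last = _mali_osk_fls(v);   // expected: 5
+ * @endcode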
+ */
+u32 _mali_osk_fls(u32 val);
+
+/** @} */ /* end group _mali_osk_math */
+
+/** @addtogroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data);
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ * @param data data parameter for condition function
+ * @param timeout timeout in ms
+ *
+ * Put thread to sleep if the given \a condition function returns false. When
+ * being asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true. Returns when the
+ * timeout is exceeded.
+ */
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout);
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue);
+
+/** @brief Terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue);
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg(const char *fmt, ...);
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...);
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the calling process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return a name for the calling process.
+ *
+ * @return Name for the calling process.
+ */
+char *_mali_osk_get_comm(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+
+/** @brief Take a reference to the power manager system for the Mali device (synchronously).
+ *
+ * When this function returns successfully, Mali is ON.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void);
+
+/** @brief Take a reference to the external power manager system for the Mali device (asynchronously).
+ *
+ * Mali might not yet be on after this function has returned.
+ * Please use \a _mali_osk_pm_dev_barrier() or \a _mali_osk_pm_dev_ref_get_sync()
+ * to wait for Mali to be powered on.
+ *
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void);
+
+/** @brief Release the reference to the external power manager system for the Mali device.
+ *
+ * When the reference count reaches zero, the cores can be powered off.
+ *
+ * @note This must be used to release references taken with
+ * \a _mali_osk_pm_dev_ref_get_sync() or \a _mali_osk_pm_dev_ref_get_async().
+ */
+void _mali_osk_pm_dev_ref_put(void);
+
+/** @brief Block until pending PM operations are done
+ */
+void _mali_osk_pm_dev_barrier(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_bitmap OSK Bitmap
+ * @{ */
+
+/** @brief Allocate a unique number from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @return A unique number from the bitmap object.
+ */
+u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Free a number back to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj A number allocated from the bitmap object.
+ */
+void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj);
+
+/** @brief Allocate a block of contiguous numbers from the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param cnt The size of the contiguous block to allocate.
+ * @return Start number of the contiguous block.
+ */
+u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt);
+
+/** @brief Free a block of contiguous numbers back to the bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
+ * @param obj Start number.
+ * @param cnt The size of the contiguous block.
+ */
+void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt);
+
+/** @brief Return the number of values still available for allocation in the
+ * given bitmap object.
+ */
+u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap);
+
+/** @brief Initialize a bitmap object.
+ *
+ * @param bitmap A pointer to an uninitialized bitmap object.
+ * @param num Size of the bitmap object; determines the amount of memory allocated.
+ * @param reserve The first number available for allocation; lower values are reserved.
+ */
+int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve);
+
+/** @brief Free the given bitmap object.
+ *
+ * @param bitmap Initialized bitmap object.
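+ *
+ * Sketch of a typical allocate/free cycle (sizes are hypothetical, and a
+ * return value of 0 is assumed to indicate success for _mali_osk_bitmap_init()):
+ * @code
+ *     struct _mali_osk_bitmap ids;
+ *     if (0 == _mali_osk_bitmap_init(&ids, 64, 8)) { // values 0..7 reserved
+ *         u32 id = _mali_osk_bitmap_alloc(&ids);
+ *         // ... use id ...
+ *         _mali_osk_bitmap_free(&ids, id);
+ *         _mali_osk_bitmap_term(&ids);
+ *     }
+ * @endcode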
+ */
+void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap);
+/** @} */ /* end group _mali_osk_bitmap */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+#error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+#error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_osk_list.h b/drivers/gpu/arm/mali400/common/mali_osk_list.h
--- a/drivers/gpu/arm/mali400/common/mali_osk_list.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_osk_list.h	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+	next->prev = new_entry;
+	new_entry->next = next;
+	new_entry->prev = prev;
+	prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Define a list variable, which is initialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD_STATIC_INIT(exp) _mali_osk_list_t exp = { &exp, &exp }
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init(_mali_osk_list_t *list)
+{
+	list->next = list;
+	list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
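+ *
+ * Illustrative sketch (the containing structure and names are hypothetical):
+ * @code
+ *     typedef struct my_job {
+ *         _mali_osk_list_t list;
+ *         int id;
+ *     } my_job;
+ *
+ *     _MALI_OSK_LIST_HEAD_STATIC_INIT(job_list);
+ *     my_job job;
+ *
+ *     _mali_osk_list_init(&job.list);
+ *     _mali_osk_list_add(&job.list, &job_list); // job is now first in job_list
+ * @endcode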
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+	__mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail(_mali_osk_list_t *new_entry, _mali_osk_list_t *list)
+{
+	__mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del(_mali_osk_list_t *list)
+{
+	__mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit(_mali_osk_list_t *list)
+{
+	__mali_osk_list_del(list->prev, list->next);
+	_mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE mali_bool _mali_osk_list_empty(_mali_osk_list_t *list)
+{
+	return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move(_mali_osk_list_t *move_entry, _mali_osk_list_t *list)
+{
+	__mali_osk_list_del(move_entry->prev, move_entry->next);
+	_mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Move an entire list
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to move a list from one list head to another list head
+ *
+ * @param old_list The existing list head
+ * @param new_list The new list head (must be an empty list)
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move_list(_mali_osk_list_t *old_list, _mali_osk_list_t *new_list)
+{
+	MALI_DEBUG_ASSERT(_mali_osk_list_empty(new_list));
+	if (!_mali_osk_list_empty(old_list)) {
+		new_list->next = old_list->next;
+		new_list->prev = old_list->prev;
+		new_list->next->prev = new_list;
+		new_list->prev->next = new_list;
+		old_list->next = old_list;
+		old_list->prev = old_list;
+	}
+}
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+	_MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+	for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+	     tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+	     &ptr->member != (list); \
+	     ptr = tmp, \
+	     tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+
+/** @brief Enumerate a list in reverse order safely
+ *
+ * This macro is identical to @ref _MALI_OSK_LIST_FOREACHENTRY, except that
+ * entries are enumerated in reverse order.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY_REVERSE(ptr, tmp, list, type, member) \
+	for (ptr = _MALI_OSK_LIST_ENTRY((list)->prev, type, member), \
+	     tmp = _MALI_OSK_LIST_ENTRY(ptr->member.prev, type, member); \
+	     &ptr->member != (list); \
+	     ptr = tmp, \
+	     tmp = _MALI_OSK_LIST_ENTRY(tmp->member.prev, type, member))
+
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_osk_mali.h b/drivers/gpu/arm/mali400/common/mali_osk_mali.h
--- a/drivers/gpu/arm/mali400/common/mali_osk_mali.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_osk_mali.h	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CONFIG_MALI_DEVFREQ
+struct mali_device {
+	struct device *dev;
+#ifdef CONFIG_HAVE_CLK
+	struct clk *clock;
+#endif
+#ifdef CONFIG_REGULATOR
+	struct regulator *regulator;
+#endif
+#ifdef CONFIG_PM_DEVFREQ
+	struct devfreq_dev_profile devfreq_profile;
+	struct devfreq *devfreq;
+	unsigned long current_freq;
+	unsigned long current_voltage;
+#ifdef CONFIG_DEVFREQ_THERMAL
+	struct thermal_cooling_device *devfreq_cooling;
+#endif
+#endif
+	struct mali_pm_metrics_data mali_metrics;
+};
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Struct with device specific configuration data
+ */
+typedef struct mali_gpu_device_data _mali_osk_device_data;
+
+#ifdef CONFIG_MALI_DT
+/** @brief Initialize device resources when using device tree
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_resource_initialize(void);
+#endif
+
+/** @brief Find Mali GPU HW resource
+ *
+ * @param addr Address of Mali GPU resource to find
+ * @param res Storage for resource information if resource is found.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if resource is not found
+ */
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res);
+
+
+/** @brief Find Mali GPU HW base address
+ *
+ * @return the base address of the Mali GPU component with the lowest address,
+ * or 0 if no resources are found.
+ */
+uintptr_t _mali_osk_resource_base_address(void);
+
+/** @brief Find the specific GPU resource.
+ * + * @return value + * 0x400 if Mali 400 specific GPU resource identified + * 0x450 if Mali 450 specific GPU resource identified + * 0x470 if Mali 470 specific GPU resource identified + * + */ +u32 _mali_osk_identify_gpu_resource(void); + +/** @brief Retrieve the Mali GPU specific data + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data); + +/** @brief Find the pmu domain config from device data. + * + * @param domain_config_array used to store pmu domain config found in device data. + * @param array_size is the size of array domain_config_array. + */ +void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size); + +/** @brief Get Mali PMU switch delay + * + *@return pmu switch delay if it is configured + */ +u32 _mali_osk_get_pmu_switch_delay(void); + +/** @brief Determines if Mali GPU has been configured with shared interrupts. + * + * @return MALI_TRUE if shared interrupts, MALI_FALSE if not. + */ +mali_bool _mali_osk_shared_interrupts(void); + +/** @brief Initialize the gpu secure mode. + * The gpu secure mode will initially be in a disabled state. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_init(void); + +/** @brief Deinitialize the gpu secure mode. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void); + +/** @brief Reset GPU and enable the gpu secure mode. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_enable(void); + +/** @brief Reset GPU and disable the gpu secure mode. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_disable(void); + +/** @brief Check if the gpu secure mode has been enabled. + * @return MALI_TRUE if enabled, otherwise MALI_FALSE. + */ +mali_bool _mali_osk_gpu_secure_mode_is_enabled(void); + +/** @brief Check if the gpu secure mode is supported. + * @return MALI_TRUE if supported, otherwise MALI_FALSE. + */ +mali_bool _mali_osk_gpu_secure_mode_is_supported(void); + + +/** @} */ /* end group _mali_osk_miscellaneous */ + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_OSK_MALI_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_osk_profiling.h b/drivers/gpu/arm/mali400/common/mali_osk_profiling.h --- a/drivers/gpu/arm/mali400/common/mali_osk_profiling.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_osk_profiling.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS)
+
+#include "mali_linux_trace.h"
+#include "mali_profiling_events.h"
+#include "mali_profiling_gator_api.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Stop the profile sampling operation.
+ */
+void _mali_osk_profiling_stop_sampling(u32 pid);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 *limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ */
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+void _mali_osk_profiling_record_global_counters(int counter_id, u32 value);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
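+ *
+ * Editor's note: taken together, the functions in this header imply the
+ * following capture cycle (sketch only, error handling omitted):
+ *
+ *   u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ *   u32 count, i, event_id, data[5];
+ *   u64 timestamp;
+ *
+ *   _mali_osk_profiling_start(&limit);
+ *   ... workload runs, events arrive via _mali_osk_profiling_add_event() ...
+ *   _mali_osk_profiling_stop(&count);
+ *   for (i = 0; i < count; i++)
+ *           _mali_osk_profiling_get_event(i, &timestamp, &event_id, data);
+ *   _mali_osk_profiling_clear();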
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
+/** @} */ /* end group _mali_osk_profiling */
+
+#else /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+/* Dummy add_event, for when profiling is disabled. */
+
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4)
+
+#endif /* defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_TRACEPOINTS) */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_osk_types.h b/drivers/gpu/arm/mali400/common/mali_osk_types.h
--- a/drivers/gpu/arm/mali400/common/mali_osk_types.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_osk_types.h	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_types.h
+ * Defines types of the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_TYPES_H__
+#define __MALI_OSK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+typedef unsigned char u8;
+typedef signed char s8;
+typedef unsigned short u16;
+typedef signed short s16;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned long long u64;
+#define BITS_PER_LONG (sizeof(long)*8)
+#else
+/* Ensure Linux types u32, etc. are defined */
+#include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+#define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+#define MALI_FALSE ((mali_bool)0)
+#endif
+
+#define MALI_HW_CORE_NO_COUNTER ((u32)-1)
+
+
+#define MALI_S32_MAX 0x7fffffff
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error codes. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to the U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes.
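+ * (For example, on Linux a native -ENOMEM would typically be translated
+ * to _MALI_OSK_ERR_NOMEM before crossing this boundary.)
+ *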
Similarly, the U/K layer must translate from + * _mali_osk_errcode_t codes to native OS error codes. + */ +typedef enum { + _MALI_OSK_ERR_OK = 0, /**< Success. */ + _MALI_OSK_ERR_FAULT = -1, /**< General non-success */ + _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */ + _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */ + _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */ + _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */ + _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */ + _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */ + _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */ + _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */ +} _mali_osk_errcode_t; + +/** @} */ /* end group _mali_osk_miscellaneous */ + +/** @defgroup _mali_osk_wq OSK work queues + * @{ */ + +/** @brief Private type for work objects */ +typedef struct _mali_osk_wq_work_s _mali_osk_wq_work_t; +typedef struct _mali_osk_wq_delayed_work_s _mali_osk_wq_delayed_work_t; + +/** @brief Work queue handler function + * + * This function type is called when the work is scheduled by the work queue, + * e.g. as an IRQ bottom-half handler. + * + * Refer to \ref _mali_osk_wq_schedule_work() for more information on the + * work-queue and work handlers. + * + * @param arg resource-specific data + */ +typedef void (*_mali_osk_wq_work_handler_t)(void *arg); + +/* @} */ /* end group _mali_osk_wq */ + +/** @defgroup _mali_osk_irq OSK IRQ handling + * @{ */ + +/** @brief Private type for IRQ handling objects */ +typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t; + +/** @brief Optional function to trigger an irq from a resource + * + * This function is implemented by the common layer to allow probing of a resource's IRQ. + * @param arg resource-specific data */ +typedef void (*_mali_osk_irq_trigger_t)(void *arg); + +/** @brief Optional function to acknowledge an irq from a resource + * + * This function is implemented by the common layer to allow probing of a resource's IRQ. + * @param arg resource-specific data + * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */ +typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)(void *arg); + +/** @brief IRQ 'upper-half' handler callback. + * + * This function is implemented by the common layer to do the initial handling of a + * resource's IRQ. This maps on to the concept of an ISR that does the minimum + * work necessary before handing off to an IST. + * + * The communication of the resource-specific data from the ISR to the IST is + * handled by the OSK implementation. + * + * On most systems, the IRQ upper-half handler executes in IRQ context. + * Therefore, the system may have restrictions about what can be done in this + * context + * + * If an IRQ upper-half handler requires more work to be done than can be + * acheived in an IRQ context, then it may defer the work with + * _mali_osk_wq_schedule_work(). Refer to \ref _mali_osk_wq_create_work() for + * more information. + * + * @param arg resource-specific data + * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable + * _mali_osk_errcode_t otherwise. 
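+ *
+ * A minimal sketch of such a handler (editor's example; the device type and
+ * the register helpers are hypothetical, only the OSK calls are real):
+ *
+ *   static _mali_osk_errcode_t my_core_uhandler(void *arg)
+ *   {
+ *           struct my_core *core = (struct my_core *)arg;
+ *
+ *           if (!my_core_irq_pending(core))
+ *                   return _MALI_OSK_ERR_FAULT;
+ *
+ *           my_core_mask_irq(core);
+ *           _mali_osk_wq_schedule_work(core->bottom_half_work);
+ *           return _MALI_OSK_ERR_OK;
+ *   }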
+ */ +typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)(void *arg); + + +/** @} */ /* end group _mali_osk_irq */ + + +/** @defgroup _mali_osk_atomic OSK Atomic counters + * @{ */ + +/** @brief Public type of atomic counters + * + * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value. + * On others, it could be encapsulating an object stored elsewhere. + * + * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used + * for all accesses to the variable's value, even if atomicity is not required. + * Do not access u.val or u.obj directly. + */ +typedef struct { + union { + u32 val; + void *obj; + } u; +} _mali_osk_atomic_t; +/** @} */ /* end group _mali_osk_atomic */ + + +/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks + * @{ */ + + +/** @brief OSK Mutual Exclusion Lock ordered list + * + * This lists the various types of locks in the system and is used to check + * that locks are taken in the correct order. + * + * - Holding more than one lock of the same order at the same time is not + * allowed. + * - Taking a lock of a lower order than the highest-order lock currently held + * is not allowed. + * + */ +typedef enum { + /* || Locks || */ + /* || must be || */ + /* _||_ taken in _||_ */ + /* \ / this \ / */ + /* \/ order! \/ */ + + _MALI_OSK_LOCK_ORDER_FIRST = 0, + + _MALI_OSK_LOCK_ORDER_SESSIONS, + _MALI_OSK_LOCK_ORDER_MEM_SESSION, + _MALI_OSK_LOCK_ORDER_MEM_INFO, + _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE, + _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP, + _MALI_OSK_LOCK_ORDER_PM_EXECUTION, + _MALI_OSK_LOCK_ORDER_EXECUTOR, + _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM, + _MALI_OSK_LOCK_ORDER_SCHEDULER, + _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED, + _MALI_OSK_LOCK_ORDER_PROFILING, + _MALI_OSK_LOCK_ORDER_L2, + _MALI_OSK_LOCK_ORDER_L2_COMMAND, + _MALI_OSK_LOCK_ORDER_UTILIZATION, + _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS, + _MALI_OSK_LOCK_ORDER_PM_STATE, + + _MALI_OSK_LOCK_ORDER_LAST, +} _mali_osk_lock_order_t; + + +/** @brief OSK Mutual Exclusion Lock flags type + * + * - Any lock can use the order parameter. + */ +typedef enum { + _MALI_OSK_LOCKFLAG_UNORDERED = 0x1, /**< Indicate that the order of this lock should not be checked */ + _MALI_OSK_LOCKFLAG_ORDERED = 0x2, + /** @enum _mali_osk_lock_flags_t + * + * Flags from 0x10000--0x80000000 are RESERVED for User-mode */ + +} _mali_osk_lock_flags_t; + +/** @brief Mutual Exclusion Lock Mode Optimization hint + * + * The lock mode is used to implement the read/write locking of locks when we call + * functions _mali_osk_mutex_rw_init/wait/signal/term/. In this case, the RO mode can + * be used to allow multiple concurrent readers, but no writers. The RW mode is used for + * writers, and so will wait for all readers to release the lock (if any present). + * Further readers and writers will wait until the writer releases the lock. + * + * The mode is purely an optimization hint: for example, it is permissible for + * all locks to behave in RW mode, regardless of that supplied. + * + * It is an error to attempt to use locks in anything other that RW mode when + * call functions _mali_osk_mutex_rw_wait/signal(). + * + */ +typedef enum { + _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */ + _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */ + _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. 
*/ + /** @enum _mali_osk_lock_mode_t + * + * Lock modes 0x40--0x7F are RESERVED for User-mode */ +} _mali_osk_lock_mode_t; + +/** @brief Private types for Mutual Exclusion lock objects */ +typedef struct _mali_osk_lock_debug_s _mali_osk_lock_debug_t; +typedef struct _mali_osk_spinlock_s _mali_osk_spinlock_t; +typedef struct _mali_osk_spinlock_irq_s _mali_osk_spinlock_irq_t; +typedef struct _mali_osk_mutex_s _mali_osk_mutex_t; +typedef struct _mali_osk_mutex_rw_s _mali_osk_mutex_rw_t; + +/** @} */ /* end group _mali_osk_lock */ + +/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations + * @{ */ + +/** + * @brief Private data type for use in IO accesses to/from devices. + * + * This represents some range that is accessible from the device. Examples + * include: + * - Device Registers, which could be readable and/or writeable. + * - Memory that the device has access to, for storing configuration structures. + * + * Access to this range must be made through the _mali_osk_mem_ioread32() and + * _mali_osk_mem_iowrite32() functions. + */ +typedef struct _mali_io_address *mali_io_address; + +/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros. + * + * The order of the page size is supplied for + * ease of use by algorithms that might require it, since it is easier to know + * it ahead of time rather than calculating it. + * + * The Mali Page Mask macro masks off the lower bits of a physical address to + * give the start address of the page for that physical address. + * + * @note The Mali device driver code is designed for systems with 4KB page size. + * Changing these macros will not make the entire Mali device driver work with + * page sizes other than 4KB. + * + * @note The CPU Physical Page Size has been assumed to be the same as the Mali + * Physical Page Size. + * + * @{ + */ + +/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */ +#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12) +/** CPU Page Size, in bytes. */ +#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) +/** CPU Page Mask, which masks off the offset within a page */ +#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1))) +/** @} */ /* end of group _MALI_OSK_CPU_PAGE */ + +/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros + * + * Mali Physical page size macros. The order of the page size is supplied for + * ease of use by algorithms that might require it, since it is easier to know + * it ahead of time rather than calculating it. + * + * The Mali Page Mask macro masks off the lower bits of a physical address to + * give the start address of the page for that physical address. + * + * @note The Mali device driver code is designed for systems with 4KB page size. + * Changing these macros will not make the entire Mali device driver work with + * page sizes other than 4KB. + * + * @note The Mali Physical Page Size has been assumed to be the same as the CPU + * Physical Page Size. + * + * @{ + */ + +/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */ +#define _MALI_OSK_MALI_PAGE_ORDER PAGE_SHIFT +/** Mali Page Size, in bytes. 
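+ * (equal to PAGE_SIZE; per the note above this is assumed to be 4KB)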
 */
+#define _MALI_OSK_MALI_PAGE_SIZE PAGE_SIZE
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK PAGE_MASK
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range.
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum {
+	_MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct {
+	u32 notification_type;  /**< The notification type */
+	u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+	void *result_buffer;    /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_wq_schedule_work() to make use of a bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void *arg);
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member.
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
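+ *
+ * For example (editor's sketch; struct my_job is hypothetical):
+ *
+ *   struct my_job {
+ *           u32 id;
+ *           _mali_osk_list_t session_link;
+ *           _mali_osk_list_t queue_link;
+ *   };
+ *
+ * A my_job can then be linked into a per-session list and a run-queue list
+ * at the same time, and _MALI_OSK_LIST_ENTRY(ptr, struct my_job, queue_link)
+ * recovers the job from a node of the run-queue list.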
+ */
+typedef struct _mali_osk_list_s {
+	struct _mali_osk_list_s *next;
+	struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+/** @} */ /* end group _mali_osk_list */
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief resource description struct
+ *
+ * Platform independent representation of a Mali HW resource
+ */
+typedef struct _mali_osk_resource {
+	const char *description;  /**< short description of the resource */
+	uintptr_t base;           /**< Physical base address of the resource, as seen by Mali resources. */
+	const char *irq_name;     /**< Name of irq belonging to this resource */
+	u32 irq;                  /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+/** @} */ /* end group _mali_osk_wait_queue */
+
+/** @} */ /* end group oskapi */
+
+/** @} */ /* end group uddapi */
+
+/** @brief Mali print ctx type which uses seq_file
+ */
+typedef struct seq_file _mali_osk_print_ctx;
+
+#define _MALI_OSK_BITMAP_INVALIDATE_INDEX -1
+
+typedef struct _mali_osk_bitmap {
+	u32 reserve;
+	u32 last;
+	u32 max;
+	u32 avail;
+	_mali_osk_spinlock_t *lock;
+	unsigned long *table;
+} _mali_osk_bitmap_t;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_TYPES_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pm.c b/drivers/gpu/arm/mali400/common/mali_pm.c
--- a/drivers/gpu/arm/mali400/common/mali_pm.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_pm.c	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,1362 @@
+/*
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_scheduler.h"
+#include "mali_group.h"
+#include "mali_pm_domain.h"
+#include "mali_pmu.h"
+
+#include "mali_executor.h"
+#include "mali_control_timer.h"
+
+#if defined(DEBUG)
+u32 num_pm_runtime_resume = 0;
+u32 num_pm_updates = 0;
+u32 num_pm_updates_up = 0;
+u32 num_pm_updates_down = 0;
+#endif
+
+#define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)
+
+/* lock protecting power state (including pm_domains) */
+static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;
+
+/* the wanted domain mask (protected by pm_lock_state) */
+static u32 pd_mask_wanted = 0;
+
+/* used to defer the actual power changes */
+static _mali_osk_wq_work_t *pm_work = NULL;
+
+/* lock protecting power change execution */
+static _mali_osk_mutex_t *pm_lock_exec = NULL;
+
+/* PMU domains which are actually powered on (protected by pm_lock_exec) */
+static u32 pmu_mask_current = 0;
+
+/*
+ * domains which are marked as powered on (protected by pm_lock_exec)
+ * This can be different from pmu_mask_current right after GPU power on
+ * if the PMU domains default to powered up.
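+ * (For example, on a PMU whose domains default to powered up, the hardware
+ * is already on right after GPU power-on while pd_mask_current is still 0;
+ * the next mali_pm_update_sync_internal() reconciles the two masks.)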
+ */
+static u32 pd_mask_current = 0;
+
+static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1 << MALI_DOMAIN_INDEX_DUMMY
+};
+
+/* The relative core power cost */
+#define MALI_GP_COST 3
+#define MALI_PP_COST 6
+#define MALI_L2_COST 1
+
+/*
+ * We have MALI_MAX_NUMBER_OF_PP_PHYSICAL_CORES + 1 rows in this matrix
+ * because we must store the mask of different pp cores: 0, 1, 2, 3, 4, 5, 6, 7, 8.
+ */
+static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
+/*
+ * Keep track of runtime PM state, so that we know
+ * how to resume during OS resume.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static mali_bool mali_pm_runtime_active = MALI_FALSE;
+#else
+/* when the kernel does not enable PM_RUNTIME, keep the flag always true,
+ * since the GPU will then never be powered off by runtime PM */
+static mali_bool mali_pm_runtime_active = MALI_TRUE;
+#endif
+
+static void mali_pm_state_lock(void);
+static void mali_pm_state_unlock(void);
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
+static void mali_pm_set_pmu_domain_config(void);
+static u32 mali_pm_get_registered_cores_mask(void);
+static void mali_pm_update_sync_internal(void);
+static mali_bool mali_pm_common_suspend(void);
+static void mali_pm_update_work(void *data);
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+const char *mali_pm_group_stats_to_string(void);
+#endif
+
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+	_mali_osk_errcode_t err;
+	struct mali_pmu_core *pmu;
+
+	pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+			_MALI_OSK_LOCK_ORDER_PM_STATE);
+	if (NULL == pm_lock_state) {
+		mali_pm_terminate();
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+					    _MALI_OSK_LOCK_ORDER_PM_STATE);
+	if (NULL == pm_lock_exec) {
+		mali_pm_terminate();
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
+	if (NULL == pm_work) {
+		mali_pm_terminate();
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	pmu = mali_pmu_get_global_pmu_core();
+	if (NULL != pmu) {
+		/*
+		 * We have a Mali PMU, set the correct domain
+		 * configuration (default or custom)
+		 */
+
+		u32 registered_cores_mask;
+
+		mali_pm_set_pmu_domain_config();
+
+		registered_cores_mask = mali_pm_get_registered_cores_mask();
+		mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);
+
+		MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+	}
+
+	/* Create all power domains needed (at least one dummy domain) */
+	err = mali_pm_create_pm_domains();
+	if (_MALI_OSK_ERR_OK != err) {
+		mali_pm_terminate();
+		return err;
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+void mali_pm_terminate(void)
+{
+	if (NULL != pm_work) {
+		_mali_osk_wq_delete_work(pm_work);
+		pm_work = NULL;
+	}
+
+	mali_pm_domain_terminate();
+
+	if (NULL != pm_lock_exec) {
+		_mali_osk_mutex_term(pm_lock_exec);
+		pm_lock_exec = NULL;
+	}
+
+	if (NULL != pm_lock_state) {
+		_mali_osk_spinlock_irq_term(pm_lock_state);
+		pm_lock_state = NULL;
+	}
+}
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+		struct mali_l2_cache_core *l2_cache)
+{
+	struct mali_pm_domain *domain;
+
+	domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+	if (NULL == domain) {
+		MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+		domain = mali_pm_domain_get_from_index(
+				 MALI_DOMAIN_INDEX_DUMMY);
+		domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+	} else {
+		MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+	}
+
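+	/* Editor's note: at this point we have either found the configured
+	 * domain or fallen back to the dummy domain, so the assert below
+	 * always holds. */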
+ MALI_DEBUG_ASSERT(NULL != domain); + + mali_pm_domain_add_l2_cache(domain, l2_cache); + + return domain; /* return the actual domain this was registered in */ +} + +struct mali_pm_domain *mali_pm_register_group(u32 domain_index, + struct mali_group *group) +{ + struct mali_pm_domain *domain; + + domain = mali_pm_domain_get_from_mask(domain_config[domain_index]); + if (NULL == domain) { + MALI_DEBUG_ASSERT(0 == domain_config[domain_index]); + domain = mali_pm_domain_get_from_index( + MALI_DOMAIN_INDEX_DUMMY); + domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK; + } else { + MALI_DEBUG_ASSERT(0 != domain_config[domain_index]); + } + + MALI_DEBUG_ASSERT(NULL != domain); + + mali_pm_domain_add_group(domain, group); + + return domain; /* return the actual domain this was registered in */ +} + +mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains, + struct mali_group **groups, + u32 num_domains) +{ + mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */ + u32 i; + + mali_pm_state_lock(); + + for (i = 0; i < num_domains; i++) { + MALI_DEBUG_ASSERT_POINTER(domains[i]); + pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]); + if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) { + /* + * Tell caller that the corresponding group + * was not already powered on. + */ + ret = MALI_FALSE; + } else { + /* + * There is a time gap between we power on the domain and + * set the power state of the corresponding groups to be on. + */ + if (NULL != groups[i] && + MALI_FALSE == mali_group_power_is_on(groups[i])) { + ret = MALI_FALSE; + } + } + } + + MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted)); + + mali_pm_state_unlock(); + + return ret; +} + +mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains, + u32 num_domains) +{ + u32 mask = 0; + mali_bool ret; + u32 i; + + mali_pm_state_lock(); + + for (i = 0; i < num_domains; i++) { + MALI_DEBUG_ASSERT_POINTER(domains[i]); + mask |= mali_pm_domain_ref_put(domains[i]); + } + + if (0 == mask) { + /* return false, all domains should still stay on */ + ret = MALI_FALSE; + } else { + /* Assert that we are dealing with a change */ + MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask); + + /* Update our desired domain mask */ + pd_mask_wanted &= ~mask; + + /* return true; one or more domains can now be powered down */ + ret = MALI_TRUE; + } + + MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted)); + + mali_pm_state_unlock(); + + return ret; +} + +void mali_pm_init_begin(void) +{ + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + + _mali_osk_pm_dev_ref_get_sync(); + + /* Ensure all PMU domains are on */ + if (NULL != pmu) { + mali_pmu_power_up_all(pmu); + } +} + +void mali_pm_init_end(void) +{ + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + + /* Ensure all PMU domains are off */ + if (NULL != pmu) { + mali_pmu_power_down_all(pmu); + } + + _mali_osk_pm_dev_ref_put(); +} + +void mali_pm_update_sync(void) +{ + mali_pm_exec_lock(); + + if (MALI_TRUE == mali_pm_runtime_active) { + /* + * Only update if GPU is powered on. + * Deactivation of the last group will result in both a + * deferred runtime PM suspend operation and + * deferred execution of this function. + * mali_pm_runtime_active will be false if runtime PM + * executed first and thus the GPU is now fully powered off. 
+ */ + mali_pm_update_sync_internal(); + } + + mali_pm_exec_unlock(); +} + +void mali_pm_update_async(void) +{ + _mali_osk_wq_schedule_work(pm_work); +} + +void mali_pm_os_suspend(mali_bool os_suspend) +{ + int ret; + + MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n")); + + /* Suspend execution of all jobs, and go to inactive state */ + mali_executor_suspend(); + + if (os_suspend) { + mali_control_timer_suspend(MALI_TRUE); + } + + mali_pm_exec_lock(); + + ret = mali_pm_common_suspend(); + + MALI_DEBUG_ASSERT(MALI_TRUE == ret); + MALI_IGNORE(ret); + + mali_pm_exec_unlock(); +} + +void mali_pm_os_resume(void) +{ + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + + MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n")); + + mali_pm_exec_lock(); + +#if defined(DEBUG) + mali_pm_state_lock(); + + /* Assert that things are as we left them in os_suspend(). */ + MALI_DEBUG_ASSERT(0 == pd_mask_wanted); + MALI_DEBUG_ASSERT(0 == pd_mask_current); + MALI_DEBUG_ASSERT(0 == pmu_mask_current); + + MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused()); + + mali_pm_state_unlock(); +#endif + + if (MALI_TRUE == mali_pm_runtime_active) { + /* Runtime PM was active, so reset PMU */ + if (NULL != pmu) { + mali_pmu_reset(pmu); + pmu_mask_current = mali_pmu_get_mask(pmu); + + MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current)); + } + + mali_pm_update_sync_internal(); + } + + mali_pm_exec_unlock(); + + /* Start executing jobs again */ + mali_executor_resume(); +} + +mali_bool mali_pm_runtime_suspend(void) +{ + mali_bool ret; + + MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n")); + + mali_pm_exec_lock(); + + /* + * Put SW state directly into "off" state, and do not bother to power + * down each power domain, because entire GPU will be powered off + * when we return. + * For runtime PM suspend, in contrast to OS suspend, there is a race + * between this function and the mali_pm_update_sync_internal(), which + * is fine... + */ + ret = mali_pm_common_suspend(); + if (MALI_TRUE == ret) { + mali_pm_runtime_active = MALI_FALSE; + } else { + /* + * Process the "power up" instead, + * which could have been "lost" + */ + mali_pm_update_sync_internal(); + } + + mali_pm_exec_unlock(); + + return ret; +} + +void mali_pm_runtime_resume(void) +{ + struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core(); + + mali_pm_exec_lock(); + + mali_pm_runtime_active = MALI_TRUE; + +#if defined(DEBUG) + ++num_pm_runtime_resume; + + mali_pm_state_lock(); + + /* + * Assert that things are as we left them in runtime_suspend(), + * except for pd_mask_wanted which normally will be the reason we + * got here (job queued => domains wanted) + */ + MALI_DEBUG_ASSERT(0 == pd_mask_current); + MALI_DEBUG_ASSERT(0 == pmu_mask_current); + + mali_pm_state_unlock(); +#endif + + if (NULL != pmu) { + mali_pmu_reset(pmu); + pmu_mask_current = mali_pmu_get_mask(pmu); + MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current)); + } + + /* + * Normally we are resumed because a job has just been queued. + * pd_mask_wanted should thus be != 0. + * It is however possible for others to take a Mali Runtime PM ref + * without having a job queued. + * We should however always call mali_pm_update_sync_internal(), + * because this will take care of any potential mismatch between + * pmu_mask_current and pd_mask_current. 
+ */ + mali_pm_update_sync_internal(); + + mali_pm_exec_unlock(); +} + +#if MALI_STATE_TRACKING +u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain, + char *buf, u32 size) +{ + int n = 0; + + n += _mali_osk_snprintf(buf + n, size - n, + "\tPower domain: id %u\n", + mali_pm_domain_get_id(domain)); + + n += _mali_osk_snprintf(buf + n, size - n, + "\t\tMask: 0x%04x\n", + mali_pm_domain_get_mask(domain)); + + n += _mali_osk_snprintf(buf + n, size - n, + "\t\tUse count: %u\n", + mali_pm_domain_get_use_count(domain)); + + n += _mali_osk_snprintf(buf + n, size - n, + "\t\tCurrent power state: %s\n", + (mali_pm_domain_get_mask(domain) & pd_mask_current) ? + "On" : "Off"); + + n += _mali_osk_snprintf(buf + n, size - n, + "\t\tWanted power state: %s\n", + (mali_pm_domain_get_mask(domain) & pd_mask_wanted) ? + "On" : "Off"); + + return n; +} +#endif + +static void mali_pm_state_lock(void) +{ + _mali_osk_spinlock_irq_lock(pm_lock_state); +} + +static void mali_pm_state_unlock(void) +{ + _mali_osk_spinlock_irq_unlock(pm_lock_state); +} + +void mali_pm_exec_lock(void) +{ + _mali_osk_mutex_wait(pm_lock_exec); +} + +void mali_pm_exec_unlock(void) +{ + _mali_osk_mutex_signal(pm_lock_exec); +} + +static void mali_pm_domain_power_up(u32 power_up_mask, + struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS], + u32 *num_groups_up, + struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES], + u32 *num_l2_up) +{ + u32 domain_bit; + u32 notify_mask = power_up_mask; + + MALI_DEBUG_ASSERT(0 != power_up_mask); + MALI_DEBUG_ASSERT_POINTER(groups_up); + MALI_DEBUG_ASSERT_POINTER(num_groups_up); + MALI_DEBUG_ASSERT(0 == *num_groups_up); + MALI_DEBUG_ASSERT_POINTER(l2_up); + MALI_DEBUG_ASSERT_POINTER(num_l2_up); + MALI_DEBUG_ASSERT(0 == *num_l2_up); + + MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec); + MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state); + + MALI_DEBUG_PRINT(5, + ("PM update: Powering up domains: . 
[%s]\n", + mali_pm_mask_to_string(power_up_mask))); + + pd_mask_current |= power_up_mask; + + domain_bit = _mali_osk_fls(notify_mask); + while (0 != domain_bit) { + u32 domain_id = domain_bit - 1; + struct mali_pm_domain *domain = + mali_pm_domain_get_from_index( + domain_id); + struct mali_l2_cache_core *l2_cache; + struct mali_l2_cache_core *l2_cache_tmp; + struct mali_group *group; + struct mali_group *group_tmp; + + /* Mark domain as powered up */ + mali_pm_domain_set_power_on(domain, MALI_TRUE); + + /* + * Make a note of the L2 and/or group(s) to notify + * (need to release the PM state lock before doing so) + */ + + _MALI_OSK_LIST_FOREACHENTRY(l2_cache, + l2_cache_tmp, + mali_pm_domain_get_l2_cache_list( + domain), + struct mali_l2_cache_core, + pm_domain_list) { + MALI_DEBUG_ASSERT(*num_l2_up < + MALI_MAX_NUMBER_OF_L2_CACHE_CORES); + l2_up[*num_l2_up] = l2_cache; + (*num_l2_up)++; + } + + _MALI_OSK_LIST_FOREACHENTRY(group, + group_tmp, + mali_pm_domain_get_group_list(domain), + struct mali_group, + pm_domain_list) { + MALI_DEBUG_ASSERT(*num_groups_up < + MALI_MAX_NUMBER_OF_GROUPS); + groups_up[*num_groups_up] = group; + + (*num_groups_up)++; + } + + /* Remove current bit and find next */ + notify_mask &= ~(1 << (domain_id)); + domain_bit = _mali_osk_fls(notify_mask); + } +} +static void mali_pm_domain_power_down(u32 power_down_mask, + struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS], + u32 *num_groups_down, + struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES], + u32 *num_l2_down) +{ + u32 domain_bit; + u32 notify_mask = power_down_mask; + + MALI_DEBUG_ASSERT(0 != power_down_mask); + MALI_DEBUG_ASSERT_POINTER(groups_down); + MALI_DEBUG_ASSERT_POINTER(num_groups_down); + MALI_DEBUG_ASSERT(0 == *num_groups_down); + MALI_DEBUG_ASSERT_POINTER(l2_down); + MALI_DEBUG_ASSERT_POINTER(num_l2_down); + MALI_DEBUG_ASSERT(0 == *num_l2_down); + + MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec); + MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state); + + MALI_DEBUG_PRINT(5, + ("PM update: Powering down domains: [%s]\n", + mali_pm_mask_to_string(power_down_mask))); + + pd_mask_current &= ~power_down_mask; + + domain_bit = _mali_osk_fls(notify_mask); + while (0 != domain_bit) { + u32 domain_id = domain_bit - 1; + struct mali_pm_domain *domain = + mali_pm_domain_get_from_index(domain_id); + struct mali_l2_cache_core *l2_cache; + struct mali_l2_cache_core *l2_cache_tmp; + struct mali_group *group; + struct mali_group *group_tmp; + + /* Mark domain as powered down */ + mali_pm_domain_set_power_on(domain, MALI_FALSE); + + /* + * Make a note of the L2s and/or groups to notify + * (need to release the PM state lock before doing so) + */ + + _MALI_OSK_LIST_FOREACHENTRY(l2_cache, + l2_cache_tmp, + mali_pm_domain_get_l2_cache_list(domain), + struct mali_l2_cache_core, + pm_domain_list) { + MALI_DEBUG_ASSERT(*num_l2_down < + MALI_MAX_NUMBER_OF_L2_CACHE_CORES); + l2_down[*num_l2_down] = l2_cache; + (*num_l2_down)++; + } + + _MALI_OSK_LIST_FOREACHENTRY(group, + group_tmp, + mali_pm_domain_get_group_list(domain), + struct mali_group, + pm_domain_list) { + MALI_DEBUG_ASSERT(*num_groups_down < + MALI_MAX_NUMBER_OF_GROUPS); + groups_down[*num_groups_down] = group; + (*num_groups_down)++; + } + + /* Remove current bit and find next */ + notify_mask &= ~(1 << (domain_id)); + domain_bit = _mali_osk_fls(notify_mask); + } +} + +/* + * Execute pending power domain changes + * pm_lock_exec lock must be taken by caller. 
+ */
+static void mali_pm_update_sync_internal(void)
+{
+	/*
+	 * This should only be called in non-atomic context
+	 * (normally as deferred work)
+	 *
+	 * Look at the pending power domain changes, and execute these.
+	 * Make sure group and schedulers are notified about changes.
+	 */
+
+	struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+	u32 power_down_mask;
+	u32 power_up_mask;
+
+	MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+
+#if defined(DEBUG)
+	++num_pm_updates;
+#endif
+
+	/* Hold PM state lock while we look at (and obey) the wanted state */
+	mali_pm_state_lock();
+
+	MALI_DEBUG_PRINT(5, ("PM update pre: Wanted domain mask: .. [%s]\n",
+			     mali_pm_mask_to_string(pd_mask_wanted)));
+	MALI_DEBUG_PRINT(5, ("PM update pre: Current domain mask: . [%s]\n",
+			     mali_pm_mask_to_string(pd_mask_current)));
+	MALI_DEBUG_PRINT(5, ("PM update pre: Current PMU mask: .... [%s]\n",
+			     mali_pm_mask_to_string(pmu_mask_current)));
+	MALI_DEBUG_PRINT(5, ("PM update pre: Group power stats: ... <%s>\n",
+			     mali_pm_group_stats_to_string()));
+
+	/* Figure out which cores we need to power on */
+	power_up_mask = pd_mask_wanted &
+			(pd_mask_wanted ^ pd_mask_current);
+
+	if (0 != power_up_mask) {
+		u32 power_up_mask_pmu;
+		struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
+		u32 num_groups_up = 0;
+		struct mali_l2_cache_core *
+		l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+		u32 num_l2_up = 0;
+		u32 i;
+
+#if defined(DEBUG)
+		++num_pm_updates_up;
+#endif
+
+		/*
+		 * Make sure dummy/global domain is always included when
+		 * powering up, since this is controlled by runtime PM,
+		 * and device power is on at this stage.
+		 */
+		power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;
+
+		/* Power up only real PMU domains */
+		power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+		/* But not those that happen to be powered on already */
+		power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
+				     power_up_mask;
+
+		if (0 != power_up_mask_pmu) {
+			MALI_DEBUG_ASSERT(NULL != pmu);
+			pmu_mask_current |= power_up_mask_pmu;
+			mali_pmu_power_up(pmu, power_up_mask_pmu);
+		}
+
+		/*
+		 * Put the domains themselves in power up state.
+		 * We get the groups and L2s to notify in return.
+		 */
+		mali_pm_domain_power_up(power_up_mask,
+					groups_up, &num_groups_up,
+					l2_up, &num_l2_up);
+
+		/* Need to unlock PM state lock before notifying L2 + groups */
+		mali_pm_state_unlock();
+
+		/* Notify each L2 cache that we have been powered up */
+		for (i = 0; i < num_l2_up; i++) {
+			mali_l2_cache_power_up(l2_up[i]);
+		}
+
+		/*
+		 * Tell execution module about all the groups we have
+		 * powered up. Groups will be notified as a result of this.
+		 */
+		mali_executor_group_power_up(groups_up, num_groups_up);
+
+		/* Lock state again before checking for power down */
+		mali_pm_state_lock();
+	}
+
+	/* Figure out which cores we need to power off */
+	power_down_mask = pd_mask_current &
+			  (pd_mask_wanted ^ pd_mask_current);
+
+	/*
+	 * Never power down the dummy/global domain here. This is to be done
+	 * from a suspend request (since this domain is only physically
+	 * powered down at that point)
+	 */
+	power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+	if (0 != power_down_mask) {
+		u32 power_down_mask_pmu;
+		struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+		u32 num_groups_down = 0;
+		struct mali_l2_cache_core *
+		l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+		u32 num_l2_down = 0;
+		u32 i;
+
+#if defined(DEBUG)
+		++num_pm_updates_down;
+#endif
+
+		/*
+		 * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return. + */ + mali_pm_domain_power_down(power_down_mask, + groups_down, &num_groups_down, + l2_down, &num_l2_down); + + /* Need to unlock PM state lock before notifying L2 + groups */ + mali_pm_state_unlock(); + + /* + * Tell execution module about all the groups we will be + * powering down. Groups will be notified as a result of this. + */ + if (0 < num_groups_down) { + mali_executor_group_power_down(groups_down, num_groups_down); + } + + /* Notify each L2 cache that we will be powering down */ + for (i = 0; i < num_l2_down; i++) { + mali_l2_cache_power_down(l2_down[i]); + } + + /* + * Power down only PMU domains which should not stay on + * Some domains might for instance currently be incorrectly + * powered up if default domain power state is all on. + */ + power_down_mask_pmu = pmu_mask_current & (~pd_mask_current); + + if (0 != power_down_mask_pmu) { + MALI_DEBUG_ASSERT(NULL != pmu); + pmu_mask_current &= ~power_down_mask_pmu; + mali_pmu_power_down(pmu, power_down_mask_pmu); + + } + } else { + /* + * Power down only PMU domains which should not stay on + * Some domains might for instance currently be incorrectly + * powered up if default domain power state is all on. + */ + u32 power_down_mask_pmu; + + /* No need for state lock since we'll only update PMU */ + mali_pm_state_unlock(); + + power_down_mask_pmu = pmu_mask_current & (~pd_mask_current); + + if (0 != power_down_mask_pmu) { + MALI_DEBUG_ASSERT(NULL != pmu); + pmu_mask_current &= ~power_down_mask_pmu; + mali_pmu_power_down(pmu, power_down_mask_pmu); + } + } + + MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n", + mali_pm_mask_to_string(pd_mask_current))); + MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n", + mali_pm_mask_to_string(pmu_mask_current))); + MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n", + mali_pm_group_stats_to_string())); +} + +static mali_bool mali_pm_common_suspend(void) +{ + mali_pm_state_lock(); + + if (0 != pd_mask_wanted) { + MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n")); + mali_pm_state_unlock(); + return MALI_FALSE; + } + + MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n", + mali_pm_mask_to_string(pd_mask_wanted))); + MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n", + mali_pm_mask_to_string(pd_mask_current))); + MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n", + mali_pm_mask_to_string(pmu_mask_current))); + MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n", + mali_pm_group_stats_to_string())); + + if (0 != pd_mask_current) { + /* + * We have still some domains powered on. + * It is for instance very normal that at least the + * dummy/global domain is marked as powered on at this point. + * (because it is physically powered on until this function + * returns) + */ + + struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS]; + u32 num_groups_down = 0; + struct mali_l2_cache_core * + l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES]; + u32 num_l2_down = 0; + u32 i; + + /* + * Put the domains themselves in power down state. + * We get the groups and L2s to notify in return. 
+ */ + mali_pm_domain_power_down(pd_mask_current, + groups_down, + &num_groups_down, + l2_down, + &num_l2_down); + + MALI_DEBUG_ASSERT(0 == pd_mask_current); + MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused()); + + /* Need to unlock PM state lock before notifying L2 + groups */ + mali_pm_state_unlock(); + + /* + * Tell execution module about all the groups we will be + * powering down. Groups will be notified as a result of this. + */ + if (0 < num_groups_down) { + mali_executor_group_power_down(groups_down, num_groups_down); + } + + /* Notify each L2 cache that we will be powering down */ + for (i = 0; i < num_l2_down; i++) { + mali_l2_cache_power_down(l2_down[i]); + } + + pmu_mask_current = 0; + } else { + MALI_DEBUG_ASSERT(0 == pmu_mask_current); + + MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused()); + + mali_pm_state_unlock(); + } + + MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask: [%s]\n", + mali_pm_mask_to_string(pd_mask_current))); + MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n", + mali_pm_mask_to_string(pmu_mask_current))); + MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n", + mali_pm_group_stats_to_string())); + + return MALI_TRUE; +} + +static void mali_pm_update_work(void *data) +{ + MALI_IGNORE(data); + mali_pm_update_sync(); +} + +static _mali_osk_errcode_t mali_pm_create_pm_domains(void) +{ + int i; + + /* Create all domains (including dummy domain) */ + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + if (0x0 == domain_config[i]) continue; + + if (NULL == mali_pm_domain_create(domain_config[i])) { + return _MALI_OSK_ERR_NOMEM; + } + } + + return _MALI_OSK_ERR_OK; +} + +static void mali_pm_set_default_pm_domain_config(void) +{ + MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address()); + + /* GP core */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_GP, NULL)) { + domain_config[MALI_DOMAIN_INDEX_GP] = 0x01; + } + + /* PP0 - PP3 core */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP0, NULL)) { + if (mali_is_mali400()) { + domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2; + } else if (mali_is_mali450()) { + domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 0; + } + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP1, NULL)) { + if (mali_is_mali400()) { + domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3; + } else if (mali_is_mali450()) { + domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 1; + } + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP2, NULL)) { + if (mali_is_mali400()) { + domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4; + } else if (mali_is_mali450()) { + domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 1; + } + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP3, NULL)) { + if (mali_is_mali400()) { + domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5; + } else if (mali_is_mali450()) { + domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2; + } else if (mali_is_mali470()) { + domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 1; + } + } + + /* PP4 - PP7 */ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP4, NULL)) { + domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + 
MALI_OFFSET_PP5, NULL)) { + domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP6, NULL)) { + domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI_OFFSET_PP7, NULL)) { + domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3; + } + + /* L2gp/L2PP0/L2PP4 */ + if (mali_is_mali400()) { + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI400_OFFSET_L2_CACHE0, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1; + } + } else if (mali_is_mali450()) { + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI450_OFFSET_L2_CACHE0, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI450_OFFSET_L2_CACHE1, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1; + } + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI450_OFFSET_L2_CACHE2, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3; + } + } else if (mali_is_mali470()) { + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find( + MALI470_OFFSET_L2_CACHE1, NULL)) { + domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 0; + } + } +} + +static u32 mali_pm_get_registered_cores_mask(void) +{ + int i = 0; + u32 mask = 0; + + for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) { + mask |= domain_config[i]; + } + + return mask; +} + +static void mali_pm_set_pmu_domain_config(void) +{ + int i = 0; + + _mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1); + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) { + if (0 != domain_config[i]) { + MALI_DEBUG_PRINT(2, ("Using customer pmu config:\n")); + break; + } + } + + if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) { + MALI_DEBUG_PRINT(2, ("Using hw detect pmu config:\n")); + mali_pm_set_default_pm_domain_config(); + } + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) { + if (domain_config[i]) { + MALI_DEBUG_PRINT(2, ("domain_config[%d] = 0x%x \n", i, domain_config[i])); + } + } + /* Can't override dummy domain mask */ + domain_config[MALI_DOMAIN_INDEX_DUMMY] = + 1 << MALI_DOMAIN_INDEX_DUMMY; +} + +#if defined(DEBUG) +const char *mali_pm_mask_to_string(u32 mask) +{ + static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1]; + int bit; + int str_pos = 0; + + /* Must be protected by lock since we use shared string buffer */ + if (NULL != pm_lock_exec) { + MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec); + } + + for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) { + if (mask & (1 << bit)) { + bit_str[str_pos] = 'X'; + } else { + bit_str[str_pos] = '-'; + } + str_pos++; + } + + bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0'; + + return bit_str; +} + +const char *mali_pm_group_stats_to_string(void) +{ + static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1]; + u32 num_groups = mali_group_get_glob_num_groups(); + u32 i; + + /* Must be protected by lock since we use shared string buffer */ + if (NULL != pm_lock_exec) { + MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec); + } + + for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) { + struct mali_group *group; + + group = mali_group_get_glob_group(i); + + if (MALI_TRUE == mali_group_power_is_on(group)) { + bit_str[i] = 'X'; + } else { + bit_str[i] = '-'; + } + } + + bit_str[i] = '\0'; + + return bit_str; +} +#endif + +/* + * num_pp is the number of PP cores which will be powered on given this mask + * cost is the total power cost of cores which will be powered on given this mask + */ +static void mali_pm_stat_from_mask(u32 
mask, u32 *num_pp, u32 *cost)
+{
+	u32 i;
+
+	/* loop through all cores */
+	for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+		if (!(domain_config[i] & mask)) {
+			continue;
+		}
+
+		switch (i) {
+		case MALI_DOMAIN_INDEX_GP:
+			*cost += MALI_GP_COST;
+
+			break;
+		case MALI_DOMAIN_INDEX_PP0: /* Fall through */
+		case MALI_DOMAIN_INDEX_PP1: /* Fall through */
+		case MALI_DOMAIN_INDEX_PP2: /* Fall through */
+		case MALI_DOMAIN_INDEX_PP3:
+			if (mali_is_mali400()) {
+				if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask)
+				    || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+					== domain_config[MALI_DOMAIN_INDEX_L20])) {
+					*num_pp += 1;
+				}
+			} else {
+				if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask)
+				    || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+					== domain_config[MALI_DOMAIN_INDEX_L21])) {
+					*num_pp += 1;
+				}
+			}
+
+			*cost += MALI_PP_COST;
+			break;
+		case MALI_DOMAIN_INDEX_PP4: /* Fall through */
+		case MALI_DOMAIN_INDEX_PP5: /* Fall through */
+		case MALI_DOMAIN_INDEX_PP6: /* Fall through */
+		case MALI_DOMAIN_INDEX_PP7:
+			MALI_DEBUG_ASSERT(mali_is_mali450());
+
+			if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask)
+			    || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+				== domain_config[MALI_DOMAIN_INDEX_L22])) {
+				*num_pp += 1;
+			}
+
+			*cost += MALI_PP_COST;
+			break;
+		case MALI_DOMAIN_INDEX_L20: /* Fall through */
+		case MALI_DOMAIN_INDEX_L21: /* Fall through */
+		case MALI_DOMAIN_INDEX_L22:
+			*cost += MALI_L2_COST;
+
+			break;
+		}
+	}
+}
+
+void mali_pm_power_cost_setup(void)
+{
+	/*
+	 * Two parallel arrays which store the best domain mask and its cost
+	 * The index is the number of PP cores, e.g. index 0 is for the 1 PP
+	 * option, which might have mask 0x2 with a cost of 1; lower cost is better
+	 */
+	u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+	u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+	/* Array cores_in_domain is used to store the total pp cores in each pm domain. */
+	u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+	/* max_domain_mask and max_domain_id track the highest pm domain in use. */
+	u32 max_domain_mask = 0;
+	u32 max_domain_id = 0;
+	u32 always_on_pp_cores = 0;
+
+	u32 num_pp, cost, mask;
+	u32 i, j, k;
+
+	/* Initialize statistics */
+	for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+		best_mask[i] = 0;
+		best_cost[i] = 0xFFFFFFFF; /* lower cost is better */
+	}
+
+	for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) {
+		for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+			mali_pm_domain_power_cost_result[i][j] = 0;
+		}
+	}
+
+	/* Calculate the number of pp cores of a given domain config. */
+	for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) {
+		if (0 < domain_config[i]) {
+			/* Get the max domain mask value used to calculate power cost;
+			 * always-on pp cores are not counted in. */
+			if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i]
+			    && max_domain_mask < domain_config[i]) {
+				max_domain_mask = domain_config[i];
+			}
+
+			if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) {
+				always_on_pp_cores++;
+			}
+		}
+	}
+	max_domain_id = _mali_osk_fls(max_domain_mask);
+
+	/*
+	 * Try all combinations of power domains and check how many PP cores
+	 * they have and their power cost.
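+ *
+ * With the costs defined above (GP = 3, PP = 6, L2 = 1), a mask that powers
+ * one GP domain, one PP domain and the matching L2 domain would for example
+ * yield cost = 3 + 6 + 1 = 10 with num_pp = 1.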
+ */ + for (mask = 0; mask < (1 << max_domain_id); mask++) { + num_pp = 0; + cost = 0; + + mali_pm_stat_from_mask(mask, &num_pp, &cost); + + /* This mask works for every variant from MP1 up to num_pp PP cores; update the statistics for each. */ + for (i = 0; i < num_pp; i++) { + if (best_cost[i] >= cost) { + best_cost[i] = cost; + best_mask[i] = mask; + } + } + } + + /* + * If we want to enable x PP cores and x is less than the number of always-on + * PP cores, then all of the PP cores we enable must be always-on cores. + */ + for (i = 0; i < mali_executor_get_num_cores_total(); i++) { + if (i < always_on_pp_cores) { + mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1] + = i + 1; + } else { + mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1] + = always_on_pp_cores; + } + } + + /* In this loop, variable i represents the number of non-always-on PP cores we want to enable. */ + for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) { + if (best_mask[i] == 0) { + /* This MP variant is not available */ + continue; + } + + for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) { + cores_in_domain[j] = 0; + } + + for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) { + if (0 < domain_config[j] + && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[j])) { + cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++; + } + } + + /* In this loop, j counts the number of PP cores we have already picked. */ + for (j = 0; j <= i;) { + /* k visits every domain to check the number of PP cores remaining in it. */ + for (k = 0; k < max_domain_id; k++) { + /* If domain k is enabled in best_mask[i] and still has spare PP cores, + * we must pick at least one PP core from it, + * and then move on to the next enabled PM domain. */ + if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) { + cores_in_domain[k]--; + mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++; + j++; + if (j > i) { + break; + } + } + } + } + } +} + +/* + * During core scaling, this function is called to return the mask that + * achieves the best PP group power cost. + */ +void mali_pm_get_best_power_cost_mask(int num_requested, int *dst) +{ + MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested)); + + _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int)); +} + +u32 mali_pm_get_current_mask(void) +{ + return pd_mask_current; +} + +u32 mali_pm_get_wanted_mask(void) +{ + return pd_mask_wanted; +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pm_domain.c b/drivers/gpu/arm/mali400/common/mali_pm_domain.c --- a/drivers/gpu/arm/mali400/common/mali_pm_domain.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pm_domain.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_pm_domain.h" +#include "mali_pmu.h" +#include "mali_group.h" +#include "mali_pm.h" + +static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] = +{ NULL, }; + +void mali_pm_domain_initialize(void) +{ + /* Domains will be initialized/created on demand */ +} + +void mali_pm_domain_terminate(void) +{ + int i; + + /* Delete all domains that have been created */ + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + mali_pm_domain_delete(mali_pm_domains[i]); + mali_pm_domains[i] = NULL; + } +} + +struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask) +{ + struct mali_pm_domain *domain = NULL; + u32 domain_id = 0; + + domain = mali_pm_domain_get_from_mask(pmu_mask); + if (NULL != domain) return domain; + + MALI_DEBUG_PRINT(2, + ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", + pmu_mask)); + + domain = (struct mali_pm_domain *)_mali_osk_malloc( + sizeof(struct mali_pm_domain)); + if (NULL != domain) { + domain->power_is_on = MALI_FALSE; + domain->pmu_mask = pmu_mask; + domain->use_count = 0; + _mali_osk_list_init(&domain->group_list); + _mali_osk_list_init(&domain->l2_cache_list); + + domain_id = _mali_osk_fls(pmu_mask) - 1; + /* Verify the domain_id */ + MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > domain_id); + /* Verify that only one bit is set in pmu_mask */ + MALI_DEBUG_ASSERT((1 << domain_id) == pmu_mask); + mali_pm_domains[domain_id] = domain; + + return domain; + } else { + MALI_DEBUG_PRINT_ERROR(("Unable to create PM domain\n")); + } + + return NULL; +} + +void mali_pm_domain_delete(struct mali_pm_domain *domain) +{ + if (NULL == domain) { + return; + } + + _mali_osk_list_delinit(&domain->group_list); + _mali_osk_list_delinit(&domain->l2_cache_list); + + _mali_osk_free(domain); +} + +void mali_pm_domain_add_group(struct mali_pm_domain *domain, + struct mali_group *group) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + MALI_DEBUG_ASSERT_POINTER(group); + + /* + * Use addtail because the virtual group is created last and it needs + * to be at the end of the list (in order to be activated after + * all children).
+ */ + _mali_osk_list_addtail(&group->pm_domain_list, &domain->group_list); +} + +void mali_pm_domain_add_l2_cache(struct mali_pm_domain *domain, + struct mali_l2_cache_core *l2_cache) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + MALI_DEBUG_ASSERT_POINTER(l2_cache); + _mali_osk_list_add(&l2_cache->pm_domain_list, &domain->l2_cache_list); +} + +struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask) +{ + u32 id = 0; + + if (0 == mask) { + return NULL; + } + + id = _mali_osk_fls(mask) - 1; + + MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id); + /* Verify that only one bit is set in mask */ + MALI_DEBUG_ASSERT((1 << id) == mask); + + return mali_pm_domains[id]; +} + +struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id) +{ + MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id); + + return mali_pm_domains[id]; +} + +u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + + if (0 == domain->use_count) { + _mali_osk_pm_dev_ref_get_async(); + } + + ++domain->use_count; + MALI_DEBUG_PRINT(4, ("PM domain %p: ref_get, use_count => %u\n", domain, domain->use_count)); + + /* Return our mask so caller can check this against wanted mask */ + return domain->pmu_mask; +} + +u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + + --domain->use_count; + MALI_DEBUG_PRINT(4, ("PM domain %p: ref_put, use_count => %u\n", domain, domain->use_count)); + + if (0 == domain->use_count) { + _mali_osk_pm_dev_ref_put(); + } + + /* + * Return the PMU mask which now could be powered down + * (the bit for this domain). + * This is the responsibility of the caller (mali_pm) + */ + return (0 == domain->use_count ? domain->pmu_mask : 0); +} + +#if MALI_STATE_TRACKING +u32 mali_pm_domain_get_id(struct mali_pm_domain *domain) +{ + u32 id = 0; + + MALI_DEBUG_ASSERT_POINTER(domain); + MALI_DEBUG_ASSERT(0 != domain->pmu_mask); + + id = _mali_osk_fls(domain->pmu_mask) - 1; + + MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id); + /* Verify that only one bit is set in pmu_mask */ + MALI_DEBUG_ASSERT((1 << id) == domain->pmu_mask); + /* Verify that we have stored the domain at the right id/index */ + MALI_DEBUG_ASSERT(domain == mali_pm_domains[id]); + + return id; +} +#endif + +#if defined(DEBUG) +mali_bool mali_pm_domain_all_unused(void) +{ + int i; + + for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) { + if (NULL == mali_pm_domains[i]) { + /* Nothing to check */ + continue; + } + + if (MALI_TRUE == mali_pm_domains[i]->power_is_on) { + /* Not ready for suspend! */ + return MALI_FALSE; + } + + if (0 != mali_pm_domains[i]->use_count) { + /* Not ready for suspend! */ + return MALI_FALSE; + } + } + + return MALI_TRUE; +} +#endif diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pm_domain.h b/drivers/gpu/arm/mali400/common/mali_pm_domain.h --- a/drivers/gpu/arm/mali400/common/mali_pm_domain.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pm_domain.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +#ifndef __MALI_PM_DOMAIN_H__ +#define __MALI_PM_DOMAIN_H__ + +#include "mali_kernel_common.h" +#include "mali_osk.h" + +#include "mali_l2_cache.h" +#include "mali_group.h" +#include "mali_pmu.h" + +/* Instances are protected by PM state lock */ +struct mali_pm_domain { + mali_bool power_is_on; + s32 use_count; + u32 pmu_mask; + + /* Zero or more groups can belong to this domain */ + _mali_osk_list_t group_list; + + /* Zero or more L2 caches can belong to this domain */ + _mali_osk_list_t l2_cache_list; +}; + + +void mali_pm_domain_initialize(void); +void mali_pm_domain_terminate(void); + +struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask); +void mali_pm_domain_delete(struct mali_pm_domain *domain); + +void mali_pm_domain_add_l2_cache( + struct mali_pm_domain *domain, + struct mali_l2_cache_core *l2_cache); +void mali_pm_domain_add_group(struct mali_pm_domain *domain, + struct mali_group *group); + +struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask); +struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id); + +/* Ref counting */ +u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain); +u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain); + +MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_group_list( + struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return &domain->group_list; +} + +MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_l2_cache_list( + struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return &domain->l2_cache_list; +} + +MALI_STATIC_INLINE mali_bool mali_pm_domain_power_is_on( + struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return domain->power_is_on; +} + +MALI_STATIC_INLINE void mali_pm_domain_set_power_on( + struct mali_pm_domain *domain, + mali_bool power_is_on) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + domain->power_is_on = power_is_on; +} + +MALI_STATIC_INLINE u32 mali_pm_domain_get_use_count( + struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return domain->use_count; +} + +#if MALI_STATE_TRACKING +u32 mali_pm_domain_get_id(struct mali_pm_domain *domain); + +MALI_STATIC_INLINE u32 mali_pm_domain_get_mask(struct mali_pm_domain *domain) +{ + MALI_DEBUG_ASSERT_POINTER(domain); + return domain->pmu_mask; +} +#endif + +#if defined(DEBUG) +mali_bool mali_pm_domain_all_unused(void); +#endif + +#endif /* __MALI_PM_DOMAIN_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pm.h b/drivers/gpu/arm/mali400/common/mali_pm.h --- a/drivers/gpu/arm/mali400/common/mali_pm.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pm.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_PM_H__ +#define __MALI_PM_H__ + +#include "mali_osk.h" +#include "mali_pm_domain.h" + +#define MALI_DOMAIN_INDEX_GP 0 +#define MALI_DOMAIN_INDEX_PP0 1 +#define MALI_DOMAIN_INDEX_PP1 2 +#define MALI_DOMAIN_INDEX_PP2 3 +#define MALI_DOMAIN_INDEX_PP3 4 +#define MALI_DOMAIN_INDEX_PP4 5 +#define MALI_DOMAIN_INDEX_PP5 6 +#define MALI_DOMAIN_INDEX_PP6 7 +#define MALI_DOMAIN_INDEX_PP7 8 +#define MALI_DOMAIN_INDEX_L20 9 +#define MALI_DOMAIN_INDEX_L21 10 +#define MALI_DOMAIN_INDEX_L22 11 +/* + * The dummy domain is used when there is no physical power domain + * (e.g. no PMU or always-on cores) + */ +#define MALI_DOMAIN_INDEX_DUMMY 12 +#define MALI_MAX_NUMBER_OF_DOMAINS 13 + +/** + * Initialize the Mali PM module + * + * PM module covers Mali PM core, PM domains and Mali PMU + */ +_mali_osk_errcode_t mali_pm_initialize(void); + +/** + * Terminate the Mali PM module + */ +void mali_pm_terminate(void); + +void mali_pm_exec_lock(void); +void mali_pm_exec_unlock(void); + + +struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index, + struct mali_l2_cache_core *l2_cache); +struct mali_pm_domain *mali_pm_register_group(u32 domain_index, + struct mali_group *group); + +mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains, + struct mali_group **groups, + u32 num_domains); +mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains, + u32 num_domains); + +void mali_pm_init_begin(void); +void mali_pm_init_end(void); + +void mali_pm_update_sync(void); +void mali_pm_update_async(void); + +/* Callback functions for system power management */ +void mali_pm_os_suspend(mali_bool os_suspend); +void mali_pm_os_resume(void); + +mali_bool mali_pm_runtime_suspend(void); +void mali_pm_runtime_resume(void); + +#if MALI_STATE_TRACKING +u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain, + char *buf, u32 size); +#endif + +void mali_pm_power_cost_setup(void); + +void mali_pm_get_best_power_cost_mask(int num_requested, int *dst); + +#if defined(DEBUG) +const char *mali_pm_mask_to_string(u32 mask); +#endif + +u32 mali_pm_get_current_mask(void); +u32 mali_pm_get_wanted_mask(void); +#endif /* __MALI_PM_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pm_metrics.c b/drivers/gpu/arm/mali400/common/mali_pm_metrics.c --- a/drivers/gpu/arm/mali400/common/mali_pm_metrics.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pm_metrics.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ +#include "mali_pm_metrics.h" +#include "mali_osk_locks.h" +#include "mali_osk_mali.h" +#include + +#define MALI_PM_TIME_SHIFT 0 +#define MALI_UTILIZATION_MAX_PERIOD 80000000/* ns = 100ms */ + +_mali_osk_errcode_t mali_pm_metrics_init(struct mali_device *mdev) +{ + int i = 0; + + MALI_DEBUG_ASSERT(mdev != NULL); + + mdev->mali_metrics.time_period_start = ktime_get(); + mdev->mali_metrics.time_period_start_gp = mdev->mali_metrics.time_period_start; + mdev->mali_metrics.time_period_start_pp = mdev->mali_metrics.time_period_start; + + mdev->mali_metrics.time_busy = 0; + mdev->mali_metrics.time_idle = 0; + mdev->mali_metrics.prev_busy = 0; + mdev->mali_metrics.prev_idle = 0; + mdev->mali_metrics.num_running_gp_cores = 0; + mdev->mali_metrics.num_running_pp_cores = 0; + mdev->mali_metrics.time_busy_gp = 0; + mdev->mali_metrics.time_idle_gp = 0; + + for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) { + mdev->mali_metrics.time_busy_pp[i] = 0; + mdev->mali_metrics.time_idle_pp[i] = 0; + } + mdev->mali_metrics.gpu_active = MALI_FALSE; + + mdev->mali_metrics.lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST); + if (NULL == mdev->mali_metrics.lock) { + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +void mali_pm_metrics_term(struct mali_device *mdev) +{ + _mali_osk_spinlock_irq_term(mdev->mali_metrics.lock); +} + +/*caller needs to hold mdev->mali_metrics.lock before calling this function*/ +void mali_pm_record_job_status(struct mali_device *mdev) +{ + ktime_t now; + ktime_t diff; + u64 ns_time; + + MALI_DEBUG_ASSERT(mdev != NULL); + + now = ktime_get(); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy += ns_time; + mdev->mali_metrics.time_period_start = now; +} + +void mali_pm_record_gpu_idle(mali_bool is_gp) +{ + ktime_t now; + ktime_t diff; + u64 ns_time; + struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev); + + MALI_DEBUG_ASSERT(mdev != NULL); + + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); + now = ktime_get(); + + if (MALI_TRUE == is_gp) { + --mdev->mali_metrics.num_running_gp_cores; + if (0 == mdev->mali_metrics.num_running_gp_cores) { + diff = ktime_sub(now, mdev->mali_metrics.time_period_start_gp); + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy_gp += ns_time; + mdev->mali_metrics.time_period_start_gp = now; + + if (0 == mdev->mali_metrics.num_running_pp_cores) { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy += ns_time; + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.gpu_active = MALI_FALSE; + } + } + } else { + --mdev->mali_metrics.num_running_pp_cores; + if (0 == mdev->mali_metrics.num_running_pp_cores) { + diff = ktime_sub(now, mdev->mali_metrics.time_period_start_pp); + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy_pp[0] += ns_time; + mdev->mali_metrics.time_period_start_pp = now; + + if (0 == mdev->mali_metrics.num_running_gp_cores) { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + ns_time = (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_busy += ns_time; + mdev->mali_metrics.time_period_start = now; 
+ mdev->mali_metrics.gpu_active = MALI_FALSE; + } + } + } + + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} + +void mali_pm_record_gpu_active(mali_bool is_gp) +{ + ktime_t now; + ktime_t diff; + struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev); + + MALI_DEBUG_ASSERT(mdev != NULL); + + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); + now = ktime_get(); + + if (MALI_TRUE == is_gp) { + mdev->mali_metrics.num_running_gp_cores++; + if (1 == mdev->mali_metrics.num_running_gp_cores) { + diff = ktime_sub(now, mdev->mali_metrics.time_period_start_gp); + mdev->mali_metrics.time_idle_gp += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_period_start_gp = now; + if (0 == mdev->mali_metrics.num_running_pp_cores) { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_FALSE); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.gpu_active = MALI_TRUE; + } + } else { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE); + } + } else { + mdev->mali_metrics.num_running_pp_cores++; + if (1 == mdev->mali_metrics.num_running_pp_cores) { + diff = ktime_sub(now, mdev->mali_metrics.time_period_start_pp); + mdev->mali_metrics.time_idle_pp[0] += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_period_start_pp = now; + if (0 == mdev->mali_metrics.num_running_gp_cores) { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_FALSE); + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.gpu_active = MALI_TRUE; + } + } else { + MALI_DEBUG_ASSERT(mdev->mali_metrics.gpu_active == MALI_TRUE); + } + } + + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} + + +/* Caller needs to hold mdev->mali_metrics.lock before calling this function. */ +static void mali_pm_get_dvfs_utilisation_calc(struct mali_device *mdev, ktime_t now) +{ + ktime_t diff; + + MALI_DEBUG_ASSERT(mdev != NULL); + + diff = ktime_sub(now, mdev->mali_metrics.time_period_start); + + if (mdev->mali_metrics.gpu_active) { + mdev->mali_metrics.time_busy += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + } else { + mdev->mali_metrics.time_idle += (u64)(ktime_to_ns(diff) >> MALI_PM_TIME_SHIFT); + } +} + +/* Caller needs to hold mdev->mali_metrics.lock before calling this function.
*/ +static void mali_pm_reset_dvfs_utilisation_unlocked(struct mali_device *mdev, ktime_t now) +{ + /* Store previous value */ + mdev->mali_metrics.prev_idle = mdev->mali_metrics.time_idle; + mdev->mali_metrics.prev_busy = mdev->mali_metrics.time_busy; + + /* Reset current values */ + mdev->mali_metrics.time_period_start = now; + mdev->mali_metrics.time_period_start_gp = now; + mdev->mali_metrics.time_period_start_pp = now; + mdev->mali_metrics.time_idle = 0; + mdev->mali_metrics.time_busy = 0; + + mdev->mali_metrics.time_busy_gp = 0; + mdev->mali_metrics.time_idle_gp = 0; + mdev->mali_metrics.time_busy_pp[0] = 0; + mdev->mali_metrics.time_idle_pp[0] = 0; +} + +void mali_pm_reset_dvfs_utilisation(struct mali_device *mdev) +{ + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); + mali_pm_reset_dvfs_utilisation_unlocked(mdev, ktime_get()); + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} + +void mali_pm_get_dvfs_utilisation(struct mali_device *mdev, + unsigned long *total_out, unsigned long *busy_out) +{ + ktime_t now = ktime_get(); + u64 busy = 0; + u64 total = 0; + + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); + + mali_pm_get_dvfs_utilisation_calc(mdev, now); + + busy = mdev->mali_metrics.time_busy; + total = busy + mdev->mali_metrics.time_idle; + + /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (80 ms); if the + * current window is under half that, fold in the previous period to + * smooth the estimate. */ + if (total >= MALI_UTILIZATION_MAX_PERIOD) { + mali_pm_reset_dvfs_utilisation_unlocked(mdev, now); + } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) { + total += mdev->mali_metrics.prev_idle + + mdev->mali_metrics.prev_busy; + busy += mdev->mali_metrics.prev_busy; + } + + *total_out = (unsigned long)total; + *busy_out = (unsigned long)busy; + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} + +void mali_pm_metrics_spin_lock(void) +{ + struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev); + _mali_osk_spinlock_irq_lock(mdev->mali_metrics.lock); +} + +void mali_pm_metrics_spin_unlock(void) +{ + struct mali_device *mdev = dev_get_drvdata(&mali_platform_device->dev); + _mali_osk_spinlock_irq_unlock(mdev->mali_metrics.lock); +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pm_metrics.h b/drivers/gpu/arm/mali400/common/mali_pm_metrics.h --- a/drivers/gpu/arm/mali400/common/mali_pm_metrics.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pm_metrics.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PM_METRICS_H__ +#define __MALI_PM_METRICS_H__ + +#ifdef CONFIG_MALI_DEVFREQ +#include "mali_osk_locks.h" +#include "mali_group.h" + +struct mali_device; + +/** + * Metrics data collected for use by the power management framework.
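 + * All busy/idle fields below hold nanoseconds accumulated from ktime deltas + * (right-shifted by MALI_PM_TIME_SHIFT, which is currently 0).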
+ */ +struct mali_pm_metrics_data { + ktime_t time_period_start; + u64 time_busy; + u64 time_idle; + u64 prev_busy; + u64 prev_idle; + u32 num_running_gp_cores; + u32 num_running_pp_cores; + ktime_t time_period_start_gp; + u64 time_busy_gp; + u64 time_idle_gp; + ktime_t time_period_start_pp; + u64 time_busy_pp[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + u64 time_idle_pp[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS]; + mali_bool gpu_active; + _mali_osk_spinlock_irq_t *lock; +}; + +/** + * Initialize/start the Mali GPU PM metrics reporting. + * + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t mali_pm_metrics_init(struct mali_device *mdev); + +/** + * Terminate the Mali GPU PM metrics reporting + */ +void mali_pm_metrics_term(struct mali_device *mdev); + +/** + * Should be called when the GPU is about to execute a job + */ +void mali_pm_record_gpu_active(mali_bool is_gp); + +/** + * Should be called when a job is finished + */ +void mali_pm_record_gpu_idle(mali_bool is_gp); + +void mali_pm_reset_dvfs_utilisation(struct mali_device *mdev); + +void mali_pm_get_dvfs_utilisation(struct mali_device *mdev, unsigned long *total_out, unsigned long *busy_out); + +void mali_pm_metrics_spin_lock(void); + +void mali_pm_metrics_spin_unlock(void); +#else +void mali_pm_record_gpu_idle(mali_bool is_gp) {} +void mali_pm_record_gpu_active(mali_bool is_gp) {} +#endif +#endif /* __MALI_PM_METRICS_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pmu.c b/drivers/gpu/arm/mali400/common/mali_pmu.c --- a/drivers/gpu/arm/mali400/common/mali_pmu.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pmu.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +/** + * @file mali_pmu.c + * Mali driver functions for Mali 400 PMU hardware + */ +#include "mali_hw_core.h" +#include "mali_pmu.h" +#include "mali_pp.h" +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_pm.h" +#include "mali_osk_mali.h" + +struct mali_pmu_core *mali_global_pmu_core = NULL; + +static _mali_osk_errcode_t mali_pmu_wait_for_command_finish( + struct mali_pmu_core *pmu); + +struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource) +{ + struct mali_pmu_core *pmu; + + MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core); + MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n")); + + pmu = (struct mali_pmu_core *)_mali_osk_malloc( + sizeof(struct mali_pmu_core)); + if (NULL != pmu) { + pmu->registered_cores_mask = 0; /* to be set later */ + + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, + resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) { + + pmu->switch_delay = _mali_osk_get_pmu_switch_delay(); + + mali_global_pmu_core = pmu; + + return pmu; + } + _mali_osk_free(pmu); + } + + return NULL; +} + +void mali_pmu_delete(struct mali_pmu_core *pmu) +{ + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core); + + MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n")); + + mali_global_pmu_core = NULL; + + mali_hw_core_delete(&pmu->hw_core); + _mali_osk_free(pmu); +} + +void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask) +{ + pmu->registered_cores_mask = mask; +} + +void mali_pmu_reset(struct mali_pmu_core *pmu) +{ + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + + /* Setup the desired defaults */ + mali_hw_core_register_write_relaxed(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_MASK, 0); + mali_hw_core_register_write_relaxed(&pmu->hw_core, + PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay); +} + +void mali_pmu_power_up_all(struct mali_pmu_core *pmu) +{ + u32 stat; + + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + + mali_pm_exec_lock(); + + mali_pmu_reset(pmu); + + /* Now simply power up the domains which are marked as powered down */ + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + mali_pmu_power_up(pmu, stat); + + mali_pm_exec_unlock(); +} + +void mali_pmu_power_down_all(struct mali_pmu_core *pmu) +{ + u32 stat; + + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + + mali_pm_exec_lock(); + + /* Now simply power down the domains which are marked as powered up */ + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask); + + mali_pm_exec_unlock(); +} + +_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask) +{ + u32 stat; + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask); + MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_RAWSTAT) & + PMU_REG_VAL_IRQ)); + + MALI_DEBUG_PRINT(3, + ("PMU power down: ...................... [%s]\n", + mali_pm_mask_to_string(mask))); + + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + + /* + * Assert that we are not powering down domains which are already + * powered down. 
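+ * (A set bit in PMU_REG_ADDR_MGMT_STATUS marks a sleeping domain, so none + * of the bits in mask may be set in stat at this point.)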
+ */ + MALI_DEBUG_ASSERT(0 == (stat & mask)); + + mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY); + + if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK; + + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_POWER_DOWN, mask); + + /* + * Do not wait for interrupt on Mali-300/400 if all domains are + * powered off by our power down command, because the HW will simply + * not generate an interrupt in this case. + */ + if (mali_is_mali450() || mali_is_mali470() || pmu->registered_cores_mask != (mask | stat)) { + err = mali_pmu_wait_for_command_finish(pmu); + if (_MALI_OSK_ERR_OK != err) { + return err; + } + } else { + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ); + } + +#if defined(DEBUG) + /* Verify power status of domains after power down */ + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + MALI_DEBUG_ASSERT(mask == (stat & mask)); +#endif + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask) +{ + u32 stat; + _mali_osk_errcode_t err; +#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP) + u32 current_domain; +#endif + + MALI_DEBUG_ASSERT_POINTER(pmu); + MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0); + MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask); + MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_RAWSTAT) & + PMU_REG_VAL_IRQ)); + + MALI_DEBUG_PRINT(3, + ("PMU power up: ........................ [%s]\n", + mali_pm_mask_to_string(mask))); + + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + stat &= pmu->registered_cores_mask; + + mask &= ~(0x1 << MALI_DOMAIN_INDEX_DUMMY); + if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK; + + /* + * Assert that we are only powering up domains which are currently + * powered down. 
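+ * (Every domain in mask must have its sleep bit set in stat, i.e. mask + * must be a subset of the powered-down domains.)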
+ */ + MALI_DEBUG_ASSERT(mask == (stat & mask)); + +#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP) + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_POWER_UP, mask); + + err = mali_pmu_wait_for_command_finish(pmu); + if (_MALI_OSK_ERR_OK != err) { + return err; + } +#else + for (current_domain = 1; + current_domain <= pmu->registered_cores_mask; + current_domain <<= 1) { + if (current_domain & mask & stat) { + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_POWER_UP, + current_domain); + + err = mali_pmu_wait_for_command_finish(pmu); + if (_MALI_OSK_ERR_OK != err) { + return err; + } + } + } +#endif + +#if defined(DEBUG) + /* Verify power status of domains after power up */ + stat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_STATUS); + MALI_DEBUG_ASSERT(0 == (stat & mask)); +#endif /* defined(DEBUG) */ + + return _MALI_OSK_ERR_OK; +} + +static _mali_osk_errcode_t mali_pmu_wait_for_command_finish( + struct mali_pmu_core *pmu) +{ + u32 rawstat; + u32 timeout = MALI_REG_POLL_COUNT_SLOW; + + MALI_DEBUG_ASSERT(pmu); + + /* Wait for the command to complete */ + do { + rawstat = mali_hw_core_register_read(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_RAWSTAT); + --timeout; + } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout); + + MALI_DEBUG_ASSERT(0 < timeout); + + if (0 == timeout) { + return _MALI_OSK_ERR_TIMEOUT; + } + + mali_hw_core_register_write(&pmu->hw_core, + PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ); + + return _MALI_OSK_ERR_OK; +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pmu.h b/drivers/gpu/arm/mali400/common/mali_pmu.h --- a/drivers/gpu/arm/mali400/common/mali_pmu.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pmu.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** + * @file mali_pmu.h + * Mali driver functions for the Mali 400 PMU hardware + */ + +#ifndef __MALI_PMU_H__ +#define __MALI_PMU_H__ + +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_hw_core.h" + +/** @brief Mali in-built PMU hardware info; the PMU hardware has knowledge of the cores' power mask + */ +struct mali_pmu_core { + struct mali_hw_core hw_core; + u32 registered_cores_mask; + u32 switch_delay; +}; + +/** @brief Register layout for hardware PMU + */ +typedef enum { + PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */ + PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */ + PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */ + PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */ + PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */ + PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */ + PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */ + PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */ +} pmu_reg_addr_mgmt_addr; + +#define PMU_REG_VAL_IRQ 1 + +extern struct mali_pmu_core *mali_global_pmu_core; + +/** @brief Initialisation of MALI PMU + * + * This is called from the entry point of the driver in order to create and initialize the PMU resource + * + * @param resource Pointer to the PMU resource + * @return The created PMU object, or NULL in case of failure. + */ +struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource); + +/** @brief Deallocates the PMU resource + * + * This is called on driver exit to terminate the PMU resource + * + * @param pmu Pointer to PMU core object to delete + */ +void mali_pmu_delete(struct mali_pmu_core *pmu); + +/** @brief Set registered cores mask + * + * @param pmu Pointer to PMU core object + * @param mask All available/valid domain bits + */ +void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask); + +/** @brief Retrieves the Mali PMU core object (if any) + * + * @return The Mali PMU object, or NULL if no PMU exists. + */ +MALI_STATIC_INLINE struct mali_pmu_core *mali_pmu_get_global_pmu_core(void) +{ + return mali_global_pmu_core; +} + +/** @brief Reset PMU core + * + * @param pmu Pointer to PMU core object to reset + */ +void mali_pmu_reset(struct mali_pmu_core *pmu); + +void mali_pmu_power_up_all(struct mali_pmu_core *pmu); + +void mali_pmu_power_down_all(struct mali_pmu_core *pmu); + +/** @brief Returns a mask of the currently powered up domains + * + * @param pmu Pointer to PMU core object + */ +MALI_STATIC_INLINE u32 mali_pmu_get_mask(struct mali_pmu_core *pmu) +{ + u32 stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS); + return ((~stat) & pmu->registered_cores_mask); +} + +/** @brief MALI GPU power down using MALI in-built PMU + * + * Called to power down the specified cores. + * + * @param pmu Pointer to PMU core object to power down + * @param mask Mask specifying which power domains to power down + * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error. + */ +_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask); + +/** @brief MALI GPU power up using MALI in-built PMU + * + * Called to power up the specified cores.
+ * + * @param pmu Pointer to PMU core object to power up + * @param mask Mask specifying which power domains to power up + * @return _MALI_OSK_ERR_OK on success; otherwise a suitable _mali_osk_errcode_t error. + */ +_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask); + +#endif /* __MALI_PMU_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pp.c b/drivers/gpu/arm/mali400/common/mali_pp.c --- a/drivers/gpu/arm/mali400/common/mali_pp.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pp.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,502 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_pp_job.h" +#include "mali_pp.h" +#include "mali_hw_core.h" +#include "mali_group.h" +#include "regs/mali_200_regs.h" +#include "mali_kernel_common.h" +#include "mali_kernel_core.h" + +#if defined(CONFIG_MALI400_PROFILING) +#include "mali_osk_profiling.h" +#endif + +/* Number of frame registers on Mali-200 */ +#define MALI_PP_MALI200_NUM_FRAME_REGISTERS ((0x04C/4)+1) +/* Number of frame registers on Mali-300 and later */ +#define MALI_PP_MALI400_NUM_FRAME_REGISTERS ((0x058/4)+1) + +static struct mali_pp_core *mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES] = { NULL }; +static u32 mali_global_num_pp_cores = 0; + +/* Interrupt handlers */ +static void mali_pp_irq_probe_trigger(void *data); +static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data); + +struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id) +{ + struct mali_pp_core *core = NULL; + + MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description)); + MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base)); + + if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES) { + MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n")); + return NULL; + } + + core = _mali_osk_calloc(1, sizeof(struct mali_pp_core)); + if (NULL != core) { + core->core_id = mali_global_num_pp_cores; + core->bcast_id = bcast_id; + + if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK)) { + _mali_osk_errcode_t ret; + + if (!is_virtual) { + ret = mali_pp_reset(core); + } else { + ret = _MALI_OSK_ERR_OK; + } + + if (_MALI_OSK_ERR_OK == ret) { + ret = mali_group_add_pp_core(group, core); + if (_MALI_OSK_ERR_OK == ret) { + /* Setup IRQ handlers (which will do IRQ probing if needed) */ + MALI_DEBUG_ASSERT(!is_virtual || -1 != resource->irq); + + core->irq = _mali_osk_irq_init(resource->irq, + mali_group_upper_half_pp, + group, + mali_pp_irq_probe_trigger, + mali_pp_irq_probe_ack, + core, + resource->description); + if (NULL != core->irq) { + mali_global_pp_cores[mali_global_num_pp_cores] = core; + mali_global_num_pp_cores++; + + return core; + } else { + MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description)); + } + mali_group_remove_pp_core(group); + } else { + MALI_PRINT_ERROR(("Mali PP: Failed to add core
%s to group\n", core->hw_core.description)); + } + } + mali_hw_core_delete(&core->hw_core); + } + + _mali_osk_free(core); + } else { + MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n")); + } + + return NULL; +} + +void mali_pp_delete(struct mali_pp_core *core) +{ + u32 i; + + MALI_DEBUG_ASSERT_POINTER(core); + + _mali_osk_irq_term(core->irq); + mali_hw_core_delete(&core->hw_core); + + /* Remove core from global list */ + for (i = 0; i < mali_global_num_pp_cores; i++) { + if (mali_global_pp_cores[i] == core) { + mali_global_pp_cores[i] = NULL; + mali_global_num_pp_cores--; + + if (i != mali_global_num_pp_cores) { + /* We removed a PP core from the middle of the array -- move the last + * PP core to the current position to close the gap */ + mali_global_pp_cores[i] = mali_global_pp_cores[mali_global_num_pp_cores]; + mali_global_pp_cores[mali_global_num_pp_cores] = NULL; + } + + break; + } + } + + _mali_osk_free(core); +} + +void mali_pp_stop_bus(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + /* Will only send the stop bus command, and not wait for it to complete */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS); +} + +_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core) +{ + int i; + + MALI_DEBUG_ASSERT_POINTER(core); + + /* Send the stop bus command. */ + mali_pp_stop_bus(core); + + /* Wait for bus to be stopped */ + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED) + break; + } + + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS))); + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +/* Frame register reset values. + * Taken from the Mali400 TRM, 3.6. 
Pixel processor control register summary */ +static const u32 mali_frame_registers_reset_values[_MALI_PP_MAX_FRAME_REGISTERS] = { + 0x0, /* Renderer List Address Register */ + 0x0, /* Renderer State Word Base Address Register */ + 0x0, /* Renderer Vertex Base Register */ + 0x2, /* Feature Enable Register */ + 0x0, /* Z Clear Value Register */ + 0x0, /* Stencil Clear Value Register */ + 0x0, /* ABGR Clear Value 0 Register */ + 0x0, /* ABGR Clear Value 1 Register */ + 0x0, /* ABGR Clear Value 2 Register */ + 0x0, /* ABGR Clear Value 3 Register */ + 0x0, /* Bounding Box Left Right Register */ + 0x0, /* Bounding Box Bottom Register */ + 0x0, /* FS Stack Address Register */ + 0x0, /* FS Stack Size and Initial Value Register */ + 0x0, /* Reserved */ + 0x0, /* Reserved */ + 0x0, /* Origin Offset X Register */ + 0x0, /* Origin Offset Y Register */ + 0x75, /* Subpixel Specifier Register */ + 0x0, /* Tiebreak mode Register */ + 0x0, /* Polygon List Format Register */ + 0x0, /* Scaling Register */ + 0x0 /* Tilebuffer configuration Register */ +}; + +/* WBx register reset values */ +static const u32 mali_wb_registers_reset_values[_MALI_PP_MAX_WB_REGISTERS] = { + 0x0, /* WBx Source Select Register */ + 0x0, /* WBx Target Address Register */ + 0x0, /* WBx Target Pixel Format Register */ + 0x0, /* WBx Target AA Format Register */ + 0x0, /* WBx Target Layout */ + 0x0, /* WBx Target Scanline Length */ + 0x0, /* WBx Target Flags Register */ + 0x0, /* WBx MRT Enable Register */ + 0x0, /* WBx MRT Offset Register */ + 0x0, /* WBx Global Test Enable Register */ + 0x0, /* WBx Global Test Reference Value Register */ + 0x0 /* WBx Global Test Compare Function Register */ +}; + +/* Performance Counter 0 Enable Register reset value */ +static const u32 mali_perf_cnt_enable_reset_value = 0; + +_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core) +{ + /* Bus must be stopped before calling this function */ + const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT; + const u32 reset_invalid_value = 0xC0FFE000; + const u32 reset_check_value = 0xC01A0000; + int i; + + MALI_DEBUG_ASSERT_POINTER(core); + MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description)); + + /* Set register to a bogus value. 
The register will be used to detect when reset is complete */ + mali_hw_core_register_write_relaxed(&core->hw_core, reset_wait_target_register, reset_invalid_value); + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE); + + /* Force core to reset */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET); + + /* Wait for reset to be complete */ + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value); + if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register)) { + break; + } + } + + if (MALI_REG_POLL_COUNT_FAST == i) { + MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n")); + } + + mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, 0x00000000); /* set it back to the default */ + /* Re-enable interrupts */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); + + return _MALI_OSK_ERR_OK; +} + +void mali_pp_reset_async(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + + MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description)); + + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET); +} + +_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core) +{ + int i; + u32 rawstat = 0; + + for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) { + u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS); + if (!(status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) { + rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT); + if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) { + break; + } + } + } + + if (i == MALI_REG_POLL_COUNT_FAST) { + MALI_PRINT_ERROR(("Mali PP: Failed to reset core %s, rawstat: 0x%08x\n", + core->hw_core.description, rawstat)); + return _MALI_OSK_ERR_FAULT; + } + + /* Re-enable interrupts */ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL); + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core) +{ + mali_pp_reset_async(core); + return mali_pp_reset_wait(core); +} + +void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual) +{ + u32 relative_address; + u32 start_index; + u32 nr_of_regs; + u32 *frame_registers = mali_pp_job_get_frame_registers(job); + u32 *wb0_registers = mali_pp_job_get_wb0_registers(job); + u32 *wb1_registers = mali_pp_job_get_wb1_registers(job); + u32 *wb2_registers = mali_pp_job_get_wb2_registers(job); + u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job); + u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job); + + MALI_DEBUG_ASSERT_POINTER(core); + + /* Write frame registers */ + + /* + * There are two frame registers which are 
different for each sub job: + * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME) + * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK) + */ + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]); + + /* For virtual jobs, the stack address shouldn't be broadcast but written individually */ + if (!mali_pp_job_is_virtual(job) || restart_virtual) { + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]); + } + + /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */ + relative_address = MALI200_REG_ADDR_RSW; + start_index = MALI200_REG_ADDR_RSW / sizeof(u32); + nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32); + + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, + relative_address, &frame_registers[start_index], + nr_of_regs, &mali_frame_registers_reset_values[start_index]); + + /* MALI200_REG_ADDR_STACK_SIZE */ + relative_address = MALI200_REG_ADDR_STACK_SIZE; + start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32); + + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, + relative_address, frame_registers[start_index], + mali_frame_registers_reset_values[start_index]); + + /* Skip 2 reserved registers */ + + /* Write remaining registers */ + relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X; + start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32); + nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32); + + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, + relative_address, &frame_registers[start_index], + nr_of_regs, &mali_frame_registers_reset_values[start_index]); + + /* Write WBx registers */ + if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */ + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values); + } + + if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */ + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values); + } + + if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */ + mali_hw_core_register_write_array_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values); + } + + if (MALI_HW_CORE_NO_COUNTER != counter_src0) { + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0); + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value); + } + if (MALI_HW_CORE_NO_COUNTER != counter_src1) { + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1); + mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value); + } + +#ifdef CONFIG_MALI400_HEATMAPS_ENABLED + if (job->uargs.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE) { + 
mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_CONTR, ((job->uargs.tilesx & 0x3FF) << 16) | 1); + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_PERFMON_BASE, job->uargs.heatmap_mem & 0xFFFFFFF8); + } +#endif /* CONFIG_MALI400_HEATMAPS_ENABLED */ + + MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description)); + + /* Adding barrier to make sure all register writes are finished */ + _mali_osk_write_mem_barrier(); + + /* This is the command that starts the core. + * + * Don't actually run the job if PROFILING_SKIP_PP_JOBS is set, just + * force the core to assert the completion interrupt. + */ +#if !defined(PROFILING_SKIP_PP_JOBS) + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING); +#else + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME); +#endif + + /* Adding barrier to make sure the previous register writes are finished */ + _mali_osk_write_mem_barrier(); +} + +u32 mali_pp_core_get_version(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION); +} + +struct mali_pp_core *mali_pp_get_global_pp_core(u32 index) +{ + if (mali_global_num_pp_cores > index) { + return mali_global_pp_cores[index]; + } + + return NULL; +} + +u32 mali_pp_get_glob_num_pp_cores(void) +{ + return mali_global_num_pp_cores; +} + +/* ------------- interrupt handling below ------------------ */ +static void mali_pp_irq_probe_trigger(void *data) +{ + struct mali_pp_core *core = (struct mali_pp_core *)data; + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_BUS_ERROR); + _mali_osk_mem_barrier(); +} + +static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data) +{ + struct mali_pp_core *core = (struct mali_pp_core *)data; + u32 irq_readout; + + irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS); + if (MALI200_REG_VAL_IRQ_BUS_ERROR & irq_readout) { + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_BUS_ERROR); + _mali_osk_mem_barrier(); + return _MALI_OSK_ERR_OK; + } + + return _MALI_OSK_ERR_FAULT; +} + + +#if 0 +static void mali_pp_print_registers(struct mali_pp_core *core) +{ + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS =
0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC))); + MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE))); +} +#endif + +#if 0 +void mali_pp_print_state(struct mali_pp_core *core) +{ + MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS))); +} +#endif + +void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob) +{ + u32 val0 = 0; + u32 val1 = 0; + u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, subjob); + u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, subjob); +#if defined(CONFIG_MALI400_PROFILING) + int counter_index = COUNTER_FP_0_C0 + (2 * child->core_id); +#endif + + if (MALI_HW_CORE_NO_COUNTER != counter_src0) { + val0 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE); + mali_pp_job_set_perf_counter_value0(job, subjob, val0); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_report_hw_counter(counter_index, val0); + _mali_osk_profiling_record_global_counters(counter_index, val0); +#endif + } + + if (MALI_HW_CORE_NO_COUNTER != counter_src1) { + val1 = mali_hw_core_register_read(&child->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE); + mali_pp_job_set_perf_counter_value1(job, subjob, val1); + +#if defined(CONFIG_MALI400_PROFILING) + _mali_osk_profiling_report_hw_counter(counter_index + 1, val1); + _mali_osk_profiling_record_global_counters(counter_index + 1, val1); +#endif + } +} + +#if MALI_STATE_TRACKING +u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size) +{ + int n = 0; + + n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description); + + return n; +} +#endif diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pp.h b/drivers/gpu/arm/mali400/common/mali_pp.h --- a/drivers/gpu/arm/mali400/common/mali_pp.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pp.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PP_H__ +#define __MALI_PP_H__ + +#include "mali_osk.h" +#include "mali_pp_job.h" +#include "mali_hw_core.h" + +struct mali_group; + +#define MALI_MAX_NUMBER_OF_PP_CORES 9 + +/** + * Definition of the PP core struct + * Used to track a PP core in the system. + */ +struct mali_pp_core { + struct mali_hw_core hw_core; /**< Common for all HW cores */ + _mali_osk_irq_t *irq; /**< IRQ handler */ + u32 core_id; /**< Unique core ID */ + u32 bcast_id; /**< The "flag" value used by the Mali-450 broadcast and DLBU unit */ +}; + +_mali_osk_errcode_t mali_pp_initialize(void); +void mali_pp_terminate(void); + +struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group, mali_bool is_virtual, u32 bcast_id); +void mali_pp_delete(struct mali_pp_core *core); + +void mali_pp_stop_bus(struct mali_pp_core *core); +_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core); +void mali_pp_reset_async(struct mali_pp_core *core); +_mali_osk_errcode_t mali_pp_reset_wait(struct mali_pp_core *core); +_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core); +_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core); + +void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual); + +u32 mali_pp_core_get_version(struct mali_pp_core *core); + +MALI_STATIC_INLINE u32 mali_pp_core_get_id(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return core->core_id; +} + +MALI_STATIC_INLINE u32 mali_pp_core_get_bcast_id(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return core->bcast_id; +} + +struct mali_pp_core *mali_pp_get_global_pp_core(u32 index); +u32 mali_pp_get_glob_num_pp_cores(void); + +/* Debug */ +u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size); + +/** + * Put instrumented HW counters from the core(s) to the job object (if enabled) + * + * parent and child is always the same, except for virtual jobs on Mali-450. + * In this case, the counters will be enabled on the virtual core (parent), + * but values need to be read from the child cores. 
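The global core accessors declared above make it easy to walk every probed PP core. A hypothetical debug helper (not part of the driver) built only from accessors in this header:

static void dump_pp_core_ids(void)
{
	u32 i;

	for (i = 0; i < mali_pp_get_glob_num_pp_cores(); i++) {
		struct mali_pp_core *core = mali_pp_get_global_pp_core(i);

		/* core_id is unique; bcast_id is the Mali-450 broadcast flag */
		MALI_DEBUG_PRINT(2, ("PP core %u: id=%u, bcast_id=0x%X\n",
				     i, mali_pp_core_get_id(core),
				     mali_pp_core_get_bcast_id(core)));
	}
}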
+ * + * @param parent The core used to see if the counters were enabled + * @param child The core to actually read the values from + * @param job Job object to update with counter values (if enabled) + * @param subjob Which subjob the counters are applicable for (core ID for virtual jobs) + */ +void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob); + +MALI_STATIC_INLINE const char *mali_pp_core_description(struct mali_pp_core *core) +{ + return core->hw_core.description; +} + +MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struct mali_pp_core *core) +{ + u32 rawstat_used = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & + MALI200_REG_VAL_IRQ_MASK_USED; + if (0 == rawstat_used) { + return MALI_INTERRUPT_RESULT_NONE; + } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) { + return MALI_INTERRUPT_RESULT_SUCCESS; + } + + return MALI_INTERRUPT_RESULT_ERROR; +} + +MALI_STATIC_INLINE u32 mali_pp_get_rawstat(struct mali_pp_core *core) +{ + MALI_DEBUG_ASSERT_POINTER(core); + return mali_hw_core_register_read(&core->hw_core, + MALI200_REG_ADDR_MGMT_INT_RAWSTAT); +} + + +MALI_STATIC_INLINE u32 mali_pp_is_active(struct mali_pp_core *core) +{ + u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS); + return (status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) ? MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core) +{ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE); +} + +MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core) +{ + mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); +} + +MALI_STATIC_INLINE void mali_pp_write_addr_renderer_list(struct mali_pp_core *core, + struct mali_pp_job *job, u32 subjob) +{ + u32 addr = mali_pp_job_get_addr_frame(job, subjob); + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, addr); +} + + +MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job) +{ + u32 addr = mali_pp_job_get_addr_stack(job, core->core_id); + mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr); +} + +#endif /* __MALI_PP_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pp_job.c b/drivers/gpu/arm/mali400/common/mali_pp_job.c --- a/drivers/gpu/arm/mali400/common/mali_pp_job.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pp_job.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,315 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
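mali_pp_get_interrupt_result() above reduces the raw IRQ status to three cases: nothing relevant pending, exactly the end-of-frame bit (success), or anything else (error). The same decision as a standalone function; the two constants are illustrative stand-ins for the MALI200_REG_VAL_* values defined in mali_200_regs.h:

#include <stdint.h>

#define IRQ_END_OF_FRAME 0x0001u /* illustrative value */
#define IRQ_MASK_USED 0x0fffu /* illustrative value */

enum irq_result { IRQ_NONE, IRQ_SUCCESS, IRQ_ERROR };

static enum irq_result classify_rawstat(uint32_t rawstat)
{
	uint32_t used = rawstat & IRQ_MASK_USED;

	if (used == 0)
		return IRQ_NONE; /* nothing we care about (e.g. shared IRQ) */
	if (used == IRQ_END_OF_FRAME)
		return IRQ_SUCCESS; /* only the "frame done" bit is set */
	return IRQ_ERROR; /* any other bit signals a hang or bus error */
}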
+ */ + +#include "mali_pp.h" +#include "mali_pp_job.h" +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_kernel_common.h" +#include "mali_uk_types.h" +#include "mali_executor.h" +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +#include "linux/mali_memory_dma_buf.h" +#endif +#include "mali_memory_swap_alloc.h" +#include "mali_scheduler.h" + +static u32 pp_counter_src0 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */ +static u32 pp_counter_src1 = MALI_HW_CORE_NO_COUNTER; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */ +static _mali_osk_atomic_t pp_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */ +static u32 pp_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER }; +static u32 pp_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS] = { MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER, MALI_HW_CORE_NO_COUNTER }; + +void mali_pp_job_initialize(void) +{ + _mali_osk_atomic_init(&pp_counter_per_sub_job_count, 0); +} + +void mali_pp_job_terminate(void) +{ + _mali_osk_atomic_term(&pp_counter_per_sub_job_count); +} + +struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, + _mali_uk_pp_start_job_s __user *uargs, u32 id) +{ + struct mali_pp_job *job; + u32 perf_counter_flag; + + job = _mali_osk_calloc(1, sizeof(struct mali_pp_job)); + if (NULL != job) { + _mali_osk_list_init(&job->list); + _mali_osk_list_init(&job->session_fb_lookup_list); + _mali_osk_atomic_inc(&session->number_of_pp_jobs); + + if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) { + goto fail; + } + + if (job->uargs.num_cores > _MALI_PP_MAX_SUB_JOBS) { + MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n")); + goto fail; + } + + if (!mali_pp_job_use_no_notification(job)) { + job->finished_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s)); + if (NULL == job->finished_notification) goto fail; + } + + perf_counter_flag = mali_pp_job_get_perf_counter_flag(job); + + /* case when no counters came from user space + * so pass the debugfs / DS-5 provided global ones to the job object */ + if (!((perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) || + (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))) { + u32 sub_job_count = _mali_osk_atomic_read(&pp_counter_per_sub_job_count); + + /* These counters apply for all virtual jobs, and where no per sub job counter is specified */ + job->uargs.perf_counter_src0 = pp_counter_src0; + job->uargs.perf_counter_src1 = pp_counter_src1; + + /* We only copy the per sub job array if it is enabled with at least one counter */ + if (0 < sub_job_count) { + job->perf_counter_per_sub_job_count = sub_job_count; + _mali_osk_memcpy(job->perf_counter_per_sub_job_src0, pp_counter_per_sub_job_src0, sizeof(pp_counter_per_sub_job_src0)); + _mali_osk_memcpy(job->perf_counter_per_sub_job_src1, pp_counter_per_sub_job_src1, sizeof(pp_counter_per_sub_job_src1)); + } + } + + job->session = session; + job->id = id; + + job->sub_jobs_num = job->uargs.num_cores ? 
job->uargs.num_cores : 1; + job->pid = _mali_osk_get_pid(); + job->tid = _mali_osk_get_tid(); + + _mali_osk_atomic_init(&job->sub_jobs_completed, 0); + _mali_osk_atomic_init(&job->sub_job_errors, 0); + job->swap_status = MALI_NO_SWAP_IN; + job->user_notification = MALI_FALSE; + job->num_pp_cores_in_virtual = 0; + + if (job->uargs.num_memory_cookies > session->allocation_mgr.mali_allocation_num) { + MALI_PRINT_ERROR(("Mali PP job: The number of memory cookies is invalid !\n")); + goto fail; + } + + if (job->uargs.num_memory_cookies > 0) { + u32 size; + u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies; + + size = sizeof(*memory_cookies) * (job->uargs.num_memory_cookies); + + job->memory_cookies = _mali_osk_malloc(size); + if (NULL == job->memory_cookies) { + MALI_PRINT_ERROR(("Mali PP job: Failed to allocate %d bytes of memory cookies!\n", size)); + goto fail; + } + + if (0 != _mali_osk_copy_from_user(job->memory_cookies, memory_cookies, size)) { + MALI_PRINT_ERROR(("Mali PP job: Failed to copy %d bytes of memory cookies from user!\n", size)); + goto fail; + } + } + + if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) { + /* Not a valid job. */ + goto fail; + } + + mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_PP, NULL, job); + mali_timeline_fence_copy_uk_fence(&(job->tracker.fence), &(job->uargs.fence)); + + mali_mem_swap_in_pages(job); + + return job; + } + +fail: + if (NULL != job) { + mali_pp_job_delete(job); + } + + return NULL; +} + +void mali_pp_job_delete(struct mali_pp_job *job) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list)); + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list)); + + session = mali_pp_job_get_session(job); + MALI_DEBUG_ASSERT_POINTER(session); + + if (NULL != job->memory_cookies) { +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) + /* Unmap buffers attached to job */ + mali_dma_buf_unmap_job(job); +#endif + if (MALI_NO_SWAP_IN != job->swap_status) { + mali_mem_swap_out_pages(job); + } + + _mali_osk_free(job->memory_cookies); + } + + if (job->user_notification) { + mali_scheduler_return_pp_job_to_user(job, + job->num_pp_cores_in_virtual); + } + + if (NULL != job->finished_notification) { + _mali_osk_notification_delete(job->finished_notification); + } + + _mali_osk_atomic_term(&job->sub_jobs_completed); + _mali_osk_atomic_term(&job->sub_job_errors); + _mali_osk_atomic_dec(&session->number_of_pp_jobs); + _mali_osk_free(job); + + _mali_osk_wait_queue_wake_up(session->wait_queue); +} + +void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list) +{ + struct mali_pp_job *iter; + struct mali_pp_job *tmp; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + /* Find position in list/queue where job should be added. */ + _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list, + struct mali_pp_job, list) { + /* job should be started after iter if iter is in progress. */ + if (0 < iter->sub_jobs_started) { + break; + } + + /* + * job should be started after iter if it has a higher + * job id. A span is used to handle job id wrapping. 
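The id comparison just after this point relies on unsigned wrap-around arithmetic: as long as two queued ids are never more than MALI_SCHEDULER_JOB_ID_SPAN apart, the subtraction stays correct even when the 32-bit counter wraps. A standalone demonstration (the SPAN value here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define JOB_ID_SPAN (1u << 31) /* illustrative: half the u32 id space */

/* Nonzero if id 'a' was issued before id 'b', wrap-around safe. */
static int id_before(uint32_t a, uint32_t b)
{
	return a != b && (b - a) < JOB_ID_SPAN;
}

int main(void)
{
	printf("%d\n", id_before(10u, 20u)); /* 1 */
	printf("%d\n", id_before(0xFFFFFFF0u, 5u)); /* 1: counter wrapped */
	printf("%d\n", id_before(20u, 10u)); /* 0 */
	return 0;
}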
*/ + if ((mali_pp_job_get_id(job) - + mali_pp_job_get_id(iter)) < + MALI_SCHEDULER_JOB_ID_SPAN) { + break; + } + } + + _mali_osk_list_add(&job->list, &iter->list); +} + + +u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job) +{ + /* Virtual jobs always use the global job counter, as do jobs without any per sub job counters */ + if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) { + return job->uargs.perf_counter_src0; + } + + /* Use per sub job counter if enabled... */ + if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src0[sub_job]) { + return job->perf_counter_per_sub_job_src0[sub_job]; + } + + /* ...else default to global job counter */ + return job->uargs.perf_counter_src0; +} + +u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job) +{ + /* Virtual jobs always use the global job counter, as do jobs without any per sub job counters */ + if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) { + return job->uargs.perf_counter_src1; + } + + /* Use per sub job counter if enabled... */ + if (MALI_HW_CORE_NO_COUNTER != job->perf_counter_per_sub_job_src1[sub_job]) { + return job->perf_counter_per_sub_job_src1[sub_job]; + } + + /* ...else default to global job counter */ + return job->uargs.perf_counter_src1; +} + +void mali_pp_job_set_pp_counter_global_src0(u32 counter) +{ + pp_counter_src0 = counter; +} + +void mali_pp_job_set_pp_counter_global_src1(u32 counter) +{ + pp_counter_src1 = counter; +} + +void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter) +{ + MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS); + + if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src0[sub_job]) { + /* increment count since existing counter was disabled */ + _mali_osk_atomic_inc(&pp_counter_per_sub_job_count); + } + + if (MALI_HW_CORE_NO_COUNTER == counter) { + /* decrement count since new counter is disabled */ + _mali_osk_atomic_dec(&pp_counter_per_sub_job_count); + } + + /* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will both inc and dec, for a net change of 0 */ + + pp_counter_per_sub_job_src0[sub_job] = counter; +} + +void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter) +{ + MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS); + + if (MALI_HW_CORE_NO_COUNTER == pp_counter_per_sub_job_src1[sub_job]) { + /* increment count since existing counter was disabled */ + _mali_osk_atomic_inc(&pp_counter_per_sub_job_count); + } + + if (MALI_HW_CORE_NO_COUNTER == counter) { + /* decrement count since new counter is disabled */ + _mali_osk_atomic_dec(&pp_counter_per_sub_job_count); + } + + /* Note: a change from MALI_HW_CORE_NO_COUNTER to MALI_HW_CORE_NO_COUNTER will both inc and dec, for a net change of 0 */ + + pp_counter_per_sub_job_src1[sub_job] = counter; +} + +u32 mali_pp_job_get_pp_counter_global_src0(void) +{ + return pp_counter_src0; +} + +u32 mali_pp_job_get_pp_counter_global_src1(void) +{ + return pp_counter_src1; +} + +u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job) +{ + MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS); + return pp_counter_per_sub_job_src0[sub_job]; +} + +u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job) +{ + MALI_DEBUG_ASSERT(sub_job < _MALI_PP_MAX_SUB_JOBS); + return pp_counter_per_sub_job_src1[sub_job]; +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_pp_job.h b/drivers/gpu/arm/mali400/common/mali_pp_job.h --- 
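The two lookup functions above implement a simple precedence: a per-sub-job counter source overrides the job-global one, and virtual jobs (or jobs with no per-sub-job entries) always take the global source. The same fallback as a standalone sketch; NO_COUNTER stands in for MALI_HW_CORE_NO_COUNTER:

#include <stdint.h>

#define NO_COUNTER 0xFFFFFFFFu /* illustrative stand-in */

static uint32_t pick_counter_src(uint32_t global_src,
				 const uint32_t *per_sub_job_src,
				 uint32_t per_sub_job_count,
				 uint32_t sub_job)
{
	/* No overrides at all: everyone uses the global source. */
	if (per_sub_job_count == 0)
		return global_src;
	/* An enabled override wins; a disabled slot falls back. */
	if (per_sub_job_src[sub_job] != NO_COUNTER)
		return per_sub_job_src[sub_job];
	return global_src;
}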
a/drivers/gpu/arm/mali400/common/mali_pp_job.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_pp_job.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,591 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PP_JOB_H__ +#define __MALI_PP_JOB_H__ + +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_uk_types.h" +#include "mali_session.h" +#include "mali_kernel_common.h" +#include "regs/mali_200_regs.h" +#include "mali_kernel_core.h" +#include "mali_dlbu.h" +#include "mali_timeline.h" +#include "mali_scheduler.h" +#include "mali_executor.h" +#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +#include "linux/mali_memory_dma_buf.h" +#endif +#if defined(CONFIG_MALI_DMA_BUF_FENCE) +#include "linux/mali_dma_fence.h" +#include +#endif + +typedef enum pp_job_status { + MALI_NO_SWAP_IN, + MALI_SWAP_IN_FAIL, + MALI_SWAP_IN_SUCC, +} pp_job_status; + +/** + * This structure represents a PP job, including all sub jobs. + * + * The PP job object itself is not protected by any single lock, + * but relies on other locks instead (scheduler, executor and timeline lock). + * Think of the job object as moving between these subsystems throughout + * its lifetime. Different parts of the PP job struct are used by different + * subsystems. Accessor functions ensure that the correct lock is taken. + * Do NOT access any data members directly from outside this module! + */ +struct mali_pp_job { + /* + * These members are typically only set at creation, + * and only read later on. + * They do not require any lock protection. + */ + _mali_uk_pp_start_job_s uargs; /**< Arguments from user space */ + struct mali_session_data *session; /**< Session which submitted this job */ + u32 pid; /**< Process ID of submitting process */ + u32 tid; /**< Thread ID of submitting thread */ + u32 id; /**< Identifier for this job in kernel space (sequential numbering) */ + u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */ + struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */ + _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */ + u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */ + u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */ + u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */ + u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */ + + pp_job_status swap_status; /**< Tracks each PP job's swap status; if swap-in failed, the job must be dropped in the scheduler */ + mali_bool user_notification; /**< When PP job deletion is deferred, records whether a job finish notification must still be sent to user space */ + u32 num_pp_cores_in_virtual; /**< How many PP cores we have when job finished */ + + /* + * These members are used by both scheduler and executor. 
+ * They are "protected" by atomic operations. + */ + _mali_osk_atomic_t sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */ + _mali_osk_atomic_t sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */ + + /* + * These members are used by scheduler, but only when no one else + * knows about this job object but the working function. + * No lock is thus needed for these. + */ + u32 *memory_cookies; /**< Memory cookies attached to job */ + + /* + * These members are used by the scheduler, + * protected by scheduler lock + */ + _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */ + _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */ + + u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */ + + /* + * Set by executor/group on job completion, read by scheduler when + * returning job to user. Hold executor lock when setting, + * no lock needed when reading + */ + u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */ + u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */ + +#if defined(CONFIG_MALI_DMA_BUF_FENCE) + struct mali_dma_fence_context dma_fence_context; /**< The mali dma fence context to record dma fence waiters that this job wait for */ + struct dma_fence *rendered_dma_fence; /**< the new dma fence link to this job */ +#endif +}; + +void mali_pp_job_initialize(void); +void mali_pp_job_terminate(void); + +struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *uargs, u32 id); +void mali_pp_job_delete(struct mali_pp_job *job); + +u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job); +u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job); + +void mali_pp_job_set_pp_counter_global_src0(u32 counter); +void mali_pp_job_set_pp_counter_global_src1(u32 counter); +void mali_pp_job_set_pp_counter_sub_job_src0(u32 sub_job, u32 counter); +void mali_pp_job_set_pp_counter_sub_job_src1(u32 sub_job, u32 counter); + +u32 mali_pp_job_get_pp_counter_global_src0(void); +u32 mali_pp_job_get_pp_counter_global_src1(void); +u32 mali_pp_job_get_pp_counter_sub_job_src0(u32 sub_job); +u32 mali_pp_job_get_pp_counter_sub_job_src1(u32 sub_job); + +MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (NULL == job) ? 0 : job->id; +} + +MALI_STATIC_INLINE void mali_pp_job_set_cache_order(struct mali_pp_job *job, + u32 cache_order) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + job->cache_order = cache_order; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (NULL == job) ? 
0 : job->cache_order; +} + +MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.user_job_ptr; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.frame_builder_id; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.flush_id; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->pid; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->tid; +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.frame_registers; +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.dlbu_registers; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job) +{ +#if (defined(CONFIG_MALI450) || defined(CONFIG_MALI470)) + MALI_DEBUG_ASSERT_POINTER(job); + return (0 == job->uargs.num_cores) ? MALI_TRUE : MALI_FALSE; +#else + return MALI_FALSE; +#endif +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + if (mali_pp_job_is_virtual(job)) { + return MALI_DLBU_VIRT_ADDR; + } else if (0 == sub_job) { + return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)]; + } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) { + return job->uargs.frame_registers_addr_frame[sub_job - 1]; + } + + return 0; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + if (0 == sub_job) { + return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)]; + } else if (sub_job < _MALI_PP_MAX_SUB_JOBS) { + return job->uargs.frame_registers_addr_stack[sub_job - 1]; + } + + return 0; +} + +void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list); + +MALI_STATIC_INLINE void mali_pp_job_list_addtail(struct mali_pp_job *job, + _mali_osk_list_t *list) +{ + _mali_osk_list_addtail(&job->list, list); +} + +MALI_STATIC_INLINE void mali_pp_job_list_move(struct mali_pp_job *job, + _mali_osk_list_t *list) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list)); + _mali_osk_list_move(&job->list, list); +} + +MALI_STATIC_INLINE void mali_pp_job_list_remove(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + _mali_osk_list_delinit(&job->list); +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb0_registers; +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb1_registers; +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb2_registers; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_wb0_source_addr(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]; +} + +MALI_STATIC_INLINE u32 
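mali_pp_job_get_addr_frame()/get_addr_stack() below show how per-sub-job values are stored: sub job 0 reads straight out of the frame register block (byte offset divided by sizeof(u32) to index the array), while sub jobs 1..N-1 come from separate side arrays with a minus-one offset. The same indexing in a standalone sketch with hypothetical sizes:

#include <stdint.h>

#define REG_ADDR_FRAME 0x0000u /* illustrative byte offset */

struct fake_job {
	uint32_t frame_registers[64]; /* register block, u32-indexed */
	uint32_t aux_frame_addr[7]; /* addresses for sub jobs 1..7 */
};

static uint32_t get_addr_frame(const struct fake_job *j, uint32_t sub_job)
{
	if (sub_job == 0)
		return j->frame_registers[REG_ADDR_FRAME / sizeof(uint32_t)];
	return j->aux_frame_addr[sub_job - 1]; /* caller validates bounds */
}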
mali_pp_job_get_wb1_source_addr(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_wb2_source_addr(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)]; +} + +MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0; +} + +MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0; +} + +MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_all_writeback_unit_disabled(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + if (job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] || + job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] || + job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] + ) { + /* At least one output unit active */ + return MALI_FALSE; + } + + /* All outputs are disabled - we can abort the job */ + return MALI_TRUE; +} + +MALI_STATIC_INLINE void mali_pp_job_fb_lookup_add(struct mali_pp_job *job) +{ + u32 fb_lookup_id; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + fb_lookup_id = MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id; + + MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id); + + _mali_osk_list_addtail(&job->session_fb_lookup_list, + &job->session->pp_job_fb_lookup_list[fb_lookup_id]); +} + +MALI_STATIC_INLINE void mali_pp_job_fb_lookup_remove(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + _mali_osk_list_delinit(&job->session_fb_lookup_list); +} + +MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->session; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_has_started_sub_jobs(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + return (0 < job->sub_jobs_started) ? MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE; +} + +/* Function used when we are terminating a session with queued jobs: marks all unstarted sub jobs as failed and makes sure that no new sub jobs are started. 
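mali_pp_job_fb_lookup_add() above buckets jobs by the low bits of the frame builder id, so the writeback-disable path can find all jobs of one frame builder without scanning the whole queue. The hash itself is just a mask; sketch with an illustrative table size:

#include <stdint.h>

#define FB_LOOKUP_LIST_SIZE 16u /* illustrative; must be a power of two */
#define FB_LOOKUP_LIST_MASK (FB_LOOKUP_LIST_SIZE - 1u)

static uint32_t fb_lookup_bucket(uint32_t frame_builder_id)
{
	/* Low bits select one of the per-session lookup lists. */
	return frame_builder_id & FB_LOOKUP_LIST_MASK;
}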
*/ +MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job) +{ + u32 jobs_remaining; + u32 i; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + jobs_remaining = job->sub_jobs_num - job->sub_jobs_started; + job->sub_jobs_started += jobs_remaining; + + /* Not the most optimal way, but this is only used in error cases */ + for (i = 0; i < jobs_remaining; i++) { + _mali_osk_atomic_inc(&job->sub_jobs_completed); + _mali_osk_atomic_inc(&job->sub_job_errors); + } +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->sub_jobs_num == + _mali_osk_atomic_read(&job->sub_jobs_completed)) ? + MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + return job->sub_jobs_started; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->sub_jobs_num; +} + +MALI_STATIC_INLINE u32 mali_pp_job_unstarted_sub_job_count(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT(job->sub_jobs_num >= job->sub_jobs_started); + return (job->sub_jobs_num - job->sub_jobs_started); +} + +MALI_STATIC_INLINE u32 mali_pp_job_num_memory_cookies(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.num_memory_cookies; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_memory_cookie( + struct mali_pp_job *job, u32 index) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies); + MALI_DEBUG_ASSERT_POINTER(job->memory_cookies); + return job->memory_cookies[index]; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + if (0 < job->uargs.num_memory_cookies) { + return MALI_TRUE; + } + + return MALI_FALSE; +} + +MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + + /* Assert that we are marking the "first unstarted sub job" as started */ + MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job); + + job->sub_jobs_started++; +} + +MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + _mali_osk_atomic_inc(&job->sub_jobs_completed); + if (MALI_FALSE == success) { + _mali_osk_atomic_inc(&job->sub_job_errors); + } +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + if (0 == _mali_osk_atomic_read(&job->sub_job_errors)) { + return MALI_TRUE; + } + return MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification( + struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION) ? + MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_pilot_job(struct mali_pp_job *job) +{ + /* + * A pilot job is currently identified as jobs which + * require no callback notification. 
+ */ + return mali_pp_job_use_no_notification(job); +} + +MALI_STATIC_INLINE _mali_osk_notification_t * +mali_pp_job_get_finished_notification(struct mali_pp_job *job) +{ + _mali_osk_notification_t *notification; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(job->finished_notification); + + notification = job->finished_notification; + job->finished_notification = NULL; + + return notification; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface( + struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->uargs.flags & _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE) + ? MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE mali_bool mali_pp_job_is_protected_job(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (job->uargs.flags & _MALI_PP_JOB_FLAG_PROTECTED) + ? MALI_TRUE : MALI_FALSE; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->uargs.perf_counter_flag; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->perf_counter_value0[sub_job]; +} + +MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return job->perf_counter_value1[sub_job]; +} + +MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->perf_counter_value0[sub_job] = value; +} + +MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD(); + job->perf_counter_value1[sub_job] = value; +} + +MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + if (mali_pp_job_is_virtual(job) && job->sub_jobs_num != 1) { + return _MALI_OSK_ERR_FAULT; + } + return _MALI_OSK_ERR_OK; +} + +/** + * Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted. + * + * @param job Job to check. + * @return MALI_TRUE if job has more than two sub jobs and all sub jobs are unstarted, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD(); + MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job)); + + return (0 == job->sub_jobs_started && 2 < job->sub_jobs_num); +} + +/** + * Get PP job's Timeline tracker. + * + * @param job PP job. + * @return Pointer to Timeline tracker for the job. + */ +MALI_STATIC_INLINE struct mali_timeline_tracker *mali_pp_job_get_tracker(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return &(job->tracker); +} + +MALI_STATIC_INLINE u32 *mali_pp_job_get_timeline_point_ptr( + struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr; +} + + +#endif /* __MALI_PP_JOB_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_scheduler.c b/drivers/gpu/arm/mali400/common/mali_scheduler.c --- a/drivers/gpu/arm/mali400/common/mali_scheduler.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_scheduler.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,1548 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_scheduler.h" +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_osk_profiling.h" +#include "mali_kernel_utilization.h" +#include "mali_timeline.h" +#include "mali_gp_job.h" +#include "mali_pp_job.h" +#include "mali_executor.h" +#include "mali_group.h" +#include +#include +#include "mali_pm_metrics.h" + +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include "mali_memory_dma_buf.h" +#if defined(CONFIG_MALI_DMA_BUF_FENCE) +#include "mali_dma_fence.h" +#include +#endif +#endif + +#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS) +#include +#include +#endif +/* + * ---------- static defines/constants ---------- + */ + +/* + * If dma_buf with map on demand is used, we defer job queue + * if in atomic context, since both might sleep. + */ +#if defined(CONFIG_DMA_SHARED_BUFFER) +#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1 +#endif +#endif + + +/* + * ---------- global variables (exported due to inline functions) ---------- + */ + +/* Lock protecting this module */ +_mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL; + +/* Queue of jobs to be executed on the GP group */ +struct mali_scheduler_job_queue job_queue_gp; + +/* Queue of PP jobs */ +struct mali_scheduler_job_queue job_queue_pp; + +_mali_osk_atomic_t mali_job_id_autonumber; +_mali_osk_atomic_t mali_job_cache_order_autonumber; +/* + * ---------- static variables ---------- + */ + +_mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL; +_mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL; +static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue); + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) +static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL; +static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL; +static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list); +#endif + +/* + * ---------- Forward declaration of static functions ---------- + */ + +static mali_timeline_point mali_scheduler_submit_gp_job( + struct mali_session_data *session, struct mali_gp_job *job); +static _mali_osk_errcode_t mali_scheduler_submit_pp_job( + struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point); + +static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job); +static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job); + +static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job, + mali_bool success); + +static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job); +void mali_scheduler_do_pp_job_delete(void *arg); + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) +static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job); +static void mali_scheduler_do_pp_job_queue(void *arg); +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + +/* + * ---------- Actual implementation ---------- + */ + +_mali_osk_errcode_t mali_scheduler_initialize(void) +{ + _mali_osk_atomic_init(&mali_job_id_autonumber, 0); + 
_mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0); + + _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri); + _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri); + job_queue_gp.depth = 0; + job_queue_gp.big_job_num = 0; + + _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri); + _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri); + job_queue_pp.depth = 0; + job_queue_pp.big_job_num = 0; + + mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init( + _MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_SCHEDULER); + if (NULL == mali_scheduler_lock_obj) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } + + scheduler_wq_pp_job_delete = _mali_osk_wq_create_work( + mali_scheduler_do_pp_job_delete, NULL); + if (NULL == scheduler_wq_pp_job_delete) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } + + scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init( + _MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED); + if (NULL == scheduler_pp_job_delete_lock) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) + scheduler_wq_pp_job_queue = _mali_osk_wq_create_work( + mali_scheduler_do_pp_job_queue, NULL); + if (NULL == scheduler_wq_pp_job_queue) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } + + scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init( + _MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED); + if (NULL == scheduler_pp_job_queue_lock) { + mali_scheduler_terminate(); + return _MALI_OSK_ERR_FAULT; + } +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + + return _MALI_OSK_ERR_OK; +} + +void mali_scheduler_terminate(void) +{ +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) + if (NULL != scheduler_pp_job_queue_lock) { + _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock); + scheduler_pp_job_queue_lock = NULL; + } + + if (NULL != scheduler_wq_pp_job_queue) { + _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue); + scheduler_wq_pp_job_queue = NULL; + } +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + + if (NULL != scheduler_pp_job_delete_lock) { + _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock); + scheduler_pp_job_delete_lock = NULL; + } + + if (NULL != scheduler_wq_pp_job_delete) { + _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete); + scheduler_wq_pp_job_delete = NULL; + } + + if (NULL != mali_scheduler_lock_obj) { + _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj); + mali_scheduler_lock_obj = NULL; + } + + _mali_osk_atomic_term(&mali_job_cache_order_autonumber); + _mali_osk_atomic_term(&mali_job_id_autonumber); +} + +u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure) +{ + /* + * Count how many physical sub jobs are present from the head of queue + * until the first virtual job is present. 
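mali_scheduler_initialize() above reuses mali_scheduler_terminate() as its error-unwind path: every resource starts out NULL, and terminate() only tears down what was actually created, so initialize() can bail out at any step. The same pattern in a self-contained sketch:

#include <stdlib.h>

struct ctx { int *a; int *b; };

/* Safe to call on a partially initialized context: frees only what
 * exists and resets the pointers (free(NULL) is a no-op). */
static void ctx_terminate(struct ctx *c)
{
	free(c->b); c->b = NULL;
	free(c->a); c->a = NULL;
}

static int ctx_initialize(struct ctx *c)
{
	c->a = malloc(sizeof(*c->a));
	if (!c->a) { ctx_terminate(c); return -1; }
	c->b = malloc(sizeof(*c->b));
	if (!c->b) { ctx_terminate(c); return -1; }
	return 0;
}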
+ * Early out when we have reached maximum number of PP cores (8) + */ + u32 count = 0; + struct mali_pp_job *job; + struct mali_pp_job *temp; + + /* Check for partially started normal pri jobs */ + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next, + struct mali_pp_job, list); + + MALI_DEBUG_ASSERT_POINTER(job); + + if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) { + /* + * Remember; virtual jobs can't be queued and started + * at the same time, so this must be a physical job + */ + if ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job)) + || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job))) { + + count += mali_pp_job_unstarted_sub_job_count(job); + if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { + return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + } + } + } + } + + _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri, + struct mali_pp_job, list) { + if ((MALI_FALSE == mali_pp_job_is_virtual(job)) + && ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job)) + || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) { + + count += mali_pp_job_unstarted_sub_job_count(job); + if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { + return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + } + } else { + /* Came across a virtual job, so stop counting */ + return count; + } + } + + _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri, + struct mali_pp_job, list) { + if ((MALI_FALSE == mali_pp_job_is_virtual(job)) + && (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) + && ((MALI_FALSE == gpu_mode_is_secure && MALI_FALSE == mali_pp_job_is_protected_job(job)) + || (MALI_TRUE == gpu_mode_is_secure && MALI_TRUE == mali_pp_job_is_protected_job(job)))) { + + count += mali_pp_job_unstarted_sub_job_count(job); + if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) { + return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; + } + } else { + /* Came across a virtual job, so stop counting */ + return count; + } + } + return count; +} + +struct mali_pp_job *mali_scheduler_job_pp_next(void) +{ + struct mali_pp_job *job; + struct mali_pp_job *temp; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + + /* Check for partially started normal pri jobs */ + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next, + struct mali_pp_job, list); + + MALI_DEBUG_ASSERT_POINTER(job); + + if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) { + return job; + } + } + + _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri, + struct mali_pp_job, list) { + return job; + } + + _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri, + struct mali_pp_job, list) { + return job; + } + + return NULL; +} + +mali_bool mali_scheduler_job_next_is_virtual(void) +{ + struct mali_pp_job *job; + + job = mali_scheduler_job_pp_virtual_peek(); + if (NULL != job) { + MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job)); + + return MALI_TRUE; + } + + return MALI_FALSE; +} + +struct mali_gp_job *mali_scheduler_job_gp_get(void) +{ + _mali_osk_list_t *queue; + struct mali_gp_job *job = NULL; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + MALI_DEBUG_ASSERT(0 < job_queue_gp.depth); + MALI_DEBUG_ASSERT(job_queue_gp.big_job_num <= job_queue_gp.depth); + + if 
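The protection test repeated through mali_scheduler_job_physical_head_count() — count a job only if (not secure and not protected) or (secure and protected) — is simply "the job's protected flag matches the requested GPU mode". A compact equivalent:

#include <stdbool.h>

/* Equivalent to the two-clause check in the queue-walking loops. */
static bool protection_matches(bool gpu_mode_is_secure, bool job_is_protected)
{
	return gpu_mode_is_secure == job_is_protected;
}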
(!_mali_osk_list_empty(&job_queue_gp.high_pri)) { + queue = &job_queue_gp.high_pri; + } else { + queue = &job_queue_gp.normal_pri; + MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue)); + } + + job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list); + + MALI_DEBUG_ASSERT_POINTER(job); + + mali_gp_job_list_remove(job); + job_queue_gp.depth--; + if (job->big_job) { + job_queue_gp.big_job_num--; + if (job_queue_gp.big_job_num < MALI_MAX_PENDING_BIG_JOB) { + /* wake up process */ + wait_queue_head_t *queue = mali_session_get_wait_queue(); + wake_up(queue); + } + } + return job; +} + +struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void) +{ + struct mali_pp_job *job = NULL; + struct mali_pp_job *tmp_job = NULL; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + + /* + * For PP jobs we favour partially started jobs in normal + * priority queue over unstarted jobs in high priority queue + */ + + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next, + struct mali_pp_job, list); + MALI_DEBUG_ASSERT(NULL != tmp_job); + + if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) { + job = tmp_job; + } + } + + if (NULL == job || + MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) { + /* + * There isn't a partially started job in normal queue, so + * look in high priority queue. + */ + if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next, + struct mali_pp_job, list); + MALI_DEBUG_ASSERT(NULL != tmp_job); + + if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) { + job = tmp_job; + } + } + } + + return job; +} + +struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void) +{ + struct mali_pp_job *job = NULL; + struct mali_pp_job *tmp_job = NULL; + + MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + + if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next, + struct mali_pp_job, list); + + if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) { + job = tmp_job; + } + } + + if (NULL == job) { + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + MALI_DEBUG_ASSERT(0 < job_queue_pp.depth); + + tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next, + struct mali_pp_job, list); + + if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) { + job = tmp_job; + } + } + } + + return job; +} + +struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job) +{ + struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek(); + + MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job)); + + if (NULL != job) { + *sub_job = mali_pp_job_get_first_unstarted_sub_job(job); + + mali_pp_job_mark_sub_job_started(job, *sub_job); + if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) { + /* Remove from queue when last sub job has been retrieved */ + mali_pp_job_list_remove(job); + } + + job_queue_pp.depth--; + + /* + * Job is about to start, so it is no longer + * possible to discard WB + */ + mali_pp_job_fb_lookup_remove(job); + } + + return job; +} + +struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void) +{ + struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek(); + + MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_virtual(job)); + + if (NULL != job) { + MALI_DEBUG_ASSERT(0 == + mali_pp_job_get_first_unstarted_sub_job(job)); + MALI_DEBUG_ASSERT(1 == 
mali_pp_job_get_sub_job_count(job)); + + mali_pp_job_mark_sub_job_started(job, 0); + + mali_pp_job_list_remove(job); + + job_queue_pp.depth--; + + /* + * Job is about to start, so it is no longer + * possible to discard WB + */ + mali_pp_job_fb_lookup_remove(job); + } + + return job; +} + +mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n", + mali_gp_job_get_id(job), job)); + + mali_scheduler_lock(); + + if (!mali_scheduler_queue_gp_job(job)) { + /* Failed to enqueue job, release job (with error) */ + + mali_scheduler_unlock(); + + mali_timeline_tracker_release(mali_gp_job_get_tracker(job)); + mali_gp_job_signal_pp_tracker(job, MALI_FALSE); + + /* This will notify user space and close the job object */ + mali_scheduler_complete_gp_job(job, MALI_FALSE, + MALI_TRUE, MALI_FALSE); + + return MALI_SCHEDULER_MASK_EMPTY; + } + + mali_scheduler_unlock(); + + return MALI_SCHEDULER_MASK_GP; +} + +mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n", + mali_pp_job_get_id(job), job)); + + if (MALI_TRUE == mali_timeline_tracker_activation_error( + mali_pp_job_get_tracker(job))) { + MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n", + mali_pp_job_get_id(job), job)); + + mali_scheduler_lock(); + mali_pp_job_fb_lookup_remove(job); + mali_pp_job_mark_unstarted_failed(job); + mali_scheduler_unlock(); + + mali_timeline_tracker_release(mali_pp_job_get_tracker(job)); + + /* This will notify user space and close the job object */ + mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE); + + return MALI_SCHEDULER_MASK_EMPTY; + } + +#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) + if (mali_pp_job_needs_dma_buf_mapping(job)) { + mali_scheduler_deferred_pp_job_queue(job); + return MALI_SCHEDULER_MASK_EMPTY; + } +#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */ + + mali_scheduler_lock(); + + if (!mali_scheduler_queue_pp_job(job)) { + /* Failed to enqueue job, release job (with error) */ + mali_pp_job_fb_lookup_remove(job); + mali_pp_job_mark_unstarted_failed(job); + mali_scheduler_unlock(); + + mali_timeline_tracker_release(mali_pp_job_get_tracker(job)); + + /* This will notify user space and close the job object */ + mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE); + + return MALI_SCHEDULER_MASK_EMPTY; + } + + mali_scheduler_unlock(); + return MALI_SCHEDULER_MASK_PP; +} + +void mali_scheduler_complete_gp_job(struct mali_gp_job *job, + mali_bool success, + mali_bool user_notification, + mali_bool dequeued) +{ + if (user_notification) { + mali_scheduler_return_gp_job_to_user(job, success); + } + + if (dequeued) { + _mali_osk_pm_dev_ref_put(); + + if (mali_utilization_enabled()) { + mali_utilization_gp_end(); + } + mali_pm_record_gpu_idle(MALI_TRUE); + } + + mali_gp_job_delete(job); +} + +void mali_scheduler_complete_pp_job(struct mali_pp_job *job, + u32 num_cores_in_virtual, + mali_bool user_notification, + mali_bool dequeued) +{ + job->user_notification = user_notification; + job->num_pp_cores_in_virtual = num_cores_in_virtual; + +#if defined(CONFIG_MALI_DMA_BUF_FENCE) + if (NULL != job->rendered_dma_fence) + mali_dma_fence_signal_and_put(&job->rendered_dma_fence); +#endif + + if (dequeued) { +#if defined(CONFIG_MALI_DVFS) + if 
(mali_pp_job_is_window_surface(job)) { + struct mali_session_data *session; + session = mali_pp_job_get_session(job); + mali_session_inc_num_window_jobs(session); + } +#endif + _mali_osk_pm_dev_ref_put(); + + if (mali_utilization_enabled()) { + mali_utilization_pp_end(); + } + mali_pm_record_gpu_idle(MALI_FALSE); + } + + /* With the ZRAM feature enabled, all PP jobs are forced to use deferred delete. */ + mali_scheduler_deferred_pp_job_delete(job); +} + +void mali_scheduler_abort_session(struct mali_session_data *session) +{ + struct mali_gp_job *gp_job; + struct mali_gp_job *gp_tmp; + struct mali_pp_job *pp_job; + struct mali_pp_job *pp_tmp; + _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp); + _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp); + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT(session->is_aborting); + + MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n", + session)); + + mali_scheduler_lock(); + + /* Remove from GP normal priority queue */ + _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri, + struct mali_gp_job, list) { + if (mali_gp_job_get_session(gp_job) == session) { + mali_gp_job_list_move(gp_job, &removed_jobs_gp); + job_queue_gp.depth--; + job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0; + } + } + + /* Remove from GP high priority queue */ + _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri, + struct mali_gp_job, list) { + if (mali_gp_job_get_session(gp_job) == session) { + mali_gp_job_list_move(gp_job, &removed_jobs_gp); + job_queue_gp.depth--; + job_queue_gp.big_job_num -= gp_job->big_job ? 1 : 0; + } + } + + /* Remove from PP normal priority queue */ + _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, + &job_queue_pp.normal_pri, + struct mali_pp_job, list) { + if (mali_pp_job_get_session(pp_job) == session) { + mali_pp_job_fb_lookup_remove(pp_job); + + job_queue_pp.depth -= + mali_pp_job_unstarted_sub_job_count( + pp_job); + mali_pp_job_mark_unstarted_failed(pp_job); + + if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) { + if (mali_pp_job_is_complete(pp_job)) { + mali_pp_job_list_move(pp_job, + &removed_jobs_pp); + } else { + mali_pp_job_list_remove(pp_job); + } + } + } + } + + /* Remove from PP high priority queue */ + _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, + &job_queue_pp.high_pri, + struct mali_pp_job, list) { + if (mali_pp_job_get_session(pp_job) == session) { + mali_pp_job_fb_lookup_remove(pp_job); + + job_queue_pp.depth -= + mali_pp_job_unstarted_sub_job_count( + pp_job); + mali_pp_job_mark_unstarted_failed(pp_job); + + if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(pp_job)) { + if (mali_pp_job_is_complete(pp_job)) { + mali_pp_job_list_move(pp_job, + &removed_jobs_pp); + } else { + mali_pp_job_list_remove(pp_job); + } + } + } + } + + /* + * Release scheduler lock so we can release trackers + * (which will potentially queue new jobs) + */ + mali_scheduler_unlock(); + + /* Release and complete all (non-running) found GP jobs */ + _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp, + struct mali_gp_job, list) { + mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job)); + mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE); + _mali_osk_list_delinit(&gp_job->list); + mali_scheduler_complete_gp_job(gp_job, + MALI_FALSE, MALI_FALSE, MALI_TRUE); + } + + /* Release and complete non-running PP jobs */ + _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp, + struct mali_pp_job, list) { + 
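mali_scheduler_abort_session() is a textbook two-phase teardown: while holding the scheduler lock it only moves matching jobs onto private lists, then it drops the lock before doing the heavyweight completion work, since releasing trackers can queue new jobs and re-enter the scheduler. A generic sketch of the pattern with standard Linux list and spinlock primitives:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item { struct list_head node; int session_id; };

static void abort_session_items(struct list_head *queue, spinlock_t *lock,
				int session_id)
{
	struct item *it, *tmp;
	LIST_HEAD(removed);

	/* Phase 1: under the lock, just unlink the victims. */
	spin_lock(lock);
	list_for_each_entry_safe(it, tmp, queue, node) {
		if (it->session_id == session_id)
			list_move(&it->node, &removed);
	}
	spin_unlock(lock);

	/* Phase 2: heavyweight completion without the lock held. */
	list_for_each_entry_safe(it, tmp, &removed, node) {
		list_del_init(&it->node);
		/* complete/free 'it' here; may sleep or take other locks */
	}
}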
mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job)); + _mali_osk_list_delinit(&pp_job->list); + mali_scheduler_complete_pp_job(pp_job, 0, + MALI_FALSE, MALI_TRUE); + } +} + +_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, + _mali_uk_gp_start_job_s *uargs) +{ + struct mali_session_data *session; + struct mali_gp_job *job; + mali_timeline_point point; + u32 __user *point_ptr = NULL; + + MALI_DEBUG_ASSERT_POINTER(uargs); + MALI_DEBUG_ASSERT_POINTER(ctx); + + session = (struct mali_session_data *)(uintptr_t)ctx; + + job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(), + NULL); + if (NULL == job) { + MALI_PRINT_ERROR(("Failed to create GP job.\n")); + return _MALI_OSK_ERR_NOMEM; + } + + point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job); + + point = mali_scheduler_submit_gp_job(session, job); + + if (0 != _mali_osk_put_user(((u32) point), point_ptr)) { + /* + * Let user space know that something failed + * after the job was started. + */ + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, + _mali_uk_pp_start_job_s *uargs) +{ + _mali_osk_errcode_t ret; + struct mali_session_data *session; + struct mali_pp_job *job; + mali_timeline_point point; + u32 __user *point_ptr = NULL; + + MALI_DEBUG_ASSERT_POINTER(uargs); + MALI_DEBUG_ASSERT_POINTER(ctx); + + session = (struct mali_session_data *)(uintptr_t)ctx; + + job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id()); + if (NULL == job) { + MALI_PRINT_ERROR(("Failed to create PP job.\n")); + return _MALI_OSK_ERR_NOMEM; + } + + point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job); + + /* Submit PP job. */ + ret = mali_scheduler_submit_pp_job(session, job, &point); + job = NULL; + + if (_MALI_OSK_ERR_OK == ret) { + if (0 != _mali_osk_put_user(((u32) point), point_ptr)) { + /* + * Let user space know that something failed + * after the jobs were started. + */ + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + } + + return ret; +} + +_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, + _mali_uk_pp_and_gp_start_job_s *uargs) +{ + _mali_osk_errcode_t ret; + struct mali_session_data *session; + _mali_uk_pp_and_gp_start_job_s kargs; + struct mali_pp_job *pp_job; + struct mali_gp_job *gp_job; + u32 __user *point_ptr = NULL; + mali_timeline_point point; + _mali_uk_pp_start_job_s __user *pp_args; + _mali_uk_gp_start_job_s __user *gp_args; + + MALI_DEBUG_ASSERT_POINTER(ctx); + MALI_DEBUG_ASSERT_POINTER(uargs); + + session = (struct mali_session_data *) ctx; + + if (0 != _mali_osk_copy_from_user(&kargs, uargs, + sizeof(_mali_uk_pp_and_gp_start_job_s))) { + return _MALI_OSK_ERR_NOMEM; + } + + pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args; + gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args; + + pp_job = mali_pp_job_create(session, pp_args, + mali_scheduler_get_new_id()); + if (NULL == pp_job) { + MALI_PRINT_ERROR(("Failed to create PP job.\n")); + return _MALI_OSK_ERR_NOMEM; + } + + gp_job = mali_gp_job_create(session, gp_args, + mali_scheduler_get_new_id(), + mali_pp_job_get_tracker(pp_job)); + if (NULL == gp_job) { + MALI_PRINT_ERROR(("Failed to create GP job.\n")); + mali_pp_job_delete(pp_job); + return _MALI_OSK_ERR_NOMEM; + } + + point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job); + + /* Submit GP job. */ + mali_scheduler_submit_gp_job(session, gp_job); + gp_job = NULL; + + /* Submit PP job. 
*/ + ret = mali_scheduler_submit_pp_job(session, pp_job, &point); + pp_job = NULL; + + if (_MALI_OSK_ERR_OK == ret) { + if (0 != _mali_osk_put_user(((u32) point), point_ptr)) { + /* + * Let user space know that something failed + * after the jobs were started. + */ + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + } + + return ret; +} + +void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args) +{ + struct mali_session_data *session; + struct mali_pp_job *job; + struct mali_pp_job *tmp; + u32 fb_lookup_id; + + MALI_DEBUG_ASSERT_POINTER(args); + MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx); + + session = (struct mali_session_data *)(uintptr_t)args->ctx; + + fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK; + + mali_scheduler_lock(); + + /* Iterate over all jobs for given frame builder_id. */ + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, + &session->pp_job_fb_lookup_list[fb_lookup_id], + struct mali_pp_job, session_fb_lookup_list) { + MALI_DEBUG_CODE(u32 disable_mask = 0); + + if (mali_pp_job_get_frame_builder_id(job) != + (u32) args->fb_id) { + MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n")); + continue; + } + + MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3)); + + if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) { + MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1)); + mali_pp_job_disable_wb0(job); + } + + if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) { + MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2)); + mali_pp_job_disable_wb1(job); + } + + if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) { + MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3)); + mali_pp_job_disable_wb2(job); + } + MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n", + disable_mask)); + } + + mali_scheduler_unlock(); +} + +#if MALI_STATE_TRACKING +u32 mali_scheduler_dump_state(char *buf, u32 size) +{ + int n = 0; + + n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n"); + n += _mali_osk_snprintf(buf + n, size - n, + "\tQueue depth: %u\n", job_queue_gp.depth); + n += _mali_osk_snprintf(buf + n, size - n, + "\tNormal priority queue is %s\n", + _mali_osk_list_empty(&job_queue_gp.normal_pri) ? + "empty" : "not empty"); + n += _mali_osk_snprintf(buf + n, size - n, + "\tHigh priority queue is %s\n", + _mali_osk_list_empty(&job_queue_gp.high_pri) ? + "empty" : "not empty"); + + n += _mali_osk_snprintf(buf + n, size - n, + "PP queues\n"); + n += _mali_osk_snprintf(buf + n, size - n, + "\tQueue depth: %u\n", job_queue_pp.depth); + n += _mali_osk_snprintf(buf + n, size - n, + "\tNormal priority queue is %s\n", + _mali_osk_list_empty(&job_queue_pp.normal_pri) + ? "empty" : "not empty"); + n += _mali_osk_snprintf(buf + n, size - n, + "\tHigh priority queue is %s\n", + _mali_osk_list_empty(&job_queue_pp.high_pri) + ? "empty" : "not empty"); + + n += _mali_osk_snprintf(buf + n, size - n, "\n"); + + return n; +} +#endif + +/* + * ---------- Implementation of static functions ---------- + */ + +static mali_timeline_point mali_scheduler_submit_gp_job( + struct mali_session_data *session, struct mali_gp_job *job) +{ + mali_timeline_point point; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_POINTER(job); + + /* Add job to Timeline system. 
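The Timeline system activates the tracker once all dependencies in the job's fence have been resolved; activation then queues the job, see mali_scheduler_activate_gp_job() in mali_scheduler.h.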
*/
+ point = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_gp_job_get_tracker(job), MALI_TIMELINE_GP);
+
+ return point;
+}
+
+static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point)
+
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ struct ww_acquire_ctx ww_actx;
+ u32 i;
+ u32 num_memory_cookies = 0;
+ struct reservation_object **reservation_object_list = NULL;
+ unsigned int num_reservation_object = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_scheduler_lock();
+ /*
+ * Adding job to the lookup list used to quickly discard
+ * writeback units of queued jobs.
+ */
+ mali_pp_job_fb_lookup_add(job);
+ mali_scheduler_unlock();
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+
+ /* Allocate the reservation_object_list to list the dma reservation objects of dependent dma buffers */
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+ if (0 < num_memory_cookies) {
+ reservation_object_list = kzalloc(sizeof(struct reservation_object *) * num_memory_cookies, GFP_KERNEL);
+ if (NULL == reservation_object_list) {
+ MALI_PRINT_ERROR(("Failed to alloc the reservation object list.\n"));
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto failed_to_alloc_reservation_object_list;
+ }
+ }
+
+ /* Add the dma reservation objects into reservation_object_list */
+ for (i = 0; i < num_memory_cookies; i++) {
+ mali_mem_backend *mem_backend = NULL;
+ struct reservation_object *tmp_reservation_object = NULL;
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+
+ mem_backend = mali_mem_backend_struct_search(session, mali_addr);
+
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+
+ if (NULL == mem_backend) {
+ MALI_PRINT_ERROR(("Failed to find the memory backend for memory cookie[%d].\n", i));
+ ret = _MALI_OSK_ERR_FAULT; /* ret would otherwise still be _MALI_OSK_ERR_OK */
+ goto failed_to_find_mem_backend;
+ }
+
+ if (MALI_MEM_DMA_BUF != mem_backend->type)
+ continue;
+
+ tmp_reservation_object = mem_backend->dma_buf.attachment->buf->resv;
+
+ if (NULL != tmp_reservation_object) {
+ mali_dma_fence_add_reservation_object_list(tmp_reservation_object,
+ reservation_object_list, &num_reservation_object);
+ }
+ }
+
+ /*
+ * Add the mali dma fence callback to wait for all dependent dma bufs,
+ * and extend the timeline system to support dma fences,
+ * then create a new internal dma fence to replace the last dma fences of all dependent dma bufs.
+ */
+ if (0 < num_reservation_object) {
+ int error;
+ int num_dma_fence_waiter = 0;
+ /* Create one new dma fence. */
+ job->rendered_dma_fence = mali_dma_fence_new(job->session->fence_context,
+ _mali_osk_atomic_inc_return(&job->session->fence_seqno));
+
+ if (NULL == job->rendered_dma_fence) {
+ MALI_PRINT_ERROR(("Failed to create one new dma fence.\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ goto failed_to_create_dma_fence;
+ }
+
+ /* In order to avoid deadlock, use the wait/wound mutex to lock all dma buffers */
+
+ error = mali_dma_fence_lock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+
+ if (0 != error) {
+ MALI_PRINT_ERROR(("Failed to lock all reservation objects.\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ goto failed_to_lock_reservation_object_list;
+ }
+
+ mali_dma_fence_context_init(&job->dma_fence_context,
+ mali_timeline_dma_fence_callback, (void *)job);
+
+ /* Add dma fence waiters and dma fence callback.
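mali_timeline_dma_fence_callback() is expected to fire once every dependent dma fence has signaled, releasing the job to the Timeline system; note the matching mali_dma_fence_context_dec_count() call below for the case where waiters were actually added.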
*/
+ for (i = 0; i < num_reservation_object; i++) {
+ ret = mali_dma_fence_context_add_waiters(&job->dma_fence_context, reservation_object_list[i]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to add waiter into mali dma fence context.\n"));
+ goto failed_to_add_dma_fence_waiter;
+ }
+ }
+
+ for (i = 0; i < num_reservation_object; i++) {
+ reservation_object_add_excl_fence(reservation_object_list[i], job->rendered_dma_fence);
+ }
+
+ num_dma_fence_waiter = job->dma_fence_context.num_dma_fence_waiter;
+
+ /* Add job to Timeline system. */
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+
+ if (0 != num_dma_fence_waiter) {
+ mali_dma_fence_context_dec_count(&job->dma_fence_context);
+ }
+
+ /* Unlock all wait/wound mutex locks. */
+ mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+ } else {
+ /* Add job to Timeline system. */
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+ }
+
+ kfree(reservation_object_list);
+ return ret;
+#else
+ /* Add job to Timeline system. */
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+#endif
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+failed_to_add_dma_fence_waiter:
+ mali_dma_fence_context_term(&job->dma_fence_context);
+ mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+failed_to_lock_reservation_object_list:
+ mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
+failed_to_create_dma_fence:
+failed_to_find_mem_backend:
+ if (NULL != reservation_object_list)
+ kfree(reservation_object_list);
+failed_to_alloc_reservation_object_list:
+ mali_pp_job_fb_lookup_remove(job);
+#endif
+ return ret;
+}
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_gp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ }
+
+ mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ /* Determine which queue the job should be added to. */
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ }
+
+ job_queue_gp.depth += 1;
+ job_queue_gp.big_job_num += (job->big_job) ? 1 : 0;
+
+ /* Add job to queue (mali_gp_job_list_add finds the correct place). */
+ mali_gp_job_list_add(job, queue);
+
+ /*
+ * We hold a PM reference for every job we hold queued (and running).
+ * It is important that we take this reference after the job has been
+ * added to the queue so that any runtime resume could schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the GP as busy from the
+ * time a GP job is queued. This will be fine because we only
+ * lose the tiny idle gap between jobs, but we will instead
+ * get less utilization work to do (fewer locks taken)
+ */
+ mali_utilization_gp_start();
+ }
+
+ mali_pm_record_gpu_active(MALI_TRUE);
+
+ /* Add profiling events for job enqueued */
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE,
+ mali_gp_job_get_pid(job),
+ mali_gp_job_get_tid(job),
+ mali_gp_job_get_frame_builder_id(job),
+ mali_gp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_gp_job_get_tid(job),
+ mali_gp_job_get_id(job), "GP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n",
+ mali_gp_job_get_id(job), job));
+
+ return MALI_TRUE; /* job queued */
+}
+
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue = NULL;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_pp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ } else if (unlikely(MALI_SWAP_IN_FAIL == job->swap_status)) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while swap in failed.\n",
+ mali_pp_job_get_id(job), job));
+ return MALI_FALSE;
+ }
+
+ mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_pp.high_pri;
+ } else {
+ queue = &job_queue_pp.normal_pri;
+ }
+
+ job_queue_pp.depth +=
+ mali_pp_job_get_sub_job_count(job);
+
+ /* Add job to queue (mali_pp_job_list_add finds the correct place). */
+ mali_pp_job_list_add(job, queue);
+
+ /*
+ * We hold a PM reference for every job we hold queued (and running).
+ * It is important that we take this reference after the job has been
+ * added to the queue so that any runtime resume could schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the PP as busy from the
+ * time a PP job is queued. This will be fine because we only
+ * lose the tiny idle gap between jobs, but we will instead
+ * get less utilization work to do (fewer locks taken)
+ */
+ mali_utilization_pp_start();
+ }
+
+ mali_pm_record_gpu_active(MALI_FALSE);
+
+ /* Add profiling events for job enqueued */
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE,
+ mali_pp_job_get_pid(job),
+ mali_pp_job_get_tid(job),
+ mali_pp_job_get_frame_builder_id(job),
+ mali_pp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_pp_job_get_tid(job),
+ mali_pp_job_get_id(job), "PP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+ mali_pp_job_is_virtual(job)
+ ?
"Virtual" : "Physical", + mali_pp_job_get_id(job), job, + mali_pp_job_get_sub_job_count(job))); + + return MALI_TRUE; /* job queued */ +} + +static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job, + mali_bool success) +{ + _mali_uk_gp_job_finished_s *jobres; + struct mali_session_data *session; + _mali_osk_notification_t *notification; + + MALI_DEBUG_ASSERT_POINTER(job); + + session = mali_gp_job_get_session(job); + MALI_DEBUG_ASSERT_POINTER(session); + + notification = mali_gp_job_get_finished_notification(job); + MALI_DEBUG_ASSERT_POINTER(notification); + + jobres = notification->result_buffer; + MALI_DEBUG_ASSERT_POINTER(jobres); + + jobres->pending_big_job_num = mali_scheduler_job_gp_big_job_count(); + + jobres->user_job_ptr = mali_gp_job_get_user_id(job); + if (MALI_TRUE == success) { + jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS; + } else { + jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR; + } + jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job); + jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job); + jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job); + + mali_session_send_notification(session, notification); +} + +void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job, + u32 num_cores_in_virtual) +{ + u32 i; + u32 num_counters_to_copy; + _mali_uk_pp_job_finished_s *jobres; + struct mali_session_data *session; + _mali_osk_notification_t *notification; + + if (MALI_TRUE == mali_pp_job_use_no_notification(job)) { + return; + } + + MALI_DEBUG_ASSERT_POINTER(job); + + session = mali_pp_job_get_session(job); + MALI_DEBUG_ASSERT_POINTER(session); + + notification = mali_pp_job_get_finished_notification(job); + MALI_DEBUG_ASSERT_POINTER(notification); + + jobres = notification->result_buffer; + MALI_DEBUG_ASSERT_POINTER(jobres); + + jobres->user_job_ptr = mali_pp_job_get_user_id(job); + if (MALI_TRUE == mali_pp_job_was_success(job)) { + jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS; + } else { + jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR; + } + + if (mali_pp_job_is_virtual(job)) { + num_counters_to_copy = num_cores_in_virtual; + } else { + num_counters_to_copy = mali_pp_job_get_sub_job_count(job); + } + + for (i = 0; i < num_counters_to_copy; i++) { + jobres->perf_counter0[i] = + mali_pp_job_get_perf_counter_value0(job, i); + jobres->perf_counter1[i] = + mali_pp_job_get_perf_counter_value1(job, i); + jobres->perf_counter_src0 = + mali_pp_job_get_pp_counter_global_src0(); + jobres->perf_counter_src1 = + mali_pp_job_get_pp_counter_global_src1(); + } + + mali_session_send_notification(session, notification); +} + +static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + + _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock); + mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue); + _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock); + + _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete); +} + +void mali_scheduler_do_pp_job_delete(void *arg) +{ + _MALI_OSK_LIST_HEAD_STATIC_INIT(list); + struct mali_pp_job *job; + struct mali_pp_job *tmp; + + MALI_IGNORE(arg); + + /* + * Quickly "unhook" the jobs pending to be deleted, so we can release + * the lock before we start deleting the job objects + * (without any locks held) + */ + _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock); + _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list); + _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock); + 
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+ _mali_osk_list_delinit(&job->list);
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ mali_dma_fence_context_term(&job->dma_fence_context);
+#endif
+
+ mali_pp_job_delete(job); /* delete the job object itself */
+ }
+}
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue);
+}
+
+static void mali_scheduler_do_pp_job_queue(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be queued, so we can release
+ * the lock before we start queueing the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ /* First loop through all jobs and do the pre-work (no locks needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ /*
+ * This operation could fail, but we continue anyway,
+ * because the worst that could happen is that this
+ * job will fail due to a Mali page fault.
+ */
+ mali_dma_buf_map_job(job);
+ }
+ }
+
+ mali_scheduler_lock();
+
+ /* Then loop through all jobs again to queue them (lock needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+
+ /* Remove from scheduler_pp_job_queue_list before queueing */
+ mali_pp_job_list_remove(job);
+
+ if (mali_scheduler_queue_pp_job(job)) {
+ /* Job queued successfully */
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
+ } else {
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+
+ /* unlock scheduler in this uncommon case */
+ mali_scheduler_unlock();
+
+ schedule_mask |= mali_timeline_tracker_release(
+ mali_pp_job_get_tracker(job));
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE,
+ MALI_FALSE);
+
+ mali_scheduler_lock();
+ }
+ }
+
+ mali_scheduler_unlock();
+
+ /* Trigger scheduling of jobs */
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+void mali_scheduler_gp_pp_job_queue_print(void)
+{
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_gp_job *tmp_gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+ struct mali_pp_job *tmp_pp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+ /* dump job queue status */
+ if ((0 == job_queue_gp.depth) && (0 == job_queue_pp.depth)) {
+ MALI_PRINT(("No GP&PP job in the job queue.\n"));
+ return;
+ }
+
+ MALI_PRINT(("Total (%d) GP job in the job queue.\n", job_queue_gp.depth));
+ if (job_queue_gp.depth > 0) {
+ if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.high_pri,
+ struct mali_gp_job, list) {
+ MALI_PRINT(("GP job(%p) id = %d tid =
%d pid = %d in the gp job high_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid)); + } + } + + if (!_mali_osk_list_empty(&job_queue_gp.normal_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(gp_job, tmp_gp_job, &job_queue_gp.normal_pri, + struct mali_gp_job, list) { + MALI_PRINT(("GP job(%p) id = %d tid = %d pid = %d in the gp job normal_pri queue\n", gp_job, gp_job->id, gp_job->tid, gp_job->pid)); + } + } + } + + MALI_PRINT(("Total (%d) PP job in the job queue.\n", job_queue_pp.depth)); + if (job_queue_pp.depth > 0) { + if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.high_pri, + struct mali_pp_job, list) { + if (mali_pp_job_is_virtual(pp_job)) { + MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } else { + MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job high_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } + } + } + + if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) { + _MALI_OSK_LIST_FOREACHENTRY(pp_job, tmp_pp_job, &job_queue_pp.normal_pri, + struct mali_pp_job, list) { + if (mali_pp_job_is_virtual(pp_job)) { + MALI_PRINT(("PP Virtual job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } else { + MALI_PRINT(("PP Physical job(%p) id = %d tid = %d pid = %d in the pp job normal_pri queue\n", pp_job, pp_job->id, pp_job->tid, pp_job->pid)); + } + } + } + } + + /* dump group running job status */ + mali_executor_running_status_print(); +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_scheduler.h b/drivers/gpu/arm/mali400/common/mali_scheduler.h --- a/drivers/gpu/arm/mali400/common/mali_scheduler.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_scheduler.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_SCHEDULER_H__ +#define __MALI_SCHEDULER_H__ + +#include "mali_osk.h" +#include "mali_osk_list.h" +#include "mali_scheduler_types.h" +#include "mali_session.h" + +struct mali_scheduler_job_queue { + _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */ + _MALI_OSK_LIST_HEAD(high_pri); /* Queued jobs with high priority */ + u32 depth; /* Depth of combined queues. 
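For PP jobs each unstarted sub-job contributes one count; see mali_scheduler_queue_pp_job().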
*/ + u32 big_job_num; +}; + +extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj; + +/* Queue of jobs to be executed on the GP group */ +extern struct mali_scheduler_job_queue job_queue_gp; + +/* Queue of PP jobs */ +extern struct mali_scheduler_job_queue job_queue_pp; + +extern _mali_osk_atomic_t mali_job_id_autonumber; +extern _mali_osk_atomic_t mali_job_cache_order_autonumber; + +#define MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj); + +_mali_osk_errcode_t mali_scheduler_initialize(void); +void mali_scheduler_terminate(void); + +MALI_STATIC_INLINE void mali_scheduler_lock(void) +{ + _mali_osk_spinlock_irq_lock(mali_scheduler_lock_obj); + MALI_DEBUG_PRINT(5, ("Mali scheduler: scheduler lock taken.\n")); +} + +MALI_STATIC_INLINE void mali_scheduler_unlock(void) +{ + MALI_DEBUG_PRINT(5, ("Mali scheduler: Releasing scheduler lock.\n")); + _mali_osk_spinlock_irq_unlock(mali_scheduler_lock_obj); +} + +MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void) +{ + return job_queue_gp.depth; +} +MALI_STATIC_INLINE u32 mali_scheduler_job_gp_big_job_count(void) +{ + return job_queue_gp.big_job_num; +} + +u32 mali_scheduler_job_physical_head_count(mali_bool gpu_mode_is_secure); + +mali_bool mali_scheduler_job_next_is_virtual(void); +struct mali_pp_job *mali_scheduler_job_pp_next(void); + +struct mali_gp_job *mali_scheduler_job_gp_get(void); +struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void); +struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void); +struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job); +struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void); + +MALI_STATIC_INLINE u32 mali_scheduler_get_new_id(void) +{ + return _mali_osk_atomic_inc_return(&mali_job_id_autonumber); +} + +MALI_STATIC_INLINE u32 mali_scheduler_get_new_cache_order(void) +{ + return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber); +} + +/** + * @brief Used by the Timeline system to queue a GP job. + * + * @note @ref mali_executor_schedule_from_mask() should be called if this + * function returns non-zero. + * + * @param job The GP job that is being activated. + * + * @return A scheduling bitmask that can be used to decide if scheduling is + * necessary after this call. + */ +mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job); + +/** + * @brief Used by the Timeline system to queue a PP job. + * + * @note @ref mali_executor_schedule_from_mask() should be called if this + * function returns non-zero. + * + * @param job The PP job that is being activated. + * + * @return A scheduling bitmask that can be used to decide if scheduling is + * necessary after this call. 
+ */
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job);
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued);
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued);
+
+void mali_scheduler_abort_session(struct mali_session_data *session);
+
+void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual);
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size);
+#endif
+
+void mali_scheduler_gp_pp_job_queue_print(void);
+
+#endif /* __MALI_SCHEDULER_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_scheduler_types.h b/drivers/gpu/arm/mali400/common/mali_scheduler_types.h
--- a/drivers/gpu/arm/mali400/common/mali_scheduler_types.h 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_scheduler_types.h 2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_TYPES_H__
+#define __MALI_SCHEDULER_TYPES_H__
+
+#include "mali_osk.h"
+
+#define MALI_SCHEDULER_JOB_ID_SPAN 65535
+
+/**
+ * Bitmask used for deferred scheduling of subsystems.
+ */
+typedef u32 mali_scheduler_mask;
+
+#define MALI_SCHEDULER_MASK_GP (1<<0)
+#define MALI_SCHEDULER_MASK_PP (1<<1)
+
+#define MALI_SCHEDULER_MASK_EMPTY 0
+#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
+
+#endif /* __MALI_SCHEDULER_TYPES_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_session.c b/drivers/gpu/arm/mali400/common/mali_session.c
--- a/drivers/gpu/arm/mali400/common/mali_session.c 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_session.c 2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+#include "mali_ukk.h"
+#ifdef MALI_MEM_SWAP_TRACKING
+#include "mali_memory_swap_alloc.h"
+#endif
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+static u32 mali_session_count = 0;
+
+_mali_osk_spinlock_irq_t *mali_sessions_lock = NULL;
+wait_queue_head_t pending_queue;
+
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+ /* init wait queue for big varying job */
+ init_waitqueue_head(&pending_queue);
+
+ mali_sessions_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SESSIONS);
+ if (NULL == mali_sessions_lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_session_terminate(void)
+{
+ if (NULL != mali_sessions_lock) {
+ _mali_osk_spinlock_irq_term(mali_sessions_lock);
+ mali_sessions_lock = NULL;
+ }
+}
+
+void mali_session_add(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_add(&session->link, &mali_sessions);
+ mali_session_count++;
+ mali_session_unlock();
+}
+
+void mali_session_remove(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_delinit(&session->link);
+ mali_session_count--;
+ mali_session_unlock();
+}
+
+u32 mali_session_get_count(void)
+{
+ return mali_session_count;
+}
+
+mali_bool mali_session_pp_job_is_empty(void *data)
+{
+ struct mali_session_data *session = (struct mali_session_data *)data;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if ( 0 == _mali_osk_atomic_read(&session->number_of_pp_jobs)) {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+wait_queue_head_t *mali_session_get_wait_queue(void)
+{
+ return &pending_queue;
+}
+
+/*
+ * Get the max completed window jobs from all active sessions,
+ * which will be used in the window-render frames-per-second calculation
+ */
+#if defined(CONFIG_MALI_DVFS)
+u32 mali_session_max_window_num(void)
+{
+ struct mali_session_data *session, *tmp;
+ u32 max_window_num = 0;
+ u32 tmp_number = 0;
+
+ mali_session_lock();
+
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ tmp_number = _mali_osk_atomic_xchg(
+ &session->number_of_window_jobs, 0);
+ if (max_window_num < tmp_number) {
+ max_window_num = tmp_number;
+ }
+ }
+
+ mali_session_unlock();
+
+ return max_window_num;
+}
+#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx)
+{
+ struct mali_session_data *session, *tmp;
+ u32 mali_mem_usage;
+ u32 total_mali_mem_size;
+#ifdef MALI_MEM_SWAP_TRACKING
+ u32 swap_pool_size;
+ u32 swap_unlock_size;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(print_ctx);
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+#ifdef MALI_MEM_SWAP_TRACKING
+ _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u %-10u\n",
+ session->comm, session->pid,
+ (atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE,
+ (unsigned int)session->max_mali_mem_allocated_size,
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_SWAP])) * _MALI_OSK_MALI_PAGE_SIZE)
+ );
+#else
+ _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u \n",
+ session->comm, session->pid,
+ (unsigned
int)((atomic_read(&session->mali_mem_allocated_pages)) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)session->max_mali_mem_allocated_size,
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_EXTERNAL])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_UMP])) * _MALI_OSK_MALI_PAGE_SIZE),
+ (unsigned int)((atomic_read(&session->mali_mem_array[MALI_MEM_DMA_BUF])) * _MALI_OSK_MALI_PAGE_SIZE)
+ );
+#endif
+ }
+ mali_session_unlock();
+ mali_mem_usage = _mali_ukk_report_memory_usage();
+ total_mali_mem_size = _mali_ukk_report_total_memory_size();
+ _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size);
+#ifdef MALI_MEM_SWAP_TRACKING
+ mali_mem_swap_tracking(&swap_pool_size, &swap_unlock_size);
+ _mali_osk_ctxprintf(print_ctx, "Mali swap mem pool : %u\nMali swap mem unlock: %u\n", swap_pool_size, swap_unlock_size);
+#endif
+}
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_session.h b/drivers/gpu/arm/mali400/common/mali_session.h
--- a/drivers/gpu/arm/mali400/common/mali_session.h 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_session.h 2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_memory_types.h"
+#include "mali_memory_manager.h"
+
+struct mali_timeline_system;
+struct mali_soft_system;
+
+/* Number of frame builder job lists per session. */
+#define MALI_PP_JOB_FB_LOOKUP_LIST_SIZE 16
+#define MALI_PP_JOB_FB_LOOKUP_LIST_MASK (MALI_PP_JOB_FB_LOOKUP_LIST_SIZE - 1)
+/* Max pending big jobs allowed in kernel */
+#define MALI_MAX_PENDING_BIG_JOB (2)
+
+struct mali_session_data {
+ _mali_osk_notification_queue_t *ioctl_queue;
+
+ _mali_osk_wait_queue_t *wait_queue; /**< The wait queue used to wait for the session's number of pp jobs to become 0. */
+
+ _mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
+ _mali_osk_mutex_t *cow_lock; /**< Lock protecting the cow memory free manipulation */
+#if 0
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+#endif
+ struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+ _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+ _MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
+
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
+#endif
+ _mali_osk_atomic_t number_of_pp_jobs; /**< Record the pp jobs on this session */
+
+ _mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id. Used to link jobs from same frame builder. */
+ struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session.
*/
+ struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */
+
+ mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
+ mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+ u32 pid;
+ char *comm;
+ atomic_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record mem types' usage for this session. */
+ atomic_t mali_mem_allocated_pages; /**< The currently allocated mali memory pages, which include mali os memory and mali dedicated memory. */
+ size_t max_mali_mem_allocated_size; /**< The past max mali memory allocated size, which include mali os memory and mali dedicated memory. */
+ /* Added for new memory system */
+ struct mali_allocation_manager allocation_mgr;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ u32 fence_context; /**< The execution dma fence context this fence is run on. */
+ _mali_osk_atomic_t fence_seqno; /**< A linearly increasing sequence number for this dma fence context. */
+#endif
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head in mali_kernel_core.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_spinlock_irq_t *mali_sessions_lock;
+
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_sessions_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+u32 mali_session_get_count(void);
+mali_bool mali_session_pp_job_is_empty(void *data);
+wait_queue_head_t *mali_session_get_wait_queue(void);
+
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
+
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+ return session->page_directory;
+}
+
+MALI_STATIC_INLINE void mali_session_memory_lock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_wait(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_memory_unlock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_signal(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+ _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+#if defined(CONFIG_MALI_DVFS)
+
+MALI_STATIC_INLINE void mali_session_inc_num_window_jobs(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_atomic_inc(&session->number_of_window_jobs);
+}
+
+/*
+ * Get the max completed window jobs from all active sessions,
+ * which will be used in the window-render frames-per-second calculation
+ */
+u32 mali_session_max_window_num(void);
+
+#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx);
+
+#endif /* __MALI_SESSION_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_soft_job.c b/drivers/gpu/arm/mali400/common/mali_soft_job.c
--- a/drivers/gpu/arm/mali400/common/mali_soft_job.c 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_soft_job.c 2018-05-06 08:49:49.178695419 +0200
@@ -0,0
+1,438 @@ +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_soft_job.h" +#include "mali_osk.h" +#include "mali_timeline.h" +#include "mali_session.h" +#include "mali_kernel_common.h" +#include "mali_uk_types.h" +#include "mali_scheduler.h" +#include "mali_executor.h" + +MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system) +{ + MALI_DEBUG_ASSERT_POINTER(system); + _mali_osk_spinlock_irq_lock(system->lock); + MALI_DEBUG_PRINT(5, ("Mali Soft Job: soft system %p lock taken\n", system)); + MALI_DEBUG_ASSERT(0 == system->lock_owner); + MALI_DEBUG_CODE(system->lock_owner = _mali_osk_get_tid()); +} + +MALI_STATIC_INLINE void mali_soft_job_system_unlock(struct mali_soft_job_system *system) +{ + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_PRINT(5, ("Mali Soft Job: releasing soft system %p lock\n", system)); + MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner); + MALI_DEBUG_CODE(system->lock_owner = 0); + _mali_osk_spinlock_irq_unlock(system->lock); +} + +#if defined(DEBUG) +MALI_STATIC_INLINE void mali_soft_job_system_assert_locked(struct mali_soft_job_system *system) +{ + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT(_mali_osk_get_tid() == system->lock_owner); +} +#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) mali_soft_job_system_assert_locked(system) +#else +#define MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system) +#endif /* defined(DEBUG) */ + +struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session) +{ + struct mali_soft_job_system *system; + + MALI_DEBUG_ASSERT_POINTER(session); + + system = (struct mali_soft_job_system *) _mali_osk_calloc(1, sizeof(struct mali_soft_job_system)); + if (NULL == system) { + return NULL; + } + + system->session = session; + + system->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SCHEDULER); + if (NULL == system->lock) { + mali_soft_job_system_destroy(system); + return NULL; + } + system->lock_owner = 0; + system->last_job_id = 0; + + _MALI_OSK_INIT_LIST_HEAD(&(system->jobs_used)); + + return system; +} + +void mali_soft_job_system_destroy(struct mali_soft_job_system *system) +{ + MALI_DEBUG_ASSERT_POINTER(system); + + /* All jobs should be free at this point. 
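(mali_soft_job_system_abort() must have been called first; see the note in mali_soft_job.h.)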
*/ + MALI_DEBUG_ASSERT(_mali_osk_list_empty(&(system->jobs_used))); + + if (NULL != system) { + if (NULL != system->lock) { + _mali_osk_spinlock_irq_term(system->lock); + } + _mali_osk_free(system); + } +} + +static void mali_soft_job_system_free_job(struct mali_soft_job_system *system, struct mali_soft_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(system); + + mali_soft_job_system_lock(job->system); + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id); + MALI_DEBUG_ASSERT(system == job->system); + + _mali_osk_list_del(&(job->system_list)); + + mali_soft_job_system_unlock(job->system); + + _mali_osk_free(job); +} + +MALI_STATIC_INLINE struct mali_soft_job *mali_soft_job_system_lookup_job(struct mali_soft_job_system *system, u32 job_id) +{ + struct mali_soft_job *job, *tmp; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_ASSERT_SOFT_JOB_SYSTEM_LOCKED(system); + + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) { + if (job->id == job_id) + return job; + } + + return NULL; +} + +void mali_soft_job_destroy(struct mali_soft_job *job) +{ + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(job->system); + + MALI_DEBUG_PRINT(4, ("Mali Soft Job: destroying soft job %u (0x%08X)\n", job->id, job)); + + if (NULL != job) { + if (0 < _mali_osk_atomic_dec_return(&job->refcount)) return; + + _mali_osk_atomic_term(&job->refcount); + + if (NULL != job->activated_notification) { + _mali_osk_notification_delete(job->activated_notification); + job->activated_notification = NULL; + } + + mali_soft_job_system_free_job(job->system, job); + } +} + +struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job) +{ + struct mali_soft_job *job; + _mali_osk_notification_t *notification = NULL; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT((MALI_SOFT_JOB_TYPE_USER_SIGNALED == type) || + (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == type)); + + notification = _mali_osk_notification_create(_MALI_NOTIFICATION_SOFT_ACTIVATED, sizeof(_mali_uk_soft_job_activated_s)); + if (unlikely(NULL == notification)) { + MALI_PRINT_ERROR(("Mali Soft Job: failed to allocate notification")); + return NULL; + } + + job = _mali_osk_malloc(sizeof(struct mali_soft_job)); + if (unlikely(NULL == job)) { + MALI_DEBUG_PRINT(2, ("Mali Soft Job: system alloc job failed. 
\n")); + return NULL; + } + + mali_soft_job_system_lock(system); + + job->system = system; + job->id = system->last_job_id++; + job->state = MALI_SOFT_JOB_STATE_ALLOCATED; + + _mali_osk_list_add(&(job->system_list), &(system->jobs_used)); + + job->type = type; + job->user_job = user_job; + job->activated = MALI_FALSE; + + job->activated_notification = notification; + + _mali_osk_atomic_init(&job->refcount, 1); + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state); + MALI_DEBUG_ASSERT(system == job->system); + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_INVALID_ID != job->id); + + mali_soft_job_system_unlock(system); + + return job; +} + +mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence) +{ + mali_timeline_point point; + struct mali_soft_job_system *system; + + MALI_DEBUG_ASSERT_POINTER(job); + MALI_DEBUG_ASSERT_POINTER(fence); + + MALI_DEBUG_ASSERT_POINTER(job->system); + system = job->system; + + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT_POINTER(system->session->timeline_system); + + mali_soft_job_system_lock(system); + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_ALLOCATED == job->state); + job->state = MALI_SOFT_JOB_STATE_STARTED; + + mali_soft_job_system_unlock(system); + + MALI_DEBUG_PRINT(4, ("Mali Soft Job: starting soft job %u (0x%08X)\n", job->id, job)); + + mali_timeline_tracker_init(&job->tracker, MALI_TIMELINE_TRACKER_SOFT, fence, job); + point = mali_timeline_system_add_tracker(system->session->timeline_system, &job->tracker, MALI_TIMELINE_SOFT); + + return point; +} + +static mali_bool mali_soft_job_is_activated(void *data) +{ + struct mali_soft_job *job; + + job = (struct mali_soft_job *) data; + MALI_DEBUG_ASSERT_POINTER(job); + + return job->activated; +} + +_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id) +{ + struct mali_soft_job *job; + struct mali_timeline_system *timeline_system; + mali_scheduler_mask schedule_mask; + + MALI_DEBUG_ASSERT_POINTER(system); + + mali_soft_job_system_lock(system); + + job = mali_soft_job_system_lookup_job(system, job_id); + + if ((NULL == job) || (MALI_SOFT_JOB_TYPE_USER_SIGNALED != job->type) + || !(MALI_SOFT_JOB_STATE_STARTED == job->state || MALI_SOFT_JOB_STATE_TIMED_OUT == job->state)) { + mali_soft_job_system_unlock(system); + MALI_PRINT_ERROR(("Mali Soft Job: invalid soft job id %u", job_id)); + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) { + job->state = MALI_SOFT_JOB_STATE_SIGNALED; + mali_soft_job_system_unlock(system); + + MALI_DEBUG_ASSERT(MALI_TRUE == job->activated); + MALI_DEBUG_PRINT(4, ("Mali Soft Job: soft job %u (0x%08X) was timed out\n", job->id, job)); + mali_soft_job_destroy(job); + + return _MALI_OSK_ERR_TIMEOUT; + } + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state); + + job->state = MALI_SOFT_JOB_STATE_SIGNALED; + mali_soft_job_system_unlock(system); + + /* Since the job now is in signaled state, timeouts from the timeline system will be + * ignored, and it is not possible to signal this job again. */ + + timeline_system = system->session->timeline_system; + MALI_DEBUG_ASSERT_POINTER(timeline_system); + + /* Wait until activated. 
*/
+ _mali_osk_wait_queue_wait_event(timeline_system->wait_queue, mali_soft_job_is_activated, (void *) job);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
+
+ schedule_mask = mali_timeline_tracker_release(&job->tracker);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+
+ mali_soft_job_destroy(job);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_soft_job_send_activated_notification(struct mali_soft_job *job)
+{
+ if (NULL != job->activated_notification) {
+ _mali_uk_soft_job_activated_s *res = job->activated_notification->result_buffer;
+ res->user_job = job->user_job;
+ mali_session_send_notification(job->system->session, job->activated_notification);
+ }
+ job->activated_notification = NULL;
+}
+
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline activation for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ MALI_DEBUG_PRINT(3, ("Mali Soft Job: Soft job %u (0x%08X) activated while session is aborting.\n", job->id, job));
+
+ mali_soft_job_system_unlock(job->system);
+
+ /* Since we are in shutdown, we can ignore the scheduling bitmask. */
+ mali_timeline_tracker_release(&job->tracker);
+ mali_soft_job_destroy(job);
+ return schedule_mask;
+ }
+
+ /* Send activated notification. */
+ mali_soft_job_send_activated_notification(job);
+
+ /* Wake up sleeping signaler. */
+ job->activated = MALI_TRUE;
+
+ /* If job type is self signaled, release tracker, move soft job to free list, and schedule at once */
+ if (MALI_SOFT_JOB_TYPE_SELF_SIGNALED == job->type) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state);
+
+ job->state = MALI_SOFT_JOB_STATE_SIGNALED;
+ mali_soft_job_system_unlock(job->system);
+
+ schedule_mask |= mali_timeline_tracker_release(&job->tracker);
+
+ mali_soft_job_destroy(job);
+ } else {
+ _mali_osk_wait_queue_wake_up(job->tracker.system->wait_queue);
+
+ mali_soft_job_system_unlock(job->system);
+ }
+
+ return schedule_mask;
+}
+
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job)
+{
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->system);
+ MALI_DEBUG_ASSERT_POINTER(job->system->session);
+ MALI_DEBUG_ASSERT(MALI_TRUE == job->activated);
+
+ MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeline timeout for soft job %u (0x%08X).\n", job->id, job));
+
+ mali_soft_job_system_lock(job->system);
+
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state ||
+ MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ if (unlikely(job->system->session->is_aborting)) {
+ /* The session is aborting. This job will be released and destroyed by @ref
+ * mali_soft_job_system_abort(). */
+ mali_soft_job_system_unlock(job->system);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ if (MALI_SOFT_JOB_STATE_STARTED != job->state) {
+ MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state);
+
+ /* The job is about to be signaled, ignore timeout.
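The signaler has already moved the job to the SIGNALED state and will release the tracker itself.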
*/ + MALI_DEBUG_PRINT(4, ("Mali Soft Job: Timeout on soft job %u (0x%08X) in signaled state.\n", job->id, job)); + mali_soft_job_system_unlock(job->system); + return schedule_mask; + } + + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state); + + job->state = MALI_SOFT_JOB_STATE_TIMED_OUT; + _mali_osk_atomic_inc(&job->refcount); + + mali_soft_job_system_unlock(job->system); + + schedule_mask = mali_timeline_tracker_release(&job->tracker); + + mali_soft_job_destroy(job); + + return schedule_mask; +} + +void mali_soft_job_system_abort(struct mali_soft_job_system *system) +{ + struct mali_soft_job *job, *tmp; + _MALI_OSK_LIST_HEAD_STATIC_INIT(jobs); + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT(system->session->is_aborting); + + MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting soft job system for session 0x%08X.\n", system->session)); + + mali_soft_job_system_lock(system); + + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &system->jobs_used, struct mali_soft_job, system_list) { + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_STARTED == job->state || + MALI_SOFT_JOB_STATE_TIMED_OUT == job->state); + + if (MALI_SOFT_JOB_STATE_STARTED == job->state) { + /* If the job has been activated, we have to release the tracker and destroy + * the job. If not, the tracker will be released and the job destroyed when + * it is activated. */ + if (MALI_TRUE == job->activated) { + MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting unsignaled soft job %u (0x%08X).\n", job->id, job)); + + job->state = MALI_SOFT_JOB_STATE_SIGNALED; + _mali_osk_list_move(&job->system_list, &jobs); + } + } else if (MALI_SOFT_JOB_STATE_TIMED_OUT == job->state) { + MALI_DEBUG_PRINT(3, ("Mali Soft Job: Aborting timed out soft job %u (0x%08X).\n", job->id, job)); + + /* We need to destroy this soft job. */ + _mali_osk_list_move(&job->system_list, &jobs); + } + } + + mali_soft_job_system_unlock(system); + + /* Release and destroy jobs. */ + _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &jobs, struct mali_soft_job, system_list) { + MALI_DEBUG_ASSERT(MALI_SOFT_JOB_STATE_SIGNALED == job->state || + MALI_SOFT_JOB_STATE_TIMED_OUT == job->state); + + if (MALI_SOFT_JOB_STATE_SIGNALED == job->state) { + mali_timeline_tracker_release(&job->tracker); + } + + /* Move job back to used list before destroying. */ + _mali_osk_list_move(&job->system_list, &system->jobs_used); + + mali_soft_job_destroy(job); + } +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_soft_job.h b/drivers/gpu/arm/mali400/common/mali_soft_job.h --- a/drivers/gpu/arm/mali400/common/mali_soft_job.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_soft_job.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_SOFT_JOB_H__ +#define __MALI_SOFT_JOB_H__ + +#include "mali_osk.h" + +#include "mali_timeline.h" + +struct mali_timeline_fence; +struct mali_session_data; +struct mali_soft_job; +struct mali_soft_job_system; + +/** + * Soft job types. 
+ *
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED will only complete after activation if either
+ * they are signaled by user-space (@ref mali_soft_job_system_signal_job) or if they are timed out
+ * by the Timeline system.
+ * Soft jobs of type MALI_SOFT_JOB_TYPE_SELF_SIGNALED will release the job's resources automatically
+ * in the kernel when the job is activated.
+ */
+typedef enum mali_soft_job_type {
+ MALI_SOFT_JOB_TYPE_SELF_SIGNALED,
+ MALI_SOFT_JOB_TYPE_USER_SIGNALED,
+} mali_soft_job_type;
+
+/**
+ * Soft job state.
+ *
+ * When a soft job is created (@ref mali_soft_job_create), its state is set to MALI_SOFT_JOB_STATE_ALLOCATED.
+ * Once the job is added to the timeline system (@ref mali_soft_job_start), the state changes to MALI_SOFT_JOB_STATE_STARTED.
+ *
+ * For soft jobs of type MALI_SOFT_JOB_TYPE_USER_SIGNALED the state is changed to
+ * MALI_SOFT_JOB_STATE_SIGNALED when @ref mali_soft_job_system_signal_job is called and the soft
+ * job's state is MALI_SOFT_JOB_STATE_STARTED or MALI_SOFT_JOB_STATE_TIMED_OUT.
+ *
+ * If a soft job of type MALI_SOFT_JOB_TYPE_USER_SIGNALED is timed out before being signaled, the
+ * state is changed to MALI_SOFT_JOB_STATE_TIMED_OUT. This can only happen to soft jobs in state
+ * MALI_SOFT_JOB_STATE_STARTED.
+ *
+ */
+typedef enum mali_soft_job_state {
+ MALI_SOFT_JOB_STATE_ALLOCATED,
+ MALI_SOFT_JOB_STATE_STARTED,
+ MALI_SOFT_JOB_STATE_SIGNALED,
+ MALI_SOFT_JOB_STATE_TIMED_OUT,
+} mali_soft_job_state;
+
+#define MALI_SOFT_JOB_INVALID_ID ((u32) -1)
+
+/**
+ * Soft job struct.
+ *
+ * A soft job can be used to represent any kind of CPU work done in kernel-space.
+ */
+typedef struct mali_soft_job {
+ mali_soft_job_type type; /**< Soft job type. Must be one of MALI_SOFT_JOB_TYPE_*. */
+ u64 user_job; /**< Identifier for soft job in user space. */
+ _mali_osk_atomic_t refcount; /**< Soft jobs are reference counted to prevent premature deletion. */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for soft job. */
+ mali_bool activated; /**< MALI_TRUE if the job has been activated, MALI_FALSE if not. */
+ _mali_osk_notification_t *activated_notification; /**< Pre-allocated notification object for ACTIVATED_NOTIFICATION. */
+
+ /* Protected by soft job system lock. */
+ u32 id; /**< Used by user-space to find corresponding soft job in kernel-space. */
+ mali_soft_job_state state; /**< State of soft job, must be one of MALI_SOFT_JOB_STATE_*. */
+ struct mali_soft_job_system *system; /**< The soft job system this job is in. */
+ _mali_osk_list_t system_list; /**< List element used by soft job system. */
+} mali_soft_job;
+
+/**
+ * Per-session soft job system.
+ *
+ * The soft job system is used to manage all soft jobs that belong to a session.
+ */
+typedef struct mali_soft_job_system {
+ struct mali_session_data *session; /**< The session this soft job system belongs to. */
+ _MALI_OSK_LIST_HEAD(jobs_used); /**< List of all allocated soft jobs. */
+
+ _mali_osk_spinlock_irq_t *lock; /**< Lock used to protect soft job system and its soft jobs. */
+ u32 lock_owner; /**< Contains tid of thread that locked the system or 0, if not locked. */
+ u32 last_job_id; /**< Records the last job id; protected by the lock. */
+} mali_soft_job_system;
+
+/**
+ * Create a soft job system.
+ *
+ * @param session The session this soft job system will belong to.
+ * @return The new soft job system, or NULL if unsuccessful.
+ */
+struct mali_soft_job_system *mali_soft_job_system_create(struct mali_session_data *session);
+
+/**
+ * Destroy a soft job system.
+/**
+ * Destroy a soft job system.
+ *
+ * @note The soft job system must not have any started or activated jobs. Call @ref
+ * mali_soft_job_system_abort first.
+ *
+ * @param system The soft job system we are destroying.
+ */
+void mali_soft_job_system_destroy(struct mali_soft_job_system *system);
+
+/**
+ * Create a soft job.
+ *
+ * @param system Soft job system to create soft job from.
+ * @param type Type of the soft job.
+ * @param user_job Identifier for soft job in user space.
+ * @return New soft job if successful, NULL if not.
+ */
+struct mali_soft_job *mali_soft_job_create(struct mali_soft_job_system *system, mali_soft_job_type type, u64 user_job);
+
+/**
+ * Destroy soft job.
+ *
+ * @param job Soft job to destroy.
+ */
+void mali_soft_job_destroy(struct mali_soft_job *job);
+
+/**
+ * Start a soft job.
+ *
+ * The soft job will be added to the Timeline system which will then activate it after all
+ * dependencies have been resolved.
+ *
+ * Create soft jobs with @ref mali_soft_job_create before starting them.
+ *
+ * @param job Soft job to start.
+ * @param fence Fence representing dependencies for this soft job.
+ * @return Point on soft job timeline.
+ */
+mali_timeline_point mali_soft_job_start(struct mali_soft_job *job, struct mali_timeline_fence *fence);
+
+/**
+ * Used by user-space to signal that a soft job has completed.
+ *
+ * @note Only valid for soft jobs with type MALI_SOFT_JOB_TYPE_USER_SIGNALED.
+ *
+ * @note The soft job must be in state MALI_SOFT_JOB_STATE_STARTED for the signal to be successful.
+ *
+ * @note If the soft job was signaled successfully, or it timed out, the soft job will be
+ * destroyed after this call and should no longer be used.
+ *
+ * @note This function will block until the soft job has been activated.
+ *
+ * @param system The soft job system the job was started in.
+ * @param job_id ID of soft job we are signaling.
+ *
+ * @return _MALI_OSK_ERR_ITEM_NOT_FOUND if the soft job ID was invalid, _MALI_OSK_ERR_TIMEOUT if the
+ * soft job was timed out or _MALI_OSK_ERR_OK if we successfully signaled the soft job.
+ */
+_mali_osk_errcode_t mali_soft_job_system_signal_job(struct mali_soft_job_system *system, u32 job_id);
+
+/**
+ * Used by the Timeline system to activate a soft job.
+ *
+ * @param job The soft job that is being activated.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_activate_job(struct mali_soft_job *job);
+
+/**
+ * Used by the Timeline system to time out a soft job.
+ *
+ * A soft job is timed out if it completes or is signaled later than MALI_TIMELINE_TIMEOUT_HZ after
+ * activation.
+ *
+ * @param job The soft job that is being timed out.
+ * @return A scheduling bitmask.
+ */
+mali_scheduler_mask mali_soft_job_system_timeout_job(struct mali_soft_job *job);
+
+/**
+ * Used to clean up activated soft jobs in the soft job system on session abort.
+ *
+ * @param system The soft job system that is being aborted.
+ */
+void mali_soft_job_system_abort(struct mali_soft_job_system *system);
+
+#endif /* __MALI_SOFT_JOB_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_spinlock_reentrant.c b/drivers/gpu/arm/mali400/common/mali_spinlock_reentrant.c
--- a/drivers/gpu/arm/mali400/common/mali_spinlock_reentrant.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_spinlock_reentrant.c	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013, 2016 ARM Limited. All rights reserved.
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_spinlock_reentrant.h" + +#include "mali_osk.h" +#include "mali_kernel_common.h" + +struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order) +{ + struct mali_spinlock_reentrant *spinlock; + + spinlock = _mali_osk_calloc(1, sizeof(struct mali_spinlock_reentrant)); + if (NULL == spinlock) { + return NULL; + } + + spinlock->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, lock_order); + if (NULL == spinlock->lock) { + mali_spinlock_reentrant_term(spinlock); + return NULL; + } + + return spinlock; +} + +void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock) +{ + MALI_DEBUG_ASSERT_POINTER(spinlock); + MALI_DEBUG_ASSERT(0 == spinlock->counter && 0 == spinlock->owner); + + if (NULL != spinlock->lock) { + _mali_osk_spinlock_irq_term(spinlock->lock); + } + + _mali_osk_free(spinlock); +} + +void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid) +{ + MALI_DEBUG_ASSERT_POINTER(spinlock); + MALI_DEBUG_ASSERT_POINTER(spinlock->lock); + MALI_DEBUG_ASSERT(0 != tid); + + MALI_DEBUG_PRINT(5, ("%s ^\n", __FUNCTION__)); + + if (tid != spinlock->owner) { + _mali_osk_spinlock_irq_lock(spinlock->lock); + MALI_DEBUG_ASSERT(0 == spinlock->owner && 0 == spinlock->counter); + spinlock->owner = tid; + } + + MALI_DEBUG_PRINT(5, ("%s v\n", __FUNCTION__)); + + ++spinlock->counter; +} + +void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid) +{ + MALI_DEBUG_ASSERT_POINTER(spinlock); + MALI_DEBUG_ASSERT_POINTER(spinlock->lock); + MALI_DEBUG_ASSERT(0 != tid && tid == spinlock->owner); + + --spinlock->counter; + if (0 == spinlock->counter) { + spinlock->owner = 0; + MALI_DEBUG_PRINT(5, ("%s release last\n", __FUNCTION__)); + _mali_osk_spinlock_irq_unlock(spinlock->lock); + } +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_spinlock_reentrant.h b/drivers/gpu/arm/mali400/common/mali_spinlock_reentrant.h --- a/drivers/gpu/arm/mali400/common/mali_spinlock_reentrant.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_spinlock_reentrant.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_SPINLOCK_REENTRANT_H__ +#define __MALI_SPINLOCK_REENTRANT_H__ + +#include "mali_osk.h" +#include "mali_kernel_common.h" + +/** + * Reentrant spinlock. + */ +struct mali_spinlock_reentrant { + _mali_osk_spinlock_irq_t *lock; + u32 owner; + u32 counter; +}; + +/** + * Create a new reentrant spinlock. + * + * @param lock_order Lock order. + * @return New reentrant spinlock. 
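+ *
+ * Example (illustrative only, mirroring the implementation above): the same
+ * thread may take the lock recursively without deadlocking:
+ *
+ *   u32 tid = _mali_osk_get_tid();
+ *   mali_spinlock_reentrant_wait(spinlock, tid);
+ *   mali_spinlock_reentrant_wait(spinlock, tid);    // counter == 2, no deadlock
+ *   mali_spinlock_reentrant_signal(spinlock, tid);
+ *   mali_spinlock_reentrant_signal(spinlock, tid);  // last signal releases the lock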
+ */
+struct mali_spinlock_reentrant *mali_spinlock_reentrant_init(_mali_osk_lock_order_t lock_order);
+
+/**
+ * Terminate reentrant spinlock and free any associated resources.
+ *
+ * @param spinlock Reentrant spinlock to terminate.
+ */
+void mali_spinlock_reentrant_term(struct mali_spinlock_reentrant *spinlock);
+
+/**
+ * Wait for reentrant spinlock to be signaled.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_wait(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Signal reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ */
+void mali_spinlock_reentrant_signal(struct mali_spinlock_reentrant *spinlock, u32 tid);
+
+/**
+ * Check if thread is holding reentrant spinlock.
+ *
+ * @param spinlock Reentrant spinlock.
+ * @param tid Thread ID.
+ * @return MALI_TRUE if thread is holding spinlock, MALI_FALSE if not.
+ */
+MALI_STATIC_INLINE mali_bool mali_spinlock_reentrant_is_held(struct mali_spinlock_reentrant *spinlock, u32 tid)
+{
+	MALI_DEBUG_ASSERT_POINTER(spinlock->lock);
+	return (tid == spinlock->owner && 0 < spinlock->counter);
+}
+
+#endif /* __MALI_SPINLOCK_REENTRANT_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_timeline.c b/drivers/gpu/arm/mali400/common/mali_timeline.c
--- a/drivers/gpu/arm/mali400/common/mali_timeline.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_timeline.c	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,1748 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timeline.h"
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_soft_job.h"
+#include "mali_timeline_fence_wait.h"
+#include "mali_timeline_sync_fence.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
+
+#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+
+/*
+ * The following three counters record how many GP, physical PP and virtual PP
+ * jobs are delayed in the whole timeline system; they are used to decide
+ * whether an idle group needs to be deactivated.
+ */
+_mali_osk_atomic_t gp_tracker_count;
+_mali_osk_atomic_t phy_pp_tracker_count;
+_mali_osk_atomic_t virt_pp_tracker_count;
+
+static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
+		struct mali_timeline_waiter *waiter);
+
+#if defined(CONFIG_SYNC)
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
+struct mali_deferred_fence_put_entry {
+	struct hlist_node list;
+	struct sync_fence *fence;
+};
+
+static HLIST_HEAD(mali_timeline_sync_fence_to_free_list);
+static DEFINE_SPINLOCK(mali_timeline_sync_fence_to_free_lock);
+
+static void put_sync_fences(struct work_struct *ignore)
+{
+	struct hlist_head list;
+	struct hlist_node *tmp, *pos;
+	unsigned long flags;
+	struct mali_deferred_fence_put_entry *o;
+
+	spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+	hlist_move_list(&mali_timeline_sync_fence_to_free_list, &list);
+	spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+	hlist_for_each_entry_safe(o, pos, tmp, &list, list) {
+		sync_fence_put(o->fence);
+		kfree(o);
+	}
+}
+
+static DECLARE_DELAYED_WORK(delayed_sync_fence_put, put_sync_fences);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+/* Callback that is called when a sync fence a tracker is waiting on is signaled. */
+static void mali_timeline_sync_fence_callback(struct sync_fence *sync_fence, struct sync_fence_waiter *sync_fence_waiter)
+{
+	struct mali_timeline_system *system;
+	struct mali_timeline_waiter *waiter;
+	struct mali_timeline_tracker *tracker;
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+	u32 tid = _mali_osk_get_tid();
+	mali_bool is_aborting = MALI_FALSE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+	int fence_status = sync_fence->status;
+#else
+	int fence_status = atomic_read(&sync_fence->status);
+#endif
+
+	MALI_DEBUG_ASSERT_POINTER(sync_fence);
+	MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
+
+	tracker = _MALI_OSK_CONTAINER_OF(sync_fence_waiter, struct mali_timeline_tracker, sync_fence_waiter);
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+
+	system = tracker->system;
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(system->session);
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	is_aborting = system->session->is_aborting;
+	if (!is_aborting && (0 > fence_status)) {
+		MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, fence_status));
+		tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT;
+	}
+
+	waiter = tracker->waiter_sync;
+	MALI_DEBUG_ASSERT_POINTER(waiter);
+
+	tracker->sync_fence = NULL;
+	tracker->fence.sync_fd = -1;
+
+	schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+	/* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
+	if (is_aborting) {
+		_mali_osk_wait_queue_wake_up(system->wait_queue);
+	}
+
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	/*
+	 * Older versions of Linux, before 3.5, don't support fput() in interrupt
+	 * context. For those older kernels, allocate a list object, put the
+	 * fence object on it, and defer the call to sync_fence_put() to a workqueue.
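+	 * Only the first entry added to an empty list schedules the delayed
+	 * work, so at most one put_sync_fences() pass is pending at a time.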
+	 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+	{
+		struct mali_deferred_fence_put_entry *obj;
+
+		obj = kzalloc(sizeof(struct mali_deferred_fence_put_entry), GFP_ATOMIC);
+		if (obj) {
+			unsigned long flags;
+			mali_bool schedule = MALI_FALSE;
+
+			obj->fence = sync_fence;
+
+			spin_lock_irqsave(&mali_timeline_sync_fence_to_free_lock, flags);
+			if (hlist_empty(&mali_timeline_sync_fence_to_free_list))
+				schedule = MALI_TRUE;
+			hlist_add_head(&obj->list, &mali_timeline_sync_fence_to_free_list);
+			spin_unlock_irqrestore(&mali_timeline_sync_fence_to_free_lock, flags);
+
+			if (schedule)
+				schedule_delayed_work(&delayed_sync_fence_put, 0);
+		}
+	}
+#else
+	sync_fence_put(sync_fence);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
+
+	if (!is_aborting) {
+		mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+	}
+}
+#endif /* defined(CONFIG_SYNC) */
+
+static mali_scheduler_mask mali_timeline_tracker_time_out(struct mali_timeline_tracker *tracker)
+{
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_SOFT == tracker->type);
+
+	return mali_soft_job_system_timeout_job((struct mali_soft_job *) tracker->job);
+}
+
+static void mali_timeline_timer_callback(void *data)
+{
+	struct mali_timeline_system *system;
+	struct mali_timeline_tracker *tracker;
+	struct mali_timeline *timeline;
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+	u32 tid = _mali_osk_get_tid();
+
+	timeline = (struct mali_timeline *) data;
+	MALI_DEBUG_ASSERT_POINTER(timeline);
+
+	system = timeline->system;
+	MALI_DEBUG_ASSERT_POINTER(system);
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	if (!system->timer_enabled) {
+		mali_spinlock_reentrant_signal(system->spinlock, tid);
+		return;
+	}
+
+	tracker = timeline->tracker_tail;
+	timeline->timer_active = MALI_FALSE;
+
+	if (NULL != tracker && MALI_TRUE == tracker->timer_active) {
+		/* This is likely delayed work that was already scheduled out before it could be cancelled. */
+		if (MALI_TIMELINE_TIMEOUT_HZ > (_mali_osk_time_tickcount() - tracker->os_tick_activate)) {
+			mali_spinlock_reentrant_signal(system->spinlock, tid);
+			return;
+		}
+
+		schedule_mask = mali_timeline_tracker_time_out(tracker);
+		tracker->timer_active = MALI_FALSE;
+	} else {
+		MALI_PRINT_ERROR(("Mali Timeline: Soft job timer callback without a waiting tracker.\n"));
+	}
+
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
+}
+
+void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
+{
+	u32 i;
+	u32 tid = _mali_osk_get_tid();
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+	system->timer_enabled = MALI_FALSE;
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		struct mali_timeline *timeline = system->timelines[i];
+
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+
+		if (NULL != timeline->delayed_work) {
+			_mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+			timeline->timer_active = MALI_FALSE;
+		}
+	}
+}
+
+static void mali_timeline_destroy(struct mali_timeline *timeline)
+{
+	MALI_DEBUG_ASSERT_POINTER(timeline);
+	if (NULL != timeline) {
+		/* Assert that the timeline object has been properly cleaned up before destroying it.
*/ + MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_head); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail); + MALI_DEBUG_ASSERT(NULL == timeline->waiter_head); + MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail); + MALI_DEBUG_ASSERT(NULL != timeline->system); + MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id); + + if (NULL != timeline->delayed_work) { + _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work); + _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work); + } + +#if defined(CONFIG_SYNC) + if (NULL != timeline->sync_tl) { + sync_timeline_destroy(timeline->sync_tl); + } +#endif /* defined(CONFIG_SYNC) */ + +#ifndef CONFIG_SYNC + _mali_osk_free(timeline); +#endif + } +} + +static struct mali_timeline *mali_timeline_create(struct mali_timeline_system *system, enum mali_timeline_id id) +{ + struct mali_timeline *timeline; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT(id < MALI_TIMELINE_MAX); + + timeline = (struct mali_timeline *) _mali_osk_calloc(1, sizeof(struct mali_timeline)); + if (NULL == timeline) { + return NULL; + } + + /* Initially the timeline is empty. */ +#if defined(MALI_TIMELINE_DEBUG_START_POINT) + /* Start the timeline a bit before wrapping when debugging. */ + timeline->point_next = UINT_MAX - MALI_TIMELINE_MAX_POINT_SPAN - 128; +#else + timeline->point_next = 1; +#endif + timeline->point_oldest = timeline->point_next; + + /* The tracker and waiter lists will initially be empty. */ + + timeline->system = system; + timeline->id = id; + + timeline->delayed_work = _mali_osk_wq_delayed_create_work(mali_timeline_timer_callback, timeline); + if (NULL == timeline->delayed_work) { + mali_timeline_destroy(timeline); + return NULL; + } + + timeline->timer_active = MALI_FALSE; + +#if defined(CONFIG_SYNC) + { + char timeline_name[32]; + + switch (id) { + case MALI_TIMELINE_GP: + _mali_osk_snprintf(timeline_name, 32, "mali-%u-gp", _mali_osk_get_pid()); + break; + case MALI_TIMELINE_PP: + _mali_osk_snprintf(timeline_name, 32, "mali-%u-pp", _mali_osk_get_pid()); + break; + case MALI_TIMELINE_SOFT: + _mali_osk_snprintf(timeline_name, 32, "mali-%u-soft", _mali_osk_get_pid()); + break; + default: + MALI_PRINT_ERROR(("Mali Timeline: Invalid timeline id %d\n", id)); + mali_timeline_destroy(timeline); + return NULL; + } + + timeline->destroyed = MALI_FALSE; + + timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name); + if (NULL == timeline->sync_tl) { + mali_timeline_destroy(timeline); + return NULL; + } + + timeline->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM); + if (NULL == timeline->spinlock) { + mali_timeline_destroy(timeline); + return NULL; + } + } +#endif /* defined(CONFIG_SYNC) */ + + return timeline; +} + +static void mali_timeline_insert_tracker(struct mali_timeline *timeline, struct mali_timeline_tracker *tracker) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT_POINTER(tracker); + + if (mali_timeline_is_full(timeline)) { + /* Don't add tracker if timeline is full. */ + tracker->point = MALI_TIMELINE_NO_POINT; + return; + } + + tracker->timeline = timeline; + tracker->point = timeline->point_next; + + /* Find next available point. 
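+	 * Points are u32 values that wrap around; the reserved value
+	 * MALI_TIMELINE_NO_POINT is skipped over when the counter wraps.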
*/ + timeline->point_next++; + if (MALI_TIMELINE_NO_POINT == timeline->point_next) { + timeline->point_next++; + } + + MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline)); + + if (MALI_TIMELINE_TRACKER_GP == tracker->type) { + _mali_osk_atomic_inc(&gp_tracker_count); + } else if (MALI_TIMELINE_TRACKER_PP == tracker->type) { + if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) { + _mali_osk_atomic_inc(&virt_pp_tracker_count); + } else { + _mali_osk_atomic_inc(&phy_pp_tracker_count); + } + } + + /* Add tracker as new head on timeline's tracker list. */ + if (NULL == timeline->tracker_head) { + /* Tracker list is empty. */ + MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail); + + timeline->tracker_tail = tracker; + + MALI_DEBUG_ASSERT(NULL == tracker->timeline_next); + MALI_DEBUG_ASSERT(NULL == tracker->timeline_prev); + } else { + MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next); + + tracker->timeline_prev = timeline->tracker_head; + timeline->tracker_head->timeline_next = tracker; + + MALI_DEBUG_ASSERT(NULL == tracker->timeline_next); + } + timeline->tracker_head = tracker; + + MALI_DEBUG_ASSERT(NULL == timeline->tracker_head->timeline_next); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail->timeline_prev); +} + +/* Inserting the waiter object into the given timeline */ +static void mali_timeline_insert_waiter(struct mali_timeline *timeline, struct mali_timeline_waiter *waiter_new) +{ + struct mali_timeline_waiter *waiter_prev; + struct mali_timeline_waiter *waiter_next; + + /* Waiter time must be between timeline head and tail, and there must + * be less than MALI_TIMELINE_MAX_POINT_SPAN elements between */ + MALI_DEBUG_ASSERT((waiter_new->point - timeline->point_oldest) < MALI_TIMELINE_MAX_POINT_SPAN); + MALI_DEBUG_ASSERT((-waiter_new->point + timeline->point_next) < MALI_TIMELINE_MAX_POINT_SPAN); + + /* Finding out where to put this waiter, in the linked waiter list of the given timeline **/ + waiter_prev = timeline->waiter_head; /* Insert new after waiter_prev */ + waiter_next = NULL; /* Insert new before waiter_next */ + + /* Iterating backwards from head (newest) to tail (oldest) until we + * find the correct spot to insert the new waiter */ + while (waiter_prev && mali_timeline_point_after(waiter_prev->point, waiter_new->point)) { + waiter_next = waiter_prev; + waiter_prev = waiter_prev->timeline_prev; + } + + if (NULL == waiter_prev && NULL == waiter_next) { + /* list is empty */ + timeline->waiter_head = waiter_new; + timeline->waiter_tail = waiter_new; + } else if (NULL == waiter_next) { + /* insert at head */ + waiter_new->timeline_prev = timeline->waiter_head; + timeline->waiter_head->timeline_next = waiter_new; + timeline->waiter_head = waiter_new; + } else if (NULL == waiter_prev) { + /* insert at tail */ + waiter_new->timeline_next = timeline->waiter_tail; + timeline->waiter_tail->timeline_prev = waiter_new; + timeline->waiter_tail = waiter_new; + } else { + /* insert between */ + waiter_new->timeline_next = waiter_next; + waiter_new->timeline_prev = waiter_prev; + waiter_next->timeline_prev = waiter_new; + waiter_prev->timeline_next = waiter_new; + } +} + +static void mali_timeline_update_delayed_work(struct mali_timeline *timeline) +{ + struct mali_timeline_system *system; + struct mali_timeline_tracker *oldest_tracker; + + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SOFT == timeline->id); + + system = timeline->system; + MALI_DEBUG_ASSERT_POINTER(system); + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + 
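+	/* The delayed work follows the oldest tracker on the soft timeline:
+	 * it is (re)armed when an unsignaled tracker reaches the tail, and
+	 * cancelled when the tail tracker is gone or still has triggers
+	 * pending. */
+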
+	/* Timer is disabled, early out. */
+	if (!system->timer_enabled) return;
+
+	oldest_tracker = timeline->tracker_tail;
+	if (NULL != oldest_tracker && 0 == oldest_tracker->trigger_ref_count) {
+		if (MALI_FALSE == oldest_tracker->timer_active) {
+			if (MALI_TRUE == timeline->timer_active) {
+				_mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+			}
+			_mali_osk_wq_delayed_schedule_work(timeline->delayed_work, MALI_TIMELINE_TIMEOUT_HZ);
+			oldest_tracker->timer_active = MALI_TRUE;
+			timeline->timer_active = MALI_TRUE;
+		}
+	} else if (MALI_TRUE == timeline->timer_active) {
+		_mali_osk_wq_delayed_cancel_work_async(timeline->delayed_work);
+		timeline->timer_active = MALI_FALSE;
+	}
+}
+
+static mali_scheduler_mask mali_timeline_update_oldest_point(struct mali_timeline *timeline)
+{
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+	MALI_DEBUG_ASSERT_POINTER(timeline);
+
+	MALI_DEBUG_CODE({
+		struct mali_timeline_system *system = timeline->system;
+		MALI_DEBUG_ASSERT_POINTER(system);
+
+		MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+	});
+
+	if (NULL != timeline->tracker_tail) {
+		/* Set oldest point to oldest tracker's point */
+		timeline->point_oldest = timeline->tracker_tail->point;
+	} else {
+		/* No trackers, mark point list as empty */
+		timeline->point_oldest = timeline->point_next;
+	}
+
+	/* Release all waiters no longer on the timeline's point list.
+	 * Releasing a waiter can trigger this function to be called again, so
+	 * we do not store any pointers on stack. */
+	while (NULL != timeline->waiter_tail) {
+		u32 waiter_time_relative;
+		u32 time_head_relative;
+		struct mali_timeline_waiter *waiter = timeline->waiter_tail;
+
+		time_head_relative = timeline->point_next - timeline->point_oldest;
+		waiter_time_relative = waiter->point - timeline->point_oldest;
+
+		if (waiter_time_relative < time_head_relative) {
+			/* This and all following waiters are on the point list, so we are done. */
+			break;
+		}
+
+		/* Remove waiter from timeline's waiter list. */
+		if (NULL != waiter->timeline_next) {
+			waiter->timeline_next->timeline_prev = NULL;
+		} else {
+			/* This was the last waiter */
+			timeline->waiter_head = NULL;
+		}
+		timeline->waiter_tail = waiter->timeline_next;
+
+		/* Release waiter. This could activate a tracker, if this was
+		 * the last waiter for the tracker. */
+		schedule_mask |= mali_timeline_system_release_waiter(timeline->system, waiter);
+	}
+
+	return schedule_mask;
+}
+
+void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker,
+				mali_timeline_tracker_type type,
+				struct mali_timeline_fence *fence,
+				void *job)
+{
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+	MALI_DEBUG_ASSERT_POINTER(job);
+
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > type);
+
+	/* Zero out all tracker members. */
+	_mali_osk_memset(tracker, 0, sizeof(*tracker));
+
+	tracker->type = type;
+	tracker->job = job;
+	tracker->trigger_ref_count = 1; /* Prevents any callback from triggering while adding it */
+	tracker->os_tick_create = _mali_osk_time_tickcount();
+	MALI_DEBUG_CODE(tracker->magic = MALI_TIMELINE_TRACKER_MAGIC);
+
+	tracker->activation_error = MALI_TIMELINE_ACTIVATION_ERROR_NONE;
+
+	/* Copy fence.
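+	 * The fence is copied by value, so the caller's fence object does not
+	 * need to outlive this call.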
*/ + if (NULL != fence) { + _mali_osk_memcpy(&tracker->fence, fence, sizeof(struct mali_timeline_fence)); + } +} + +mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker) +{ + struct mali_timeline *timeline; + struct mali_timeline_system *system; + struct mali_timeline_tracker *tracker_next, *tracker_prev; + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + u32 tid = _mali_osk_get_tid(); + + /* Upon entry a group lock will be held, but not a scheduler lock. */ + MALI_DEBUG_ASSERT_POINTER(tracker); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic); + + /* Tracker should have been triggered */ + MALI_DEBUG_ASSERT(0 == tracker->trigger_ref_count); + + /* All waiters should have been released at this point */ + MALI_DEBUG_ASSERT(NULL == tracker->waiter_head); + MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail); + + MALI_DEBUG_PRINT(3, ("Mali Timeline: releasing tracker for job 0x%08X\n", tracker->job)); + + timeline = tracker->timeline; + if (NULL == timeline) { + /* Tracker was not on a timeline, there is nothing to release. */ + return MALI_SCHEDULER_MASK_EMPTY; + } + + system = timeline->system; + MALI_DEBUG_ASSERT_POINTER(system); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + /* Tracker should still be on timeline */ + MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline)); + MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, tracker->point)); + + /* Tracker is no longer valid. */ + MALI_DEBUG_CODE(tracker->magic = 0); + + tracker_next = tracker->timeline_next; + tracker_prev = tracker->timeline_prev; + tracker->timeline_next = NULL; + tracker->timeline_prev = NULL; + + /* Removing tracker from timeline's tracker list */ + if (NULL == tracker_next) { + /* This tracker was the head */ + timeline->tracker_head = tracker_prev; + } else { + tracker_next->timeline_prev = tracker_prev; + } + + if (NULL == tracker_prev) { + /* This tracker was the tail */ + timeline->tracker_tail = tracker_next; + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + /* Update the timeline's oldest time and release any waiters */ + schedule_mask |= mali_timeline_update_oldest_point(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + } else { + tracker_prev->timeline_next = tracker_next; + } + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + /* Update delayed work only when it is the soft job timeline */ + if (MALI_TIMELINE_SOFT == tracker->timeline->id) { + mali_timeline_update_delayed_work(tracker->timeline); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + return schedule_mask; +} + +void mali_timeline_system_release_waiter_list(struct mali_timeline_system *system, + struct mali_timeline_waiter *tail, + struct mali_timeline_waiter *head) +{ + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(head); + MALI_DEBUG_ASSERT_POINTER(tail); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + head->tracker_next = system->waiter_empty_list; + system->waiter_empty_list = tail; +} + +static mali_scheduler_mask mali_timeline_tracker_activate(struct mali_timeline_tracker *tracker) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + struct mali_timeline_system *system; + struct mali_timeline *timeline; + u32 tid = _mali_osk_get_tid(); + + MALI_DEBUG_ASSERT_POINTER(tracker); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic); + + system = tracker->system; + MALI_DEBUG_ASSERT_POINTER(system); + 
MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+	tracker->os_tick_activate = _mali_osk_time_tickcount();
+
+	if (NULL != tracker->waiter_head) {
+		mali_timeline_system_release_waiter_list(system, tracker->waiter_tail, tracker->waiter_head);
+		tracker->waiter_head = NULL;
+		tracker->waiter_tail = NULL;
+	}
+
+	switch (tracker->type) {
+	case MALI_TIMELINE_TRACKER_GP:
+		schedule_mask = mali_scheduler_activate_gp_job((struct mali_gp_job *) tracker->job);
+
+		_mali_osk_atomic_dec(&gp_tracker_count);
+		break;
+	case MALI_TIMELINE_TRACKER_PP:
+		if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+			_mali_osk_atomic_dec(&virt_pp_tracker_count);
+		} else {
+			_mali_osk_atomic_dec(&phy_pp_tracker_count);
+		}
+		schedule_mask = mali_scheduler_activate_pp_job((struct mali_pp_job *) tracker->job);
+		break;
+	case MALI_TIMELINE_TRACKER_SOFT:
+		timeline = tracker->timeline;
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+
+		schedule_mask |= mali_soft_job_system_activate_job((struct mali_soft_job *) tracker->job);
+
+		/* Start a soft timer to make sure the soft job is released within a limited time */
+		mali_spinlock_reentrant_wait(system->spinlock, tid);
+		mali_timeline_update_delayed_work(timeline);
+		mali_spinlock_reentrant_signal(system->spinlock, tid);
+		break;
+	case MALI_TIMELINE_TRACKER_WAIT:
+		mali_timeline_fence_wait_activate((struct mali_timeline_fence_wait_tracker *) tracker->job);
+		break;
+	case MALI_TIMELINE_TRACKER_SYNC:
+#if defined(CONFIG_SYNC)
+		mali_timeline_sync_fence_activate((struct mali_timeline_sync_fence_tracker *) tracker->job);
+#else
+		MALI_PRINT_ERROR(("Mali Timeline: sync tracker not supported\n"));
+#endif /* defined(CONFIG_SYNC) */
+		break;
+	default:
+		MALI_PRINT_ERROR(("Mali Timeline - Illegal tracker type: %d\n", tracker->type));
+		break;
+	}
+
+	return schedule_mask;
+}
+
+void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker)
+{
+	u32 tid = _mali_osk_get_tid();
+
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+	MALI_DEBUG_ASSERT_POINTER(system);
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+	tracker->trigger_ref_count++;
+
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+}
+
+mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error)
+{
+	u32 tid = _mali_osk_get_tid();
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+	MALI_DEBUG_ASSERT_POINTER(system);
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count);
+	tracker->trigger_ref_count--;
+
+	tracker->activation_error |= activation_error;
+
+	if (0 == tracker->trigger_ref_count) {
+		schedule_mask |= mali_timeline_tracker_activate(tracker);
+		tracker = NULL;
+	}
+
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	return schedule_mask;
+}
+
+void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence)
+{
+	u32 i;
+
+	MALI_DEBUG_ASSERT_POINTER(fence);
+	MALI_DEBUG_ASSERT_POINTER(uk_fence);
+
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		fence->points[i] = uk_fence->points[i];
+	}
+
+	fence->sync_fd = uk_fence->sync_fd;
+}
+
+struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session)
+{
+	u32 i;
+	struct mali_timeline_system *system;
+
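+	/* A timeline system bundles one timeline per mali_timeline_id, a shared
+	 * reentrant spinlock, a recycling list of waiters, a wait queue and,
+	 * when CONFIG_SYNC is set, an always-signaled sync timeline. */
+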
MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_PRINT(4, ("Mali Timeline: creating timeline system\n")); + + system = (struct mali_timeline_system *) _mali_osk_calloc(1, sizeof(struct mali_timeline_system)); + if (NULL == system) { + return NULL; + } + + system->spinlock = mali_spinlock_reentrant_init(_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM); + if (NULL == system->spinlock) { + mali_timeline_system_destroy(system); + return NULL; + } + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + system->timelines[i] = mali_timeline_create(system, (enum mali_timeline_id)i); + if (NULL == system->timelines[i]) { + mali_timeline_system_destroy(system); + return NULL; + } + } + +#if defined(CONFIG_SYNC) + system->signaled_sync_tl = mali_sync_timeline_create(NULL, "mali-always-signaled"); + if (NULL == system->signaled_sync_tl) { + mali_timeline_system_destroy(system); + return NULL; + } +#endif /* defined(CONFIG_SYNC) */ + + system->waiter_empty_list = NULL; + system->session = session; + system->timer_enabled = MALI_TRUE; + + system->wait_queue = _mali_osk_wait_queue_init(); + if (NULL == system->wait_queue) { + mali_timeline_system_destroy(system); + return NULL; + } + + return system; +} + +#if defined(CONFIG_MALI_DMA_BUF_FENCE) ||defined(CONFIG_SYNC) +/** + * Check if there are any trackers left on timeline. + * + * Used as a wait queue conditional. + * + * @param data Timeline. + * @return MALI_TRUE if there are no trackers on timeline, MALI_FALSE if not. + */ +static mali_bool mali_timeline_has_no_trackers(void *data) +{ + struct mali_timeline *timeline = (struct mali_timeline *) data; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + return mali_timeline_is_empty(timeline); +} +#if defined(CONFIG_SYNC) +/** + * Cancel sync fence waiters waited upon by trackers on all timelines. + * + * Will return after all timelines have no trackers left. + * + * @param system Timeline system. + */ +static void mali_timeline_cancel_sync_fence_waiters(struct mali_timeline_system *system) +{ + u32 i; + u32 tid = _mali_osk_get_tid(); + struct mali_timeline_tracker *tracker, *tracker_next; + _MALI_OSK_LIST_HEAD_STATIC_INIT(tracker_list); + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT(system->session->is_aborting); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + /* Cancel sync fence waiters. */ + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline = system->timelines[i]; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + tracker_next = timeline->tracker_tail; + while (NULL != tracker_next) { + tracker = tracker_next; + tracker_next = tracker->timeline_next; + + if (NULL == tracker->sync_fence) continue; + + MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling sync fence wait for tracker 0x%08X.\n", tracker)); + + /* Cancel sync fence waiter. */ + if (0 == sync_fence_cancel_async(tracker->sync_fence, &tracker->sync_fence_waiter)) { + /* Callback was not called, move tracker to local list. */ + _mali_osk_list_add(&tracker->sync_fence_cancel_list, &tracker_list); + } + } + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + /* Manually call sync fence callback in order to release waiter and trigger activation of tracker. */ + _MALI_OSK_LIST_FOREACHENTRY(tracker, tracker_next, &tracker_list, struct mali_timeline_tracker, sync_fence_cancel_list) { + mali_timeline_sync_fence_callback(tracker->sync_fence, &tracker->sync_fence_waiter); + } + + /* Sleep until all sync fence callbacks are done and all timelines are empty. 
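+	 * mali_timeline_has_no_trackers() is the wait-queue condition; the sync
+	 * fence callback wakes the queue while the session is aborting.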
+	 */
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		struct mali_timeline *timeline = system->timelines[i];
+
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+
+		_mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+	}
+}
+
+#endif /* defined(CONFIG_SYNC) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+static void mali_timeline_cancel_dma_fence_waiters(struct mali_timeline_system *system)
+{
+	u32 i, j;
+	u32 tid = _mali_osk_get_tid();
+	struct mali_pp_job *pp_job = NULL;
+	struct mali_pp_job *next_pp_job = NULL;
+	struct mali_timeline *timeline = NULL;
+	struct mali_timeline_tracker *tracker, *tracker_next;
+	_MALI_OSK_LIST_HEAD_STATIC_INIT(pp_job_list);
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(system->session);
+	MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+	mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+	/* Cancel dma fence waiters. */
+	timeline = system->timelines[MALI_TIMELINE_PP];
+	MALI_DEBUG_ASSERT_POINTER(timeline);
+
+	tracker_next = timeline->tracker_tail;
+	while (NULL != tracker_next) {
+		mali_bool fence_is_signaled = MALI_TRUE;
+		tracker = tracker_next;
+		tracker_next = tracker->timeline_next;
+
+		if (NULL == tracker->waiter_dma_fence) continue;
+		pp_job = (struct mali_pp_job *)tracker->job;
+		MALI_DEBUG_ASSERT_POINTER(pp_job);
+		MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling dma fence waiter for tracker 0x%08X.\n", tracker));
+
+		for (j = 0; j < pp_job->dma_fence_context.num_dma_fence_waiter; j++) {
+			if (pp_job->dma_fence_context.mali_dma_fence_waiters[j]) {
+				/* Cancel a callback previously installed on the fence.
+				 * This function returns true if the callback is successfully removed,
+				 * or false if the fence has already been signaled.
+				 */
+				bool ret = dma_fence_remove_callback(pp_job->dma_fence_context.mali_dma_fence_waiters[j]->fence,
+								     &pp_job->dma_fence_context.mali_dma_fence_waiters[j]->base);
+				if (ret) {
+					fence_is_signaled = MALI_FALSE;
+				}
+			}
+		}
+
+		/* Callbacks were not called, move pp job to local list. */
+		if (MALI_FALSE == fence_is_signaled)
+			_mali_osk_list_add(&pp_job->list, &pp_job_list);
+	}
+
+	mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+	/* Manually call dma fence callback in order to release waiter and trigger activation of tracker. */
+	_MALI_OSK_LIST_FOREACHENTRY(pp_job, next_pp_job, &pp_job_list, struct mali_pp_job, list) {
+		mali_timeline_dma_fence_callback((void *)pp_job);
+	}
+
+	/* Sleep until all dma fence callbacks are done and all timelines are empty. */
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		struct mali_timeline *timeline = system->timelines[i];
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+		_mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+	}
+}
+#endif
+#endif
+
+void mali_timeline_system_abort(struct mali_timeline_system *system)
+{
+	MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid(););
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(system->session);
+	MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+	MALI_DEBUG_PRINT(3, ("Mali Timeline: Aborting timeline system for session 0x%08X.\n", system->session));
+
+#if defined(CONFIG_SYNC)
+	mali_timeline_cancel_sync_fence_waiters(system);
+#endif /* defined(CONFIG_SYNC) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+	mali_timeline_cancel_dma_fence_waiters(system);
+#endif
+
+	/* There should not be any waiters or trackers left at this point.
*/ + MALI_DEBUG_CODE({ + u32 i; + mali_spinlock_reentrant_wait(system->spinlock, tid); + for (i = 0; i < MALI_TIMELINE_MAX; ++i) + { + struct mali_timeline *timeline = system->timelines[i]; + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(timeline->point_oldest == timeline->point_next); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_head); + MALI_DEBUG_ASSERT(NULL == timeline->tracker_tail); + MALI_DEBUG_ASSERT(NULL == timeline->waiter_head); + MALI_DEBUG_ASSERT(NULL == timeline->waiter_tail); + } + mali_spinlock_reentrant_signal(system->spinlock, tid); + }); +} + +void mali_timeline_system_destroy(struct mali_timeline_system *system) +{ + u32 i; + struct mali_timeline_waiter *waiter, *next; +#if defined(CONFIG_SYNC) + u32 tid = _mali_osk_get_tid(); +#endif + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: destroying timeline system\n")); + + if (NULL != system) { + + /* There should be no waiters left on this queue. */ + if (NULL != system->wait_queue) { + _mali_osk_wait_queue_term(system->wait_queue); + system->wait_queue = NULL; + } + + /* Free all waiters in empty list */ + waiter = system->waiter_empty_list; + while (NULL != waiter) { + next = waiter->tracker_next; + _mali_osk_free(waiter); + waiter = next; + } + +#if defined(CONFIG_SYNC) + if (NULL != system->signaled_sync_tl) { + sync_timeline_destroy(system->signaled_sync_tl); + } + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + if ((NULL != system->timelines[i]) && (NULL != system->timelines[i]->spinlock)) { + mali_spinlock_reentrant_wait(system->timelines[i]->spinlock, tid); + system->timelines[i]->destroyed = MALI_TRUE; + mali_spinlock_reentrant_signal(system->timelines[i]->spinlock, tid); + } + } +#endif /* defined(CONFIG_SYNC) */ + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + if (NULL != system->timelines[i]) { + mali_timeline_destroy(system->timelines[i]); + } + } + + if (NULL != system->spinlock) { + mali_spinlock_reentrant_term(system->spinlock); + } + + _mali_osk_free(system); + } +} + +/** + * Find how many waiters are needed for a given fence. + * + * @param fence The fence to check. + * @return Number of waiters needed for fence. + */ +static u32 mali_timeline_fence_num_waiters(struct mali_timeline_fence *fence) +{ + u32 i, num_waiters = 0; + + MALI_DEBUG_ASSERT_POINTER(fence); + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + if (MALI_TIMELINE_NO_POINT != fence->points[i]) { + ++num_waiters; + } + } + +#if defined(CONFIG_SYNC) + if (-1 != fence->sync_fd) ++num_waiters; +#endif /* defined(CONFIG_SYNC) */ + + return num_waiters; +} + +static struct mali_timeline_waiter *mali_timeline_system_get_zeroed_waiter(struct mali_timeline_system *system) +{ + struct mali_timeline_waiter *waiter; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + waiter = system->waiter_empty_list; + if (NULL != waiter) { + /* Remove waiter from empty list and zero it */ + system->waiter_empty_list = waiter->tracker_next; + _mali_osk_memset(waiter, 0, sizeof(*waiter)); + } + + /* Return NULL if list was empty. 
+	 */
+	return waiter;
+}
+
+static void mali_timeline_system_allocate_waiters(struct mali_timeline_system *system,
+		struct mali_timeline_waiter **tail,
+		struct mali_timeline_waiter **head,
+		int max_num_waiters)
+{
+	u32 i, tid = _mali_osk_get_tid();
+	mali_bool do_alloc;
+	struct mali_timeline_waiter *waiter;
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(tail);
+	MALI_DEBUG_ASSERT_POINTER(head);
+
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+	*head = *tail = NULL;
+	do_alloc = MALI_FALSE;
+	i = 0;
+	while (i < max_num_waiters) {
+		if (MALI_FALSE == do_alloc) {
+			waiter = mali_timeline_system_get_zeroed_waiter(system);
+			if (NULL == waiter) {
+				do_alloc = MALI_TRUE;
+				mali_spinlock_reentrant_signal(system->spinlock, tid);
+				continue;
+			}
+		} else {
+			waiter = _mali_osk_calloc(1, sizeof(struct mali_timeline_waiter));
+			if (NULL == waiter) break;
+		}
+		++i;
+		if (NULL == *tail) {
+			*tail = waiter;
+			*head = waiter;
+		} else {
+			(*head)->tracker_next = waiter;
+			*head = waiter;
+		}
+	}
+	if (MALI_TRUE == do_alloc) {
+		mali_spinlock_reentrant_wait(system->spinlock, tid);
+	}
+}
+
+/**
+ * Create waiters for the given tracker. The tracker is activated when all waiters are released.
+ *
+ * @note The tracker can potentially be activated before this function returns.
+ *
+ * @param system Timeline system.
+ * @param tracker Tracker we will create waiters for.
+ * @param waiter_tail List of pre-allocated waiters.
+ * @param waiter_head List of pre-allocated waiters.
+ */
+static void mali_timeline_system_create_waiters_and_unlock(struct mali_timeline_system *system,
+		struct mali_timeline_tracker *tracker,
+		struct mali_timeline_waiter *waiter_tail,
+		struct mali_timeline_waiter *waiter_head)
+{
+	int i;
+	u32 tid = _mali_osk_get_tid();
+	mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+#if defined(CONFIG_SYNC)
+	struct sync_fence *sync_fence = NULL;
+#endif /* defined(CONFIG_SYNC) */
+
+	MALI_DEBUG_ASSERT_POINTER(system);
+	MALI_DEBUG_ASSERT_POINTER(tracker);
+
+	MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
+
+	MALI_DEBUG_ASSERT(NULL == tracker->waiter_head);
+	MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+	MALI_DEBUG_ASSERT(NULL != tracker->job);
+
+	/* Create a waiter object for each timeline the fence has a point on, and
+	 * insert it into that timeline's sorted list of waiters. */
+	for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+		mali_timeline_point point;
+		struct mali_timeline *timeline;
+		struct mali_timeline_waiter *waiter;
+
+		/* Get point on current timeline from tracker's fence. */
+		point = tracker->fence.points[i];
+
+		if (likely(MALI_TIMELINE_NO_POINT == point)) {
+			/* Fence contains no point on this timeline so we don't need a waiter. */
+			continue;
+		}
+
+		timeline = system->timelines[i];
+		MALI_DEBUG_ASSERT_POINTER(timeline);
+
+		if (unlikely(!mali_timeline_is_point_valid(timeline, point))) {
+			MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n",
+					  point, timeline->point_oldest, timeline->point_next));
+			continue;
+		}
+
+		if (likely(mali_timeline_is_point_released(timeline, point))) {
+			/* Tracker representing the point has been released so we don't need a
+			 * waiter. */
+			continue;
+		}
+
+		/* The point is on timeline. */
+		MALI_DEBUG_ASSERT(mali_timeline_is_point_on(timeline, point));
+
+		/* Get a new zeroed waiter object.
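+		 * It comes from the list pre-allocated by
+		 * mali_timeline_system_allocate_waiters(), which may briefly drop
+		 * the system spinlock to call _mali_osk_calloc() when the recycled
+		 * free list runs dry.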
*/ + if (likely(NULL != waiter_tail)) { + waiter = waiter_tail; + waiter_tail = waiter_tail->tracker_next; + } else { + MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n")); + continue; + } + + /* Yanking the trigger ref count of the tracker. */ + tracker->trigger_ref_count++; + + waiter->point = point; + waiter->tracker = tracker; + + /* Insert waiter on tracker's singly-linked waiter list. */ + if (NULL == tracker->waiter_head) { + /* list is empty */ + MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail); + tracker->waiter_tail = waiter; + } else { + tracker->waiter_head->tracker_next = waiter; + } + tracker->waiter_head = waiter; + + /* Add waiter to timeline. */ + mali_timeline_insert_waiter(timeline, waiter); + } +#if defined(CONFIG_SYNC) + if (-1 != tracker->fence.sync_fd) { + int ret; + struct mali_timeline_waiter *waiter; + + sync_fence = sync_fence_fdget(tracker->fence.sync_fd); + if (unlikely(NULL == sync_fence)) { + MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", tracker->fence.sync_fd)); + goto exit; + } + + /* Check if we have a zeroed waiter object available. */ + if (unlikely(NULL == waiter_tail)) { + MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n")); + goto exit; + } + + /* Start asynchronous wait that will release waiter when the fence is signaled. */ + sync_fence_waiter_init(&tracker->sync_fence_waiter, mali_timeline_sync_fence_callback); + ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter); + if (1 == ret) { + /* Fence already signaled, no waiter needed. */ + tracker->fence.sync_fd = -1; + goto exit; + } else if (0 != ret) { + MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret)); + tracker->activation_error |= MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT; + goto exit; + } + + /* Grab new zeroed waiter object. */ + waiter = waiter_tail; + waiter_tail = waiter_tail->tracker_next; + + /* Increase the trigger ref count of the tracker. */ + tracker->trigger_ref_count++; + + waiter->point = MALI_TIMELINE_NO_POINT; + waiter->tracker = tracker; + + /* Insert waiter on tracker's singly-linked waiter list. */ + if (NULL == tracker->waiter_head) { + /* list is empty */ + MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail); + tracker->waiter_tail = waiter; + } else { + tracker->waiter_head->tracker_next = waiter; + } + tracker->waiter_head = waiter; + + /* Also store waiter in separate field for easy access by sync callback. */ + tracker->waiter_sync = waiter; + + /* Store the sync fence in tracker so we can retrieve in abort session, if needed. */ + tracker->sync_fence = sync_fence; + + sync_fence = NULL; + } +#endif /* defined(CONFIG_SYNC)*/ +#if defined(CONFIG_MALI_DMA_BUF_FENCE) + if ((NULL != tracker->timeline) && (MALI_TIMELINE_PP == tracker->timeline->id)) { + + struct mali_pp_job *job = (struct mali_pp_job *)tracker->job; + + if (0 < job->dma_fence_context.num_dma_fence_waiter) { + struct mali_timeline_waiter *waiter; + /* Check if we have a zeroed waiter object available. */ + if (unlikely(NULL == waiter_tail)) { + MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n")); + goto exit; + } + + /* Grab new zeroed waiter object. */ + waiter = waiter_tail; + waiter_tail = waiter_tail->tracker_next; + + /* Increase the trigger ref count of the tracker. */ + tracker->trigger_ref_count++; + + waiter->point = MALI_TIMELINE_NO_POINT; + waiter->tracker = tracker; + + /* Insert waiter on tracker's singly-linked waiter list. 
*/ + if (NULL == tracker->waiter_head) { + /* list is empty */ + MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail); + tracker->waiter_tail = waiter; + } else { + tracker->waiter_head->tracker_next = waiter; + } + tracker->waiter_head = waiter; + + /* Also store waiter in separate field for easy access by sync callback. */ + tracker->waiter_dma_fence = waiter; + } + } +#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE)*/ + +#if defined(CONFIG_MALI_DMA_BUF_FENCE) ||defined(CONFIG_SYNC) +exit: +#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) */ + + if (NULL != waiter_tail) { + mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head); + } + + /* Release the initial trigger ref count. */ + tracker->trigger_ref_count--; + + /* If there were no waiters added to this tracker we activate immediately. */ + if (0 == tracker->trigger_ref_count) { + schedule_mask |= mali_timeline_tracker_activate(tracker); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + +#if defined(CONFIG_SYNC) + if (NULL != sync_fence) { + sync_fence_put(sync_fence); + } +#endif /* defined(CONFIG_SYNC) */ + + mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE); +} + +mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system, + struct mali_timeline_tracker *tracker, + enum mali_timeline_id timeline_id) +{ + int num_waiters = 0; + struct mali_timeline_waiter *waiter_tail, *waiter_head; + u32 tid = _mali_osk_get_tid(); + + mali_timeline_point point = MALI_TIMELINE_NO_POINT; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + MALI_DEBUG_ASSERT_POINTER(tracker); + + MALI_DEBUG_ASSERT(MALI_FALSE == system->session->is_aborting); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAX > tracker->type); + MALI_DEBUG_ASSERT(MALI_TIMELINE_TRACKER_MAGIC == tracker->magic); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: adding tracker for job %p, timeline: %d\n", tracker->job, timeline_id)); + + MALI_DEBUG_ASSERT(0 < tracker->trigger_ref_count); + tracker->system = system; + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + num_waiters = mali_timeline_fence_num_waiters(&tracker->fence); + +#if defined(CONFIG_MALI_DMA_BUF_FENCE) + if (MALI_TIMELINE_PP == timeline_id) { + struct mali_pp_job *job = (struct mali_pp_job *)tracker->job; + if (0 < job->dma_fence_context.num_dma_fence_waiter) + num_waiters++; + } +#endif + + /* Allocate waiters. */ + mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters); + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + /* Add tracker to timeline. This will allocate a point for the tracker on the timeline. If + * timeline ID is MALI_TIMELINE_NONE the tracker will NOT be added to a timeline and the + * point will be MALI_TIMELINE_NO_POINT. + * + * NOTE: the tracker can fail to be added if the timeline is full. If this happens, the + * point will be MALI_TIMELINE_NO_POINT. */ + MALI_DEBUG_ASSERT(timeline_id < MALI_TIMELINE_MAX || timeline_id == MALI_TIMELINE_NONE); + if (likely(timeline_id < MALI_TIMELINE_MAX)) { + struct mali_timeline *timeline = system->timelines[timeline_id]; + mali_timeline_insert_tracker(timeline, tracker); + MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline)); + } + + point = tracker->point; + + /* Create waiters for tracker based on supplied fence. Each waiter will increase the + * trigger ref count. 
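+	 * The initial reference taken in mali_timeline_tracker_init() is dropped
+	 * inside create_waiters_and_unlock(), so a tracker that ends up with no
+	 * waiters is activated immediately.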
*/ + mali_timeline_system_create_waiters_and_unlock(system, tracker, waiter_tail, waiter_head); + tracker = NULL; + + /* At this point the tracker object might have been freed so we should no longer + * access it. */ + + + /* The tracker will always be activated after calling add_tracker, even if NO_POINT is + * returned. */ + return point; +} + +static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system, + struct mali_timeline_waiter *waiter) +{ + struct mali_timeline_tracker *tracker; + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(waiter); + + MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system)); + + tracker = waiter->tracker; + MALI_DEBUG_ASSERT_POINTER(tracker); + + /* At this point the waiter has been removed from the timeline's waiter list, but it is + * still on the tracker's waiter list. All of the tracker's waiters will be released when + * the tracker is activated. */ + + waiter->point = MALI_TIMELINE_NO_POINT; + waiter->tracker = NULL; + + tracker->trigger_ref_count--; + if (0 == tracker->trigger_ref_count) { + /* This was the last waiter; activate tracker */ + schedule_mask |= mali_timeline_tracker_activate(tracker); + tracker = NULL; + } + + return schedule_mask; +} + +mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system, + enum mali_timeline_id timeline_id) +{ + mali_timeline_point point; + struct mali_timeline *timeline; + u32 tid = _mali_osk_get_tid(); + + MALI_DEBUG_ASSERT_POINTER(system); + + if (MALI_TIMELINE_MAX <= timeline_id) { + return MALI_TIMELINE_NO_POINT; + } + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + timeline = system->timelines[timeline_id]; + MALI_DEBUG_ASSERT_POINTER(timeline); + + point = MALI_TIMELINE_NO_POINT; + if (timeline->point_oldest != timeline->point_next) { + point = timeline->point_next - 1; + if (MALI_TIMELINE_NO_POINT == point) point--; + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + return point; +} + +void mali_timeline_initialize(void) +{ + _mali_osk_atomic_init(&gp_tracker_count, 0); + _mali_osk_atomic_init(&phy_pp_tracker_count, 0); + _mali_osk_atomic_init(&virt_pp_tracker_count, 0); +} + +void mali_timeline_terminate(void) +{ + _mali_osk_atomic_term(&gp_tracker_count); + _mali_osk_atomic_term(&phy_pp_tracker_count); + _mali_osk_atomic_term(&virt_pp_tracker_count); +} + +#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS) + +static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id) +{ + struct mali_timeline *timeline; + struct mali_timeline_system *system; + + MALI_DEBUG_ASSERT_POINTER(tracker); + + MALI_DEBUG_ASSERT_POINTER(tracker->timeline); + timeline = tracker->timeline; + + MALI_DEBUG_ASSERT_POINTER(timeline->system); + system = timeline->system; + + if (MALI_TIMELINE_MAX > id) { + if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) { + return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]); + } else { + return MALI_FALSE; + } + } else { + MALI_DEBUG_ASSERT(MALI_TIMELINE_NONE == id); + return MALI_FALSE; + } +} + +static const char *timeline_id_to_string(enum mali_timeline_id id) +{ + switch (id) { + case MALI_TIMELINE_GP: + return "GP"; + case MALI_TIMELINE_PP: + return "PP"; + case MALI_TIMELINE_SOFT: + return "SOFT"; + default: + return "NONE"; + } +} + +static const char *timeline_tracker_type_to_string(enum mali_timeline_tracker_type type) +{ + switch (type) 
{ + case MALI_TIMELINE_TRACKER_GP: + return "GP"; + case MALI_TIMELINE_TRACKER_PP: + return "PP"; + case MALI_TIMELINE_TRACKER_SOFT: + return "SOFT"; + case MALI_TIMELINE_TRACKER_WAIT: + return "WAIT"; + case MALI_TIMELINE_TRACKER_SYNC: + return "SYNC"; + default: + return "INVALID"; + } +} + +mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker) +{ + struct mali_timeline *timeline = NULL; + + MALI_DEBUG_ASSERT_POINTER(tracker); + timeline = tracker->timeline; + + if (0 != tracker->trigger_ref_count) { + return MALI_TIMELINE_TS_WAITING; + } + + if (timeline && (timeline->tracker_tail == tracker || NULL != tracker->timeline_prev)) { + return MALI_TIMELINE_TS_ACTIVE; + } + + if (timeline && (MALI_TIMELINE_NO_POINT == tracker->point)) { + return MALI_TIMELINE_TS_INIT; + } + + return MALI_TIMELINE_TS_FINISH; +} + +void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx) +{ + const char *tracker_state = "IWAF"; + char state_char = 'I'; + char tracker_type[32] = {0}; + + MALI_DEBUG_ASSERT_POINTER(tracker); + + state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)); + _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type)); + +#if defined(CONFIG_SYNC) + if (0 != tracker->trigger_ref_count) { + _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2], + tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)); + } else { + _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->fence.sync_fd, (unsigned int)(uintptr_t)(tracker->sync_fence), (unsigned int)(uintptr_t)(tracker->job)); + } +#else + if (0 != tracker->trigger_ref_count) { + _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? 
"WaitSOFT" : " ", tracker->fence.points[2], + (unsigned int)(uintptr_t)(tracker->job)); + } else { + _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + (unsigned int)(uintptr_t)(tracker->job)); + } +#endif +} + +void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx) +{ + struct mali_timeline_tracker *tracker = NULL; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + tracker = timeline->tracker_tail; + while (NULL != tracker) { + mali_timeline_debug_print_tracker(tracker, print_ctx); + tracker = tracker->timeline_next; + } +} + +#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker) +{ + const char *tracker_state = "IWAF"; + char state_char = 'I'; + char tracker_type[32] = {0}; + + MALI_DEBUG_ASSERT_POINTER(tracker); + + state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker)); + _mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type)); + +#if defined(CONFIG_SYNC) + if (0 != tracker->trigger_ref_count) { + MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2], + tracker->fence.sync_fd, tracker->sync_fence, tracker->job)); + } else { + MALI_PRINT(("TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->fence.sync_fd, tracker->sync_fence, tracker->job)); + } +#else + if (0 != tracker->trigger_ref_count) { + MALI_PRINT(("TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n", + tracker_type, tracker->point, state_char, tracker->trigger_ref_count, + is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0], + is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1], + is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? 
"WaitSOFT" : " ", tracker->fence.points[2], + tracker->job)); + } else { + MALI_PRINT(("TL: %s %u %c job:(0x%08X)\n", + tracker_type, tracker->point, state_char, + tracker->job)); + } +#endif +} + +void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline) +{ + struct mali_timeline_tracker *tracker = NULL; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + tracker = timeline->tracker_tail; + while (NULL != tracker) { + mali_timeline_debug_direct_print_tracker(tracker); + tracker = tracker->timeline_next; + } +} + +#endif + +void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx) +{ + int i; + int num_printed = 0; + u32 tid = _mali_osk_get_tid(); + + MALI_DEBUG_ASSERT_POINTER(system); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + /* Print all timelines */ + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline = system->timelines[i]; + + MALI_DEBUG_ASSERT_POINTER(timeline); + + if (NULL == timeline->tracker_head) continue; + + _mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n", + timeline_id_to_string((enum mali_timeline_id)i)); + + mali_timeline_debug_print_timeline(timeline, print_ctx); + num_printed++; + } + + if (0 == num_printed) { + _mali_osk_ctxprintf(print_ctx, "TL: All timelines empty\n"); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); +} + +#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */ + +#if defined(CONFIG_MALI_DMA_BUF_FENCE) +void mali_timeline_dma_fence_callback(void *pp_job_ptr) +{ + struct mali_timeline_system *system; + struct mali_timeline_waiter *waiter; + struct mali_timeline_tracker *tracker; + struct mali_pp_job *pp_job = (struct mali_pp_job *)pp_job_ptr; + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + u32 tid = _mali_osk_get_tid(); + mali_bool is_aborting = MALI_FALSE; + + MALI_DEBUG_ASSERT_POINTER(pp_job); + + tracker = &pp_job->tracker; + MALI_DEBUG_ASSERT_POINTER(tracker); + + system = tracker->system; + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(system->session); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + waiter = tracker->waiter_dma_fence; + MALI_DEBUG_ASSERT_POINTER(waiter); + + schedule_mask |= mali_timeline_system_release_waiter(system, waiter); + + is_aborting = system->session->is_aborting; + + /* If aborting, wake up sleepers that are waiting for dma fence callbacks to complete. */ + if (is_aborting) { + _mali_osk_wait_queue_wake_up(system->wait_queue); + } + + mali_spinlock_reentrant_signal(system->spinlock, tid); + + if (!is_aborting) { + mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE); + } +} +#endif diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_timeline_fence_wait.c b/drivers/gpu/arm/mali400/common/mali_timeline_fence_wait.c --- a/drivers/gpu/arm/mali400/common/mali_timeline_fence_wait.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_timeline_fence_wait.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_timeline_fence_wait.h" + +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_spinlock_reentrant.h" + +/** + * Allocate a fence waiter tracker. + * + * @return New fence waiter if successful, NULL if not. + */ +static struct mali_timeline_fence_wait_tracker *mali_timeline_fence_wait_tracker_alloc(void) +{ + return (struct mali_timeline_fence_wait_tracker *) _mali_osk_calloc(1, sizeof(struct mali_timeline_fence_wait_tracker)); +} + +/** + * Free fence waiter tracker. + * + * @param wait Fence wait tracker to free. + */ +static void mali_timeline_fence_wait_tracker_free(struct mali_timeline_fence_wait_tracker *wait) +{ + MALI_DEBUG_ASSERT_POINTER(wait); + _mali_osk_atomic_term(&wait->refcount); + _mali_osk_free(wait); +} + +/** + * Check if fence wait tracker has been activated. Used as a wait queue condition. + * + * @param data Fence waiter. + * @return MALI_TRUE if tracker has been activated, MALI_FALSE if not. + */ +static mali_bool mali_timeline_fence_wait_tracker_is_activated(void *data) +{ + struct mali_timeline_fence_wait_tracker *wait; + + wait = (struct mali_timeline_fence_wait_tracker *) data; + MALI_DEBUG_ASSERT_POINTER(wait); + + return wait->activated; +} + +/** + * Check if fence has been signaled. + * + * @param system Timeline system. + * @param fence Timeline fence. + * @return MALI_TRUE if fence is signaled, MALI_FALSE if not. + */ +static mali_bool mali_timeline_fence_wait_check_status(struct mali_timeline_system *system, struct mali_timeline_fence *fence) +{ + int i; + u32 tid = _mali_osk_get_tid(); + mali_bool ret = MALI_TRUE; +#if defined(CONFIG_SYNC) + struct sync_fence *sync_fence = NULL; +#endif + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(fence); + + mali_spinlock_reentrant_wait(system->spinlock, tid); + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline; + mali_timeline_point point; + + point = fence->points[i]; + + if (likely(MALI_TIMELINE_NO_POINT == point)) { + /* Fence contains no point on this timeline. 
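+ * Most fences carry points on only a few of the timelines, so this is the expected common case (hence the likely() hint).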
*/ + continue; + } + + timeline = system->timelines[i]; + MALI_DEBUG_ASSERT_POINTER(timeline); + + if (unlikely(!mali_timeline_is_point_valid(timeline, point))) { + MALI_PRINT_ERROR(("Mali Timeline: point %d is not valid (oldest=%d, next=%d)\n", point, timeline->point_oldest, timeline->point_next)); + } + + if (!mali_timeline_is_point_released(timeline, point)) { + ret = MALI_FALSE; + goto exit; + } + } + +#if defined(CONFIG_SYNC) + if (-1 != fence->sync_fd) { + sync_fence = sync_fence_fdget(fence->sync_fd); + if (likely(NULL != sync_fence)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) + if (0 == sync_fence->status) { +#else + if (0 == atomic_read(&sync_fence->status)) { +#endif + ret = MALI_FALSE; + } + } else { + MALI_PRINT_ERROR(("Mali Timeline: failed to get sync fence from fd %d\n", fence->sync_fd)); + } + } +#endif /* defined(CONFIG_SYNC) */ + +exit: + mali_spinlock_reentrant_signal(system->spinlock, tid); + +#if defined(CONFIG_SYNC) + if (NULL != sync_fence) { + sync_fence_put(sync_fence); + } +#endif /* defined(CONFIG_SYNC) */ + + return ret; +} + +mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout) +{ + struct mali_timeline_fence_wait_tracker *wait; + mali_timeline_point point; + mali_bool ret; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(fence); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: wait on fence\n")); + + if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY == timeout) { + return mali_timeline_fence_wait_check_status(system, fence); + } + + wait = mali_timeline_fence_wait_tracker_alloc(); + if (unlikely(NULL == wait)) { + MALI_PRINT_ERROR(("Mali Timeline: failed to allocate data for fence wait\n")); + return MALI_FALSE; + } + + wait->activated = MALI_FALSE; + wait->system = system; + + /* Initialize refcount to two references. The first reference will be released by this + * function after the wait is over. The second reference will be released when the tracker + * is activated. */ + _mali_osk_atomic_init(&wait->refcount, 2); + + /* Add tracker to timeline system, but not to a timeline. */ + mali_timeline_tracker_init(&wait->tracker, MALI_TIMELINE_TRACKER_WAIT, fence, wait); + point = mali_timeline_system_add_tracker(system, &wait->tracker, MALI_TIMELINE_NONE); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point); + MALI_IGNORE(point); + + /* Wait for the tracker to be activated or time out. */ + if (MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER == timeout) { + _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait); + } else { + _mali_osk_wait_queue_wait_event_timeout(system->wait_queue, mali_timeline_fence_wait_tracker_is_activated, (void *) wait, timeout); + } + + ret = wait->activated; + + if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) { + mali_timeline_fence_wait_tracker_free(wait); + } + + return ret; +} + +void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *wait) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(wait); + MALI_DEBUG_ASSERT_POINTER(wait->system); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for fence wait tracker\n")); + + MALI_DEBUG_ASSERT(MALI_FALSE == wait->activated); + wait->activated = MALI_TRUE; + + _mali_osk_wait_queue_wake_up(wait->system->wait_queue); + + /* Nothing can wait on this tracker, so nothing to schedule after release.
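+ * The debug assert below documents this invariant: the release must yield an empty scheduling mask.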
*/ + schedule_mask = mali_timeline_tracker_release(&wait->tracker); + MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask); + MALI_IGNORE(schedule_mask); + + if (0 == _mali_osk_atomic_dec_return(&wait->refcount)) { + mali_timeline_fence_wait_tracker_free(wait); + } +} diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_timeline_fence_wait.h b/drivers/gpu/arm/mali400/common/mali_timeline_fence_wait.h --- a/drivers/gpu/arm/mali400/common/mali_timeline_fence_wait.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_timeline_fence_wait.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_timeline_fence_wait.h + * + * This file contains functions used to wait until a Timeline fence is signaled. + */ + +#ifndef __MALI_TIMELINE_FENCE_WAIT_H__ +#define __MALI_TIMELINE_FENCE_WAIT_H__ + +#include "mali_osk.h" +#include "mali_timeline.h" + +/** + * If used as the timeout argument in @ref mali_timeline_fence_wait, a timer is not used and the + * function only returns when the fence is signaled. + */ +#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER ((u32) -1) + +/** + * If used as the timeout argument in @ref mali_timeline_fence_wait, the function will return + * immediately with the current state of the fence. + */ +#define MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY 0 + +/** + * Fence wait tracker. + * + * The fence wait tracker is added to the Timeline system with the fence we are waiting on as a + * dependency. We will then perform a blocking wait, possibly with a timeout, until the tracker is + * activated, which happens when the fence is signaled. + */ +struct mali_timeline_fence_wait_tracker { + mali_bool activated; /**< MALI_TRUE if the tracker has been activated, MALI_FALSE if not. */ + _mali_osk_atomic_t refcount; /**< Reference count. */ + struct mali_timeline_system *system; /**< Timeline system. */ + struct mali_timeline_tracker tracker; /**< Timeline tracker. */ +}; + +/** + * Wait for a fence to be signaled, or until the timeout is reached. + * + * @param system Timeline system. + * @param fence Fence to wait on. + * @param timeout Timeout in ms, or MALI_TIMELINE_FENCE_WAIT_TIMEOUT_NEVER or + * MALI_TIMELINE_FENCE_WAIT_TIMEOUT_IMMEDIATELY. + * @return MALI_TRUE if signaled, MALI_FALSE if timed out. + */ +mali_bool mali_timeline_fence_wait(struct mali_timeline_system *system, struct mali_timeline_fence *fence, u32 timeout); + +/** + * Used by the Timeline system to activate a fence wait tracker. + * + * @param fence_wait_tracker Fence waiter tracker.
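+ * + * @note Activation wakes the timeline system's wait queue, allowing a blocked mali_timeline_fence_wait() to observe the activated flag and return.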
+ */ +void mali_timeline_fence_wait_activate(struct mali_timeline_fence_wait_tracker *fence_wait_tracker); + +#endif /* __MALI_TIMELINE_FENCE_WAIT_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_timeline.h b/drivers/gpu/arm/mali400/common/mali_timeline.h --- a/drivers/gpu/arm/mali400/common/mali_timeline.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_timeline.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,548 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_TIMELINE_H__ +#define __MALI_TIMELINE_H__ + +#include "mali_osk.h" +#include "mali_ukk.h" +#include "mali_session.h" +#include "mali_kernel_common.h" +#include "mali_spinlock_reentrant.h" +#include "mali_sync.h" +#include "mali_scheduler_types.h" +#include <linux/version.h> + +/** + * Soft job timeout. + * + * Soft jobs have to be signaled as complete after activation. Normally this is done by user space, + * but in order to guarantee that every soft job is completed, we also have a timer. + */ +#define MALI_TIMELINE_TIMEOUT_HZ ((unsigned long) (HZ * 3 / 2)) /* 1500 ms. */ + +/** + * Timeline type. + */ +typedef enum mali_timeline_id { + MALI_TIMELINE_GP = MALI_UK_TIMELINE_GP, /**< GP job timeline. */ + MALI_TIMELINE_PP = MALI_UK_TIMELINE_PP, /**< PP job timeline. */ + MALI_TIMELINE_SOFT = MALI_UK_TIMELINE_SOFT, /**< Soft job timeline. */ + MALI_TIMELINE_MAX = MALI_UK_TIMELINE_MAX +} mali_timeline_id; + +/** + * Used by trackers that should not be added to a timeline (@ref mali_timeline_system_add_tracker). + */ +#define MALI_TIMELINE_NONE MALI_TIMELINE_MAX + +/** + * Tracker type. + */ +typedef enum mali_timeline_tracker_type { + MALI_TIMELINE_TRACKER_GP = 0, /**< Tracker used by GP jobs. */ + MALI_TIMELINE_TRACKER_PP = 1, /**< Tracker used by PP jobs. */ + MALI_TIMELINE_TRACKER_SOFT = 2, /**< Tracker used by soft jobs. */ + MALI_TIMELINE_TRACKER_WAIT = 3, /**< Tracker used for fence wait. */ + MALI_TIMELINE_TRACKER_SYNC = 4, /**< Tracker used for sync fence. */ + MALI_TIMELINE_TRACKER_MAX = 5, +} mali_timeline_tracker_type; + +/** + * Tracker activation error. + */ +typedef u32 mali_timeline_activation_error; +#define MALI_TIMELINE_ACTIVATION_ERROR_NONE 0 +#define MALI_TIMELINE_ACTIVATION_ERROR_SYNC_BIT (1<<1) +#define MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT (1<<0) + +/** + * Type used to represent a point on a timeline. + */ +typedef u32 mali_timeline_point; + +/** + * Used to represent that there is no point on a timeline. + */ +#define MALI_TIMELINE_NO_POINT ((mali_timeline_point) 0) + +/** + * The maximum span of points on a timeline. A timeline will be considered full if the difference + * between the oldest and newest points is equal to or larger than this value. + */ +#define MALI_TIMELINE_MAX_POINT_SPAN 65536 + +/** + * Magic value used to assert on validity of trackers. + */ +#define MALI_TIMELINE_TRACKER_MAGIC 0xabcdabcd + +struct mali_timeline; +struct mali_timeline_waiter; +struct mali_timeline_tracker; + +/** + * Timeline fence.
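+ * + * A fence bundles at most one point to wait for per timeline, plus an optional sync fence file descriptor. A tracker that depends on the fence is activated only once every referenced point is released and the sync fence, if present, is signaled.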
+ */ +struct mali_timeline_fence { + mali_timeline_point points[MALI_TIMELINE_MAX]; /**< For each timeline, a point or MALI_TIMELINE_NO_POINT. */ + s32 sync_fd; /**< A file descriptor representing a sync fence, or -1. */ +}; + +/** + * Timeline system. + * + * The Timeline system has a set of timelines associated with a session. + */ +struct mali_timeline_system { + struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */ + struct mali_timeline *timelines[MALI_TIMELINE_MAX]; /**< The timelines in this system */ + + /* Single-linked list of unused waiter objects. Uses the tracker_next field in tracker. */ + struct mali_timeline_waiter *waiter_empty_list; + + struct mali_session_data *session; /**< Session that owns this system. */ + + mali_bool timer_enabled; /**< Set to MALI_TRUE if soft job timer should be enabled, MALI_FALSE if not. */ + + _mali_osk_wait_queue_t *wait_queue; /**< Wait queue. */ + +#if defined(CONFIG_SYNC) + struct sync_timeline *signaled_sync_tl; /**< Special sync timeline used to create pre-signaled sync fences */ +#endif /* defined(CONFIG_SYNC) */ +}; + +/** + * Timeline. Each Timeline system will have MALI_TIMELINE_MAX timelines. + */ +struct mali_timeline { + mali_timeline_point point_next; /**< The next available point. */ + mali_timeline_point point_oldest; /**< The oldest point not released. */ + + /* Double-linked list of trackers. Sorted in ascending order by tracker->time_number with + * tail pointing to the tracker with the oldest time. */ + struct mali_timeline_tracker *tracker_head; + struct mali_timeline_tracker *tracker_tail; + + /* Double-linked list of waiters. Sorted in ascending order by waiter->time_number_wait + * with tail pointing to the waiter with oldest wait time. */ + struct mali_timeline_waiter *waiter_head; + struct mali_timeline_waiter *waiter_tail; + + struct mali_timeline_system *system; /**< Timeline system this timeline belongs to. */ + enum mali_timeline_id id; /**< Timeline type. */ + +#if defined(CONFIG_SYNC) + struct sync_timeline *sync_tl; /**< Sync timeline that corresponds to this timeline. */ + mali_bool destroyed; + struct mali_spinlock_reentrant *spinlock; /**< Spin lock protecting the timeline system */ +#endif /* defined(CONFIG_SYNC) */ + + /* The following fields are used to time out soft job trackers. */ + _mali_osk_wq_delayed_work_t *delayed_work; + mali_bool timer_active; +}; + +/** + * Timeline waiter. + */ +struct mali_timeline_waiter { + mali_timeline_point point; /**< Point on timeline we are waiting for to be released. */ + struct mali_timeline_tracker *tracker; /**< Tracker that is waiting. */ + + struct mali_timeline_waiter *timeline_next; /**< Next waiter on timeline's waiter list. */ + struct mali_timeline_waiter *timeline_prev; /**< Previous waiter on timeline's waiter list. */ + + struct mali_timeline_waiter *tracker_next; /**< Next waiter on tracker's waiter list. */ +}; + +/** + * Timeline tracker. + */ +struct mali_timeline_tracker { + MALI_DEBUG_CODE(u32 magic); /**< Should always be MALI_TIMELINE_TRACKER_MAGIC for a valid tracker. */ + + mali_timeline_point point; /**< Point on timeline for this tracker */ + + struct mali_timeline_tracker *timeline_next; /**< Next tracker on timeline's tracker list */ + struct mali_timeline_tracker *timeline_prev; /**< Previous tracker on timeline's tracker list */ + + u32 trigger_ref_count; /**< When zero tracker will be activated */ + mali_timeline_activation_error activation_error; /**< Activation error. 
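+ * Set when the tracker is activated with an error, see mali_timeline_system_tracker_put().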
*/ + struct mali_timeline_fence fence; /**< Fence used to create this tracker */ + + /* Single-linked list of waiters. Sorted in order of insertion with + * tail pointing to first waiter. */ + struct mali_timeline_waiter *waiter_head; + struct mali_timeline_waiter *waiter_tail; + +#if defined(CONFIG_SYNC) + /* These are only used if the tracker is waiting on a sync fence. */ + struct mali_timeline_waiter *waiter_sync; /**< A direct pointer to timeline waiter representing sync fence. */ + struct sync_fence_waiter sync_fence_waiter; /**< Used to connect sync fence and tracker in sync fence wait callback. */ + struct sync_fence *sync_fence; /**< The sync fence this tracker is waiting on. */ + _mali_osk_list_t sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */ +#endif /* defined(CONFIG_SYNC) */ + +#if defined(CONFIG_MALI_DMA_BUF_FENCE) + struct mali_timeline_waiter *waiter_dma_fence; /**< A direct pointer to timeline waiter representing dma fence. */ +#endif + + struct mali_timeline_system *system; /**< Timeline system. */ + struct mali_timeline *timeline; /**< Timeline, or NULL if not on a timeline. */ + enum mali_timeline_tracker_type type; /**< Type of tracker. */ + void *job; /**< Owner of tracker. */ + + /* The following fields are used to time out soft job trackers. */ + unsigned long os_tick_create; + unsigned long os_tick_activate; + mali_bool timer_active; +}; + +extern _mali_osk_atomic_t gp_tracker_count; +extern _mali_osk_atomic_t phy_pp_tracker_count; +extern _mali_osk_atomic_t virt_pp_tracker_count; + +/** + * What follows is a set of functions to check the state of a timeline and to determine where on a + * timeline a given point is. Most of these checks will translate the timeline so the oldest point + * on the timeline is aligned with zero. Remember that all of these calculations are done on + * unsigned integers. + * + * The following example illustrates the three different states a point can be in. The timeline has
+ * been translated to put the oldest point at zero:
+ *
+ *
+ *                               [ point is in forbidden zone ]
+ *                                          64k wide
+ *                               MALI_TIMELINE_MAX_POINT_SPAN
+ *
+ *    [ point is on timeline     )                            ( point is released        ]
+ *
+ *    0--------------------------##############################--------------------2^32 - 1
+ *    ^                          ^
+ *    \                          |
+ *     oldest point on timeline  |
+ *                               \
+ *                                next point on timeline
+ */ + +/** + * Compare two timeline points + * + * Returns true if a is after b, false if a is before or equal to b. + * + * This function ignores MALI_TIMELINE_MAX_POINT_SPAN. Wrapping is supported and + * the result will be correct if the points are less than UINT_MAX/2 apart. For example, if + * b = 0xFFFFFFF0 and a has wrapped around to 5, then (s32)b - (s32)a = -21 < 0, so a is + * correctly reported as being after b. + * + * @param a Point on timeline + * @param b Point on timeline + * @return MALI_TRUE if a is after b + */ +MALI_STATIC_INLINE mali_bool mali_timeline_point_after(mali_timeline_point a, mali_timeline_point b) +{ + return 0 > ((s32)b) - ((s32)a); +} + +/** + * Check if a point is on a timeline. A point is on a timeline if it is greater than, or equal to, + * the oldest point, and less than the next point. + * + * @param timeline Timeline. + * @param point Point on timeline. + * @return MALI_TRUE if point is on timeline, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_point_on(struct mali_timeline *timeline, mali_timeline_point point) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point); + + return (point - timeline->point_oldest) < (timeline->point_next - timeline->point_oldest); +} + +/** + * Check if a point has been released.
A point is released if it is older than the oldest point on + * the timeline, newer than the next point, and also not in the forbidden zone. + * + * @param timeline Timeline. + * @param point Point on timeline. + * @return MALI_TRUE if point has been released, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_point_released(struct mali_timeline *timeline, mali_timeline_point point) +{ + mali_timeline_point point_normalized; + mali_timeline_point next_normalized; + + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point); + + point_normalized = point - timeline->point_oldest; + next_normalized = timeline->point_next - timeline->point_oldest; + + return point_normalized > (next_normalized + MALI_TIMELINE_MAX_POINT_SPAN); +} + +/** + * Check if a point is valid. A point is valid if it is on the timeline or has been released. + * + * @param timeline Timeline. + * @param point Point on timeline. + * @return MALI_TRUE if point is valid, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_point_valid(struct mali_timeline *timeline, mali_timeline_point point) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + return mali_timeline_is_point_on(timeline, point) || mali_timeline_is_point_released(timeline, point); +} + +/** + * Check if timeline is empty (has no points on it). A timeline is empty if next == oldest. + * + * @param timeline Timeline. + * @return MALI_TRUE if timeline is empty, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_empty(struct mali_timeline *timeline) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + return timeline->point_next == timeline->point_oldest; +} + +/** + * Check if timeline is full. A valid timeline cannot span more than 64k points (@ref + * MALI_TIMELINE_MAX_POINT_SPAN). + * + * @param timeline Timeline. + * @return MALI_TRUE if timeline is full, MALI_FALSE if not. + */ +MALI_STATIC_INLINE mali_bool mali_timeline_is_full(struct mali_timeline *timeline) +{ + MALI_DEBUG_ASSERT_POINTER(timeline); + return MALI_TIMELINE_MAX_POINT_SPAN <= (timeline->point_next - timeline->point_oldest); +} + +/** + * Create a new timeline system. + * + * @param session The session this timeline system will belong to. + * @return New timeline system. + */ +struct mali_timeline_system *mali_timeline_system_create(struct mali_session_data *session); + +/** + * Abort timeline system. + * + * This will release all pending waiters in the timeline system causing all trackers to be + * activated. + * + * @param system Timeline system to abort all jobs from. + */ +void mali_timeline_system_abort(struct mali_timeline_system *system); + +/** + * Destroy an empty timeline system. + * + * @note @ref mali_timeline_system_abort() should be called prior to this function. + * + * @param system Timeline system to destroy. + */ +void mali_timeline_system_destroy(struct mali_timeline_system *system); + +/** + * Stop the soft job timer. + * + * @param system Timeline system + */ +void mali_timeline_system_stop_timer(struct mali_timeline_system *system); + +/** + * Add a tracker to a timeline system and optionally also to a timeline. + * + * Once added to the timeline system, the tracker is guaranteed to be activated. The tracker can be + * activated before this function returns. Thus, it is also possible that the tracker is released + * before this function returns, depending on the tracker type. + * + * @note Tracker must be initialized (@ref mali_timeline_tracker_init) before being added to the + * timeline system.
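+ * + * @note Trackers added with MALI_TIMELINE_NONE (for example fence wait and sync fence trackers) are not placed on any timeline and always yield MALI_TIMELINE_NO_POINT.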
+ * + * @param system Timeline system the tracker will be added to. + * @param tracker The tracker to be added. + * @param timeline_id Id of the timeline the tracker will be added to, or + * MALI_TIMELINE_NONE if it should not be added on a timeline. + * @return Point on timeline identifying this tracker, or MALI_TIMELINE_NO_POINT if not on timeline. + */ +mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system, + struct mali_timeline_tracker *tracker, + enum mali_timeline_id timeline_id); + +/** + * Get latest point on timeline. + * + * @param system Timeline system. + * @param timeline_id Id of timeline to get latest point from. + * @return Latest point on timeline, or MALI_TIMELINE_NO_POINT if the timeline is empty. + */ +mali_timeline_point mali_timeline_system_get_latest_point(struct mali_timeline_system *system, + enum mali_timeline_id timeline_id); + +/** + * Initialize tracker. + * + * Must be called before tracker is added to timeline system (@ref mali_timeline_system_add_tracker). + * + * @param tracker Tracker to initialize. + * @param type Type of tracker. + * @param fence Fence used to set up dependencies for tracker. + * @param job Pointer to job struct this tracker is associated with. + */ +void mali_timeline_tracker_init(struct mali_timeline_tracker *tracker, + mali_timeline_tracker_type type, + struct mali_timeline_fence *fence, + void *job); + +/** + * Grab trigger ref count on tracker. + * + * This will prevent tracker from being activated until the trigger ref count reaches zero. + * + * @note Tracker must have been initialized (@ref mali_timeline_tracker_init). + * + * @param system Timeline system. + * @param tracker Tracker. + */ +void mali_timeline_system_tracker_get(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker); + +/** + * Release trigger ref count on tracker. + * + * If the trigger ref count reaches zero, the tracker will be activated. + * + * @param system Timeline system. + * @param tracker Tracker. + * @param activation_error Error bitmask if activated with error, or MALI_TIMELINE_ACTIVATION_ERROR_NONE if no error. + * @return Scheduling bitmask. + */ +mali_scheduler_mask mali_timeline_system_tracker_put(struct mali_timeline_system *system, struct mali_timeline_tracker *tracker, mali_timeline_activation_error activation_error); + +/** + * Release a tracker from the timeline system. + * + * This is used to signal that the job being tracked is finished, either due to normal circumstances + * (job complete/abort) or due to a timeout. + * + * We may need to schedule some subsystems after a tracker has been released and the returned + * bitmask will tell us if it is necessary. If the return value is non-zero, this value needs to be + * sent as an input parameter to @ref mali_scheduler_schedule_from_mask() to do the scheduling. + * + * @note Tracker must have been activated before being released. + * @warning Not calling @ref mali_scheduler_schedule_from_mask() after releasing a tracker can lead + * to a deadlock. + * + * @param tracker Tracker being released. + * @return Scheduling bitmask. + */ +mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker); + +MALI_STATIC_INLINE mali_bool mali_timeline_tracker_activation_error( + struct mali_timeline_tracker *tracker) +{ + MALI_DEBUG_ASSERT_POINTER(tracker); + return (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT & + tracker->activation_error) ? MALI_TRUE : MALI_FALSE; +} + +/** + * Copy data from a UK fence to a Timeline fence.
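+ * The UK fence is the user-space view of the dependencies; this translates it into the kernel-side struct mali_timeline_fence used by the Timeline system.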
+ * + * @param fence Timeline fence. + * @param uk_fence UK fence. + */ +void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence); + +void mali_timeline_initialize(void); + +void mali_timeline_terminate(void); + +MALI_STATIC_INLINE mali_bool mali_timeline_has_gp_job(void) +{ + return 0 < _mali_osk_atomic_read(&gp_tracker_count); +} + +MALI_STATIC_INLINE mali_bool mali_timeline_has_physical_pp_job(void) +{ + return 0 < _mali_osk_atomic_read(&phy_pp_tracker_count); +} + +MALI_STATIC_INLINE mali_bool mali_timeline_has_virtual_pp_job(void) +{ + return 0 < _mali_osk_atomic_read(&virt_pp_tracker_count); +} + +#if defined(DEBUG) +#define MALI_TIMELINE_DEBUG_FUNCTIONS +#endif /* DEBUG */ +#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS) + +/** + * Tracker state. Used for debug printing. + */ +typedef enum mali_timeline_tracker_state { + MALI_TIMELINE_TS_INIT = 0, + MALI_TIMELINE_TS_WAITING = 1, + MALI_TIMELINE_TS_ACTIVE = 2, + MALI_TIMELINE_TS_FINISH = 3, +} mali_timeline_tracker_state; + +/** + * Get tracker state. + * + * @param tracker Tracker to check. + * @return State of tracker. + */ +mali_timeline_tracker_state mali_timeline_debug_get_tracker_state(struct mali_timeline_tracker *tracker); + +/** + * Print debug information about tracker. + * + * @param tracker Tracker to print. + */ +void mali_timeline_debug_print_tracker(struct mali_timeline_tracker *tracker, _mali_osk_print_ctx *print_ctx); + +/** + * Print debug information about timeline. + * + * @param timeline Timeline to print. + */ +void mali_timeline_debug_print_timeline(struct mali_timeline *timeline, _mali_osk_print_ctx *print_ctx); + +#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) +void mali_timeline_debug_direct_print_tracker(struct mali_timeline_tracker *tracker); +void mali_timeline_debug_direct_print_timeline(struct mali_timeline *timeline); +#endif + +/** + * Print debug information about timeline system. + * + * @param system Timeline system to print. + */ +void mali_timeline_debug_print_system(struct mali_timeline_system *system, _mali_osk_print_ctx *print_ctx); + +#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */ + +#if defined(CONFIG_MALI_DMA_BUF_FENCE) +/** + * The timeline dma fence callback, invoked when a dma fence is signaled. + * + * @param pp_job_ptr Pointer to the pp job that is linked to the signaled dma fence. + */ +void mali_timeline_dma_fence_callback(void *pp_job_ptr); +#endif + +#endif /* __MALI_TIMELINE_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_timeline_sync_fence.c b/drivers/gpu/arm/mali400/common/mali_timeline_sync_fence.c --- a/drivers/gpu/arm/mali400/common/mali_timeline_sync_fence.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_timeline_sync_fence.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_timeline_sync_fence.h" + +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_sync.h" + +#if defined(CONFIG_SYNC) + +/** + * Creates a sync fence tracker and a sync fence.
Adds sync fence tracker to Timeline system and + * returns sync fence. The sync fence will be signaled when the sync fence tracker is activated. + * + * @param timeline Timeline. + * @param point Point on timeline. + * @return Sync fence that will be signaled when tracker is activated. + */ +static struct sync_fence *mali_timeline_sync_fence_create_and_add_tracker(struct mali_timeline *timeline, mali_timeline_point point) +{ + struct mali_timeline_sync_fence_tracker *sync_fence_tracker; + struct sync_fence *sync_fence; + struct mali_timeline_fence fence; + + MALI_DEBUG_ASSERT_POINTER(timeline); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT != point); + + /* Allocate sync fence tracker. */ + sync_fence_tracker = _mali_osk_calloc(1, sizeof(struct mali_timeline_sync_fence_tracker)); + if (NULL == sync_fence_tracker) { + MALI_PRINT_ERROR(("Mali Timeline: sync_fence_tracker allocation failed\n")); + return NULL; + } + + /* Create sync flag. */ + MALI_DEBUG_ASSERT_POINTER(timeline->sync_tl); + sync_fence_tracker->flag = mali_sync_flag_create(timeline->sync_tl, point); + if (NULL == sync_fence_tracker->flag) { + MALI_PRINT_ERROR(("Mali Timeline: sync_flag creation failed\n")); + _mali_osk_free(sync_fence_tracker); + return NULL; + } + + /* Create sync fence from sync flag. */ + sync_fence = mali_sync_flag_create_fence(sync_fence_tracker->flag); + if (NULL == sync_fence) { + MALI_PRINT_ERROR(("Mali Timeline: sync_fence creation failed\n")); + mali_sync_flag_put(sync_fence_tracker->flag); + _mali_osk_free(sync_fence_tracker); + return NULL; + } + + /* Setup fence for tracker. */ + _mali_osk_memset(&fence, 0, sizeof(struct mali_timeline_fence)); + fence.sync_fd = -1; + fence.points[timeline->id] = point; + + /* Finally, add the tracker to Timeline system. */ + mali_timeline_tracker_init(&sync_fence_tracker->tracker, MALI_TIMELINE_TRACKER_SYNC, &fence, sync_fence_tracker); + point = mali_timeline_system_add_tracker(timeline->system, &sync_fence_tracker->tracker, MALI_TIMELINE_NONE); + MALI_DEBUG_ASSERT(MALI_TIMELINE_NO_POINT == point); + + return sync_fence; +} + +s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence) +{ + u32 i; + struct sync_fence *sync_fence_acc = NULL; + + MALI_DEBUG_ASSERT_POINTER(system); + MALI_DEBUG_ASSERT_POINTER(fence); + + for (i = 0; i < MALI_TIMELINE_MAX; ++i) { + struct mali_timeline *timeline; + struct sync_fence *sync_fence; + + if (MALI_TIMELINE_NO_POINT == fence->points[i]) continue; + + timeline = system->timelines[i]; + MALI_DEBUG_ASSERT_POINTER(timeline); + + sync_fence = mali_timeline_sync_fence_create_and_add_tracker(timeline, fence->points[i]); + if (NULL == sync_fence) goto error; + + if (NULL != sync_fence_acc) { + /* Merge sync fences. */ + sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence); + if (NULL == sync_fence_acc) goto error; + } else { + /* This was the first sync fence created. */ + sync_fence_acc = sync_fence; + } + } + + if (-1 != fence->sync_fd) { + struct sync_fence *sync_fence; + + sync_fence = sync_fence_fdget(fence->sync_fd); + if (NULL == sync_fence) goto error; + + if (NULL != sync_fence_acc) { + sync_fence_acc = mali_sync_fence_merge(sync_fence_acc, sync_fence); + if (NULL == sync_fence_acc) goto error; + } else { + sync_fence_acc = sync_fence; + } + } + + if (NULL == sync_fence_acc) { + MALI_DEBUG_ASSERT_POINTER(system->signaled_sync_tl); + + /* There was nothing to wait on, so return an already signaled fence. 
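+ * This keeps the contract uniform for callers: every successful return is a file descriptor for a usable sync fence, even when the input fence was empty.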
*/ + + sync_fence_acc = mali_sync_timeline_create_signaled_fence(system->signaled_sync_tl); + if (NULL == sync_fence_acc) goto error; + } + + /* Return file descriptor for the accumulated sync fence. */ + return mali_sync_fence_fd_alloc(sync_fence_acc); + +error: + if (NULL != sync_fence_acc) { + sync_fence_put(sync_fence_acc); + } + + return -1; +} + +void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker) +{ + mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY; + + MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker); + MALI_DEBUG_ASSERT_POINTER(sync_fence_tracker->flag); + + MALI_DEBUG_PRINT(4, ("Mali Timeline: activation for sync fence tracker\n")); + + /* Signal flag and release reference. */ + mali_sync_flag_signal(sync_fence_tracker->flag, 0); + mali_sync_flag_put(sync_fence_tracker->flag); + + /* Nothing can wait on this tracker, so nothing to schedule after release. */ + schedule_mask = mali_timeline_tracker_release(&sync_fence_tracker->tracker); + MALI_DEBUG_ASSERT(MALI_SCHEDULER_MASK_EMPTY == schedule_mask); + + _mali_osk_free(sync_fence_tracker); +} + +#endif /* defined(CONFIG_SYNC) */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_timeline_sync_fence.h b/drivers/gpu/arm/mali400/common/mali_timeline_sync_fence.h --- a/drivers/gpu/arm/mali400/common/mali_timeline_sync_fence.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_timeline_sync_fence.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_timeline_sync_fence.h + * + * This file contains code related to creating sync fences from timeline fences. + */ + +#ifndef __MALI_TIMELINE_SYNC_FENCE_H__ +#define __MALI_TIMELINE_SYNC_FENCE_H__ + +#include "mali_timeline.h" + +#if defined(CONFIG_SYNC) + +/** + * Sync fence tracker. + */ +struct mali_timeline_sync_fence_tracker { + struct mali_sync_flag *flag; /**< Sync flag used to connect tracker and sync fence. */ + struct mali_timeline_tracker tracker; /**< Timeline tracker. */ +}; + +/** + * Create a sync fence that will be signaled when @ref fence is signaled. + * + * @param system Timeline system. + * @param fence Fence to create sync fence from. + * @return File descriptor for new sync fence, or -1 on error. + */ +s32 mali_timeline_sync_fence_create(struct mali_timeline_system *system, struct mali_timeline_fence *fence); + +/** + * Used by the Timeline system to activate a sync fence tracker. + * + * @param sync_fence_tracker Sync fence tracker. + * + */ +void mali_timeline_sync_fence_activate(struct mali_timeline_sync_fence_tracker *sync_fence_tracker); + +#endif /* defined(CONFIG_SYNC) */ + +#endif /* __MALI_TIMELINE_SYNC_FENCE_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_ukk.h b/drivers/gpu/arm/mali400/common/mali_ukk.h --- a/drivers/gpu/arm/mali400/common/mali_ukk.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_ukk.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,551 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. 
All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_ukk.h + * Defines the kernel-side interface of the user-kernel interface + */ + +#ifndef __MALI_UKK_H__ +#define __MALI_UKK_H__ + +#include "mali_osk.h" +#include "mali_uk_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @addtogroup uddapi Unified Device Driver (UDD) APIs + * + * @{ + */ + +/** + * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs + * + * - The _mali_uk functions are an abstraction of the interface to the device + * driver. On certain OSs, this would be implemented via the IOCTL interface. + * On other OSs, it could be via extension of some Device Driver Class, or + * direct function call for Bare metal/RTOSs. + * - It is important to note that: + * - The Device Driver has implemented the _mali_ukk set of functions + * - The Base Driver calls the corresponding set of _mali_uku functions. + * - What requires porting is solely the calling mechanism from User-side to + * Kernel-side, and propagating back the results. + * - Each U/K function is associated with a (group, number) pair from + * \ref _mali_uk_functions to make it possible for a common function in the + * Base Driver and Device Driver to route User/Kernel calls from/to the + * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number + * would be formed based on the group and number assigned to the _mali_uk + * function, as listed in \ref _mali_uk_functions. On the user-side, each + * _mali_uku function would just make an IOCTL with the IOCTL-code being an + * encoded form of the (group, number) pair. On the kernel-side, the Device + * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number) + * pair, and uses this to determine which corresponding _mali_ukk should be + * called. + * - Refer to \ref _mali_uk_functions for more information about this + * (group, number) pairing. + * - In a system where there is no distinction between user and kernel-side, + * the U/K interface may be implemented as:@code + * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args ) + * { + * return mali_ukk_examplefunction( args ); + * } + * @endcode + * - Therefore, all U/K calls behave \em as \em though they were direct + * function calls (but the \b implementation \em need \em not be direct + * function calls) + * + * @note Naming the _mali_uk functions the same on both User and Kernel sides + * on non-RTOS systems causes debugging issues when setting breakpoints. In + * this case, it is not clear which function the breakpoint is put on. + * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku + * and in kernel space with \c _mali_ukk. The naming for the argument + * structures is unaffected. + * + * - The _mali_uk functions are synchronous. + * - Arguments to the _mali_uk functions are passed in a structure. The only + * parameter passed to the _mali_uk functions is a pointer to this structure. + * The first member of this structure, ctx, is a pointer to a context returned + * by _mali_uku_open().
For example:@code + * typedef struct + * { + * void *ctx; + * u32 number_of_cores; + * } _mali_uk_get_gp_number_of_cores_s; + * @endcode + * + * - Each _mali_uk function has its own argument structure named after the + * function. The argument is distinguished by the _s suffix. + * - The argument types are defined by the base driver and user-kernel + * interface. + * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t. + * - Only arguments of type input or input/output need be initialized before + * calling a _mali_uk function. + * - Arguments of type output and input/output are only valid when the + * _mali_uk function returns \ref _MALI_OSK_ERR_OK. + * - The \c ctx member is always invalid after it has been used by a + * _mali_uk function, except for the context management functions + * + * + * \b Interface \b restrictions + * + * The requirements of the interface mean that an implementation of the + * User-kernel interface may do no 'real' work. For example, the following are + * illegal in the User-kernel implementation: + * - Calling functions necessary for operation on all systems, which would + * not otherwise get called on RTOS systems. + * - For example, a U/K interface that calls multiple _mali_ukk functions + * during one particular U/K call. This could not be achieved by the same code + * which uses direct function calls for the U/K interface. + * - Writing values into the args members, when otherwise these members would + * not hold a useful value for a direct function call U/K interface. + * - For example, U/K interface implementations that take NULL members in + * their arguments structure from the user side, but those members are + * replaced with non-NULL values in the kernel-side of the U/K interface + * implementation. A scratch area for writing data is one such example. In this + * case, a direct function call U/K interface would segfault, because no code + * would be present to replace the NULL pointer with a meaningful pointer. + * - Note that we discourage the case where the U/K implementation changes + * a NULL argument member to non-NULL, and then the Device Driver code (outside + * of the U/K layer) re-checks this member for NULL, and corrects it when + * necessary. Whilst such code works even on direct function call U/K + * interfaces, it reduces the testing coverage of the Device Driver code. This + * is because we have no way of testing the NULL == value path on an OS + * implementation. + * + * A number of allowable examples exist where U/K interfaces do 'real' work: + * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info + * - In this case, without the pointer switching on direct function call + * U/K interface, the Device Driver code still sees the same thing: a pointer + * to which it can write memory. This is because such a system has no + * distinction between a user and kernel pointer. + * - Writing an OS-specific value into the ukk_private member for + * _mali_ukk_mem_mmap(). + * - In this case, this value is passed around by Device Driver code, but + * its actual value is never checked. Device Driver code simply passes it from + * the U/K layer to the OSK layer, where it can be acted upon. In this case, + * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK + * (_mali_osk_mem_mapregion_init()) functions will collaborate on the + * meaning of the ukk_private member.
On other OSs, it may be unused by both + * U/K and OSK layers + * - Therefore, on error inside the U/K interface implementation itself, + * it will be as though the _mali_ukk function itself had failed, and cleaned + * up after itself. + * - Compare this to a direct function call U/K implementation, where all + * error cleanup is handled by the _mali_ukk function itself. The direct + * function call U/K interface implementation is automatically atomic. + * + * The last example highlights a consequence of all U/K interface + * implementations: they must be atomic with respect to the Device Driver code. + * And therefore, should Device Driver code succeed but the U/K implementation + * fail afterwards (but before return to user-space), then the U/K + * implementation must cause appropriate cleanup actions to preserve the + * atomicity of the interface. + * + * @{ + */ + + +/** @defgroup _mali_uk_context U/K Context management + * + * These functions allow for initialisation of the user-kernel interface once per process. + * + * Generally the context will store the OS specific object to communicate with the kernel device driver and further + * state information required by the specific implementation. The context is shareable among all threads in the caller process. + * + * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver. + * + * On a bare-metal/RTOS system with no distinction between kernel and + * user-space, the U/K interface simply calls the _mali_ukk variant of the + * function by direct function call. In this case, the context returned is the + * mali_session_data from _mali_ukk_open(). + * + * The kernel side implementations of the U/K interface expect the first member of the argument structure to + * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context + * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context + * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter + * in the argument structure therefore has to be of type input/output. + * + * It should be noted that the caller cannot reuse the \c ctx member of U/K + * argument structure after a U/K call, because it may be overwritten. Instead, + * the context handle must always be stored elsewhere, and copied into + * the appropriate U/K argument structure for each user-side call to + * the U/K interface. This is not usually a problem, since U/K argument + * structures are usually placed on the stack. + * + * @{ */ + +/** @brief Begin a new Mali Device Driver session + * + * This is used to obtain a per-process context handle for all future U/K calls. + * + * @param context pointer to storage to return a (void*)context handle. + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_open(void **context); + +/** @brief End a Mali Device Driver session + * + * This should be called when the process no longer requires use of the Mali Device Driver. + * + * The context handle must not be used after it has been closed. + * + * @param context pointer to a stored (void*)context handle. + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. 
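+ * + * A minimal usage sketch (illustrative only; it assumes a build where the U/K calls are direct function calls): + * @code + * void *ctx = NULL; + * if (_MALI_OSK_ERR_OK == _mali_ukk_open(&ctx)) { + * ... make U/K calls, passing ctx in each argument structure ... + * _mali_ukk_close(&ctx); + * } + * @endcode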
+ */ +_mali_osk_errcode_t _mali_ukk_close(void **context); + +/** @} */ /* end group _mali_uk_context */ + + +/** @addtogroup _mali_uk_core U/K Core + * + * The core functions provide the following functionality: + * - verify that the user and kernel API are compatible + * - retrieve information about the cores and memory banks in the system + * - wait for the result of jobs started on a core + * + * @{ */ + +/** @brief Waits for a job notification. + * + * Sleeps until notified or a timeout occurs. Returns information about the notification. + * + * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args); + +/** @brief Post a notification to the notification queue of this application. + * + * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args); + +/** @brief Verifies if the user and kernel side of this API are compatible. + * + * This function is obsolete, but kept to allow old, incompatible user space + * clients to robustly detect the incompatibility. + * + * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_get_api_version(_mali_uk_get_api_version_s *args); + +/** @brief Verifies if the user and kernel side of this API are compatible. + * + * @param args see _mali_uk_get_api_version_v2_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_get_api_version_v2(_mali_uk_get_api_version_v2_s *args); + +/** @brief Get the user space settings applicable for the calling process. + * + * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args); + +/** @brief Get a user space setting applicable for the calling process. + * + * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args); + +/** @brief Grant or deny high priority scheduling for this session. + * + * @param args see _mali_uk_request_high_priority_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args); + +/** @brief Make the calling process sleep if the number of pending big jobs in the kernel is >= MALI_MAX_PENDING_BIG_JOB + * + */ +_mali_osk_errcode_t _mali_ukk_pending_submit(_mali_uk_pending_submit_s *args); + +/** @} */ /* end group _mali_uk_core */ + + +/** @addtogroup _mali_uk_memory U/K Memory + * + * The memory functions provide functionality with and without a Mali-MMU present.
+ * + * For Mali-MMU based systems, the following functionality is provided: + * - Initialize and terminate MALI virtual address space + * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the + * current process address space + * - Map/unmap external physical memory into the MALI virtual address range + * + * For Mali-nonMMU based systems: + * - Allocate/deallocate MALI memory + * + * @{ */ + +/** @brief Map Mali Memory into the current user process + * + * Maps Mali memory into the current user process in a generic way. + * + * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes, + * but should not be called by a user process in Mali-nonMMU mode. + * + * The implementation and operation of _mali_ukk_mem_mmap() is dependant on whether the driver is built for Mali-MMU + * or Mali-nonMMU: + * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K + * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are + * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired. + * - In the MMU case, _mali_ukk_mem_mmap() the _mali_uk_mem_mmap_s::phys_addr + * member is used for the \em Mali-virtual address desired for the mapping. The + * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual + * and CPU-physical addresses, and can cope with mapping a contiguous virtual + * address range to a sequence of non-contiguous physical pages. In this case, + * the CPU-physical addresses are not communicated back to the user-side, as + * they are unnecsessary; the \em Mali-virtual address range must be used for + * programming Mali structures. + * + * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of + * CPU-virtual and CPU-physical ranges, but the \em caller must manage the + * \em Mali-virtual address range from the user-side. + * + * @note Mali-virtual address ranges are entirely separate between processes. + * It is not possible for a process to accidentally corrupt another process' + * \em Mali-virtual address space. + * + * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_mem_mmap(_mali_uk_mem_mmap_s *args); + +/** @brief Unmap Mali Memory from the current user process + * + * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied + * from _mali_ukk_mem_mmap(). + * + * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h" + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_mem_munmap(_mali_uk_mem_munmap_s *args); + +/** @brief Determine the buffer size necessary for an MMU page table dump. + * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. + */ +_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size(_mali_uk_query_mmu_page_table_dump_size_s *args); +/** @brief Dump MMU Page tables. + * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h + * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure. 
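+ *
+ * A minimal usage sketch, not part of the driver: the intended two-step
+ * sequence is to query the size first and then dump into a caller-supplied
+ * buffer (field names assumed from "mali_utgard_uk_types.h"; error handling
+ * elided):
+ * @code
+ * _mali_uk_query_mmu_page_table_dump_size_s query = { 0 };
+ * // query.ctx must first be set to the context obtained from _mali_ukk_open()
+ * _mali_ukk_query_mmu_page_table_dump_size(&query);
+ * // ...allocate query.size bytes, reference them from a
+ * // _mali_uk_dump_mmu_page_table_s and call _mali_ukk_dump_mmu_page_table()...
+ * @endcode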
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table(_mali_uk_dump_mmu_page_table_s *args);
+
+/** @brief Write user data to specified Mali memory without causing segfaults.
+ * @param args see _mali_uk_mem_write_safe_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args);
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieve the version of the fragment processors
+ * - determine the number of fragment processors
+ * - start a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx, _mali_uk_pp_start_job_s *uargs);
+
+/**
+ * @brief Issue a request to start new jobs on both Vertex Processor and Fragment Processor.
+ *
+ * @note Will call into @ref _mali_ukk_pp_start_job and @ref _mali_ukk_gp_start_job.
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_pp_and_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx, _mali_uk_pp_and_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args);
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_pp_disable_wb_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieve the version of the Vertex Processors
+ * - determine the number of Vertex Processors available
+ * - start a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails, args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param ctx user-kernel context (mali_session)
+ * @param uargs see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h". Use _mali_osk_copy_from_user to retrieve data!
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs);
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args);
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args);
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args);
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if defined(CONFIG_MALI400_PROFILING)
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Get profiling stream fd.
+ * + * @param args see _mali_uk_profiling_stream_fd_get_s in "mali_utgard_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args); + +/** @brief Profiling control set. + * + * @param args see _mali_uk_profiling_control_set_s in "mali_utgard_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args); + +/** @} */ /* end group _mali_uk_profiling */ +#endif + +/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module + * @{ */ + +/** @brief Report events related to vsync. + * + * @note Events should be reported when starting to wait for vsync and when the + * waiting is finished. This information can then be used in kernel space to + * complement the GPU utilization metric. + * + * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args); + +/** @} */ /* end group _mali_uk_vsync */ + +/** @addtogroup _mali_sw_counters_report U/K Software counter reporting + * @{ */ + +/** @brief Report software counters. + * + * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h" + */ +_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args); + +/** @} */ /* end group _mali_sw_counters_report */ + +/** @} */ /* end group u_k_api */ + +/** @} */ /* end group uddapi */ + +u32 _mali_ukk_report_memory_usage(void); + +u32 _mali_ukk_report_total_memory_size(void); + +u32 _mali_ukk_utilization_gp_pp(void); + +u32 _mali_ukk_utilization_gp(void); + +u32 _mali_ukk_utilization_pp(void); + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_UKK_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_user_settings_db.c b/drivers/gpu/arm/mali400/common/mali_user_settings_db.c --- a/drivers/gpu/arm/mali400/common/mali_user_settings_db.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/common/mali_user_settings_db.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,147 @@ +/** + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_ukk.h" +#include "mali_uk_types.h" +#include "mali_user_settings_db.h" +#include "mali_session.h" + +static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX]; +const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS; + +static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value) +{ + mali_bool done = MALI_FALSE; + + /* + * This function gets a bit complicated because we can't hold the session lock while + * allocating notification objects. 
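+	 *
+	 * The loop below therefore follows an allocate-then-validate pattern:
+	 * pre-allocate one notification object per session while unlocked, take
+	 * the session lock and re-check the session count; if enough objects
+	 * were allocated, hand one to every session and finish, otherwise drop
+	 * the lock, delete the unused objects and retry.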
*/
+
+	while (!done) {
+		u32 i;
+		u32 num_sessions_alloc;
+		u32 num_sessions_with_lock;
+		u32 used_notification_objects = 0;
+		_mali_osk_notification_t **notobjs;
+
+		/* Pre-allocate the number of notification objects we need right now (might change after the lock has been taken) */
+		num_sessions_alloc = mali_session_get_count();
+		if (0 == num_sessions_alloc) {
+			/* No sessions to report to */
+			return;
+		}
+
+		notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+		if (NULL == notobjs) {
+			MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure)\n"));
+			return;
+		}
+
+		for (i = 0; i < num_sessions_alloc; i++) {
+			notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED,
+					sizeof(_mali_uk_settings_changed_s));
+			if (NULL != notobjs[i]) {
+				_mali_uk_settings_changed_s *data;
+				data = notobjs[i]->result_buffer;
+
+				data->setting = setting;
+				data->value = value;
+			} else {
+				MALI_PRINT_ERROR(("Failed to notify user space session about setting change (alloc failure %u)\n", i));
+			}
+		}
+
+		mali_session_lock();
+
+		/* number of sessions will not change while we hold the lock */
+		num_sessions_with_lock = mali_session_get_count();
+
+		if (num_sessions_alloc >= num_sessions_with_lock) {
+			/* We have allocated enough notification objects for all the current sessions */
+			struct mali_session_data *session, *tmp;
+			MALI_SESSION_FOREACH(session, tmp, link) {
+				MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+				if (NULL != notobjs[used_notification_objects]) {
+					mali_session_send_notification(session, notobjs[used_notification_objects]);
+					notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+				}
+				used_notification_objects++;
+			}
+			done = MALI_TRUE;
+		}
+
+		mali_session_unlock();
+
+		/* Delete any remaining/unused notification objects */
+		for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+			if (NULL != notobjs[used_notification_objects]) {
+				_mali_osk_notification_delete(notobjs[used_notification_objects]);
+			}
+		}
+
+		_mali_osk_free(notobjs);
+	}
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+	mali_bool notify = MALI_FALSE;
+
+	if (setting >= _MALI_UK_USER_SETTING_MAX) {
+		MALI_DEBUG_PRINT_ERROR(("Invalid user setting %u\n", setting));
+		return;
+	}
+
+	if (mali_user_settings[setting] != value) {
+		notify = MALI_TRUE;
+	}
+
+	mali_user_settings[setting] = value;
+
+	if (notify) {
+		mali_user_settings_notify(setting, value);
+	}
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+	if (setting >= _MALI_UK_USER_SETTING_MAX) {
+		return 0;
+	}
+
+	return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+	_mali_uk_user_setting_t setting;
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	setting = args->setting;
+
+	if (_MALI_UK_USER_SETTING_MAX > setting) {
+		args->value = mali_user_settings[setting];
+		return _MALI_OSK_ERR_OK;
+	} else {
+		return _MALI_OSK_ERR_INVALID_ARGS;
+	}
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	_mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+	return _MALI_OSK_ERR_OK;
+}
diff -ENwbur a/drivers/gpu/arm/mali400/common/mali_user_settings_db.h b/drivers/gpu/arm/mali400/common/mali_user_settings_db.h
--- a/drivers/gpu/arm/mali400/common/mali_user_settings_db.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/common/mali_user_settings_db.h	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,39 @@
+/**
+ * Copyright (C) 2012-2013, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value is different from the previously set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/.gitignore b/drivers/gpu/arm/mali400/.gitignore
--- a/drivers/gpu/arm/mali400/.gitignore	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/.gitignore	2018-05-06 08:49:49.174695256 +0200
@@ -0,0 +1 @@
+__malidrv_build_info.c
diff -ENwbur a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard.h b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard.h
--- a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard.h	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,531 @@
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_utgard.h
+ * Defines types and interface exposed by the Mali Utgard device driver
+ */
+
+#ifndef __MALI_UTGARD_H__
+#define __MALI_UTGARD_H__
+
+#include "mali_osk_types.h"
+#ifdef CONFIG_MALI_DEVFREQ
+#include <linux/devfreq.h>
+#include "mali_pm_metrics.h"
+#ifdef CONFIG_DEVFREQ_THERMAL
+#include <linux/devfreq_cooling.h>
+#endif
+#endif
+
+#define MALI_GPU_NAME_UTGARD "mali-utgard"
+
+
+#define MALI_OFFSET_GP 0x00000
+#define MALI_OFFSET_GP_MMU 0x03000
+
+#define MALI_OFFSET_PP0 0x08000
+#define MALI_OFFSET_PP0_MMU 0x04000
+#define MALI_OFFSET_PP1 0x0A000
+#define MALI_OFFSET_PP1_MMU 0x05000
+#define MALI_OFFSET_PP2 0x0C000
+#define MALI_OFFSET_PP2_MMU 0x06000
+#define MALI_OFFSET_PP3 0x0E000
+#define MALI_OFFSET_PP3_MMU 0x07000
+
+#define MALI_OFFSET_PP4 0x28000
+#define MALI_OFFSET_PP4_MMU 0x1C000
+#define MALI_OFFSET_PP5 0x2A000
+#define MALI_OFFSET_PP5_MMU 0x1D000
+#define MALI_OFFSET_PP6 0x2C000
+#define MALI_OFFSET_PP6_MMU 0x1E000
+#define MALI_OFFSET_PP7 0x2E000
+#define MALI_OFFSET_PP7_MMU 0x1F000
+
+#define MALI_OFFSET_L2_RESOURCE0 0x01000
+#define MALI_OFFSET_L2_RESOURCE1 0x10000
+#define MALI_OFFSET_L2_RESOURCE2 0x11000
+
+#define MALI400_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE1
+#define MALI450_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE2 MALI_OFFSET_L2_RESOURCE2
+#define MALI470_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0
+
+#define MALI_OFFSET_BCAST 0x13000
+#define MALI_OFFSET_DLBU 0x14000
+
+#define MALI_OFFSET_PP_BCAST 0x16000
+#define MALI_OFFSET_PP_BCAST_MMU 0x15000
+
+#define MALI_OFFSET_PMU 0x02000
+#define MALI_OFFSET_DMA 0x12000
+
+/* Mali-300 */
+
+#define MALI_GPU_RESOURCES_MALI300(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+	MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI300_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq) \
+	MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp_irq, pp_mmu_irq)
+
+/* Mali-400 */
+
+#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+	MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+	MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+	MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+	MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
+	MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+	MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+	MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+	MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+	MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq)
+
+#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+	MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
+	MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
+
+#define
MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) + +#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \ + MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) + +#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) + +#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \ + MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + + /* Mali-450 */ +#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \ + MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA) + +#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, 
pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \ + MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA) + +#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr 
+ MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP4, pp3_irq, base_addr + MALI_OFFSET_PP4_MMU, pp3_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP5, pp4_irq, base_addr + MALI_OFFSET_PP5_MMU, pp4_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP6, pp5_irq, base_addr + MALI_OFFSET_PP6_MMU, pp5_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \ + MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA) + +#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP4, pp4_irq, base_addr + MALI_OFFSET_PP4_MMU, pp4_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP5, pp5_irq, base_addr + MALI_OFFSET_PP5_MMU, pp5_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + MALI_OFFSET_PP6, pp6_irq, base_addr + MALI_OFFSET_PP6_MMU, pp6_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + MALI_OFFSET_PP7, pp7_irq, base_addr + MALI_OFFSET_PP7_MMU, pp7_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \ + MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA) + +#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, 
pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + + /* Mali - 470 */ +#define MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI470_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI470_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI470_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI470_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI470_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + 
MALI_GPU_RESOURCES_MALI470_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_L2(base_addr + MALI470_OFFSET_L2_CACHE1) \ + MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \ + MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \ + MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \ + MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \ + MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) + +#define MALI_GPU_RESOURCES_MALI470_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCES_MALI470_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \ + MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \ + +#define MALI_GPU_RESOURCE_L2(addr) \ + { \ + .name = "Mali_L2", \ + .flags = IORESOURCE_MEM, \ + .start = addr, \ + .end = addr + 0x200, \ + }, + +#define MALI_GPU_RESOURCE_GP(gp_addr, gp_irq) \ + { \ + .name = "Mali_GP", \ + .flags = IORESOURCE_MEM, \ + .start = gp_addr, \ + .end = gp_addr + 0x100, \ + }, \ + { \ + .name = "Mali_GP_IRQ", \ + .flags = IORESOURCE_IRQ, \ + .start = gp_irq, \ + .end = gp_irq, \ + }, \ + +#define MALI_GPU_RESOURCE_GP_WITH_MMU(gp_addr, gp_irq, gp_mmu_addr, gp_mmu_irq) \ + { \ + .name = "Mali_GP", \ + .flags = IORESOURCE_MEM, \ + .start = gp_addr, \ + .end = gp_addr + 0x100, \ + }, \ + { \ + .name = "Mali_GP_IRQ", \ + .flags = IORESOURCE_IRQ, \ + .start = gp_irq, \ + .end = gp_irq, \ + }, \ + { \ + .name = "Mali_GP_MMU", \ + .flags = IORESOURCE_MEM, \ + .start = gp_mmu_addr, \ + .end = gp_mmu_addr + 0x100, \ + }, \ + { \ + .name = "Mali_GP_MMU_IRQ", \ + .flags = IORESOURCE_IRQ, \ + .start = gp_mmu_irq, \ + .end = gp_mmu_irq, \ + }, + +#define MALI_GPU_RESOURCE_PP(pp_addr, pp_irq) \ + { \ + .name = "Mali_PP", \ + .flags = IORESOURCE_MEM, \ + .start = pp_addr, \ + .end = pp_addr + 0x1100, \ + }, \ + { \ + .name = "Mali_PP_IRQ", \ + .flags = IORESOURCE_IRQ, \ + .start = pp_irq, \ + .end = pp_irq, \ + }, \ + +#define MALI_GPU_RESOURCE_PP_WITH_MMU(id, pp_addr, pp_irq, pp_mmu_addr, pp_mmu_irq) \ + { \ + .name = "Mali_PP" #id, \ + .flags = IORESOURCE_MEM, \ + .start = pp_addr, \ + .end = pp_addr + 0x1100, \ + }, \ + { \ + .name = "Mali_PP" #id "_IRQ", \ + .flags = IORESOURCE_IRQ, \ + .start = pp_irq, \ + .end = pp_irq, \ + }, \ + { \ + .name = "Mali_PP" #id "_MMU", \ + .flags = IORESOURCE_MEM, \ + .start = pp_mmu_addr, \ + .end = pp_mmu_addr + 0x100, \ + }, \ + { \ + .name = "Mali_PP" #id "_MMU_IRQ", \ + .flags = IORESOURCE_IRQ, \ + .start = pp_mmu_irq, \ + .end = pp_mmu_irq, \ + }, + 
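+/*
+ * Illustrative sketch, not part of this driver: a board file would
+ * typically expand one of the MALI_GPU_RESOURCES_* sets above into the
+ * resource array of the "mali-utgard" platform device, for example:
+ *
+ *   static struct resource mali_gpu_resources[] = {
+ *           MALI_GPU_RESOURCES_MALI400_MP1_PMU(0xC0000000, 42, 43, 44, 45)
+ *   };
+ *
+ * The base address and the four IRQ numbers here are placeholders.
+ */
+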
+#define MALI_GPU_RESOURCE_MMU(mmu_addr, mmu_irq) \ + { \ + .name = "Mali_MMU", \ + .flags = IORESOURCE_MEM, \ + .start = mmu_addr, \ + .end = mmu_addr + 0x100, \ + }, \ + { \ + .name = "Mali_MMU_IRQ", \ + .flags = IORESOURCE_IRQ, \ + .start = mmu_irq, \ + .end = mmu_irq, \ + }, + +#define MALI_GPU_RESOURCE_PMU(pmu_addr) \ + { \ + .name = "Mali_PMU", \ + .flags = IORESOURCE_MEM, \ + .start = pmu_addr, \ + .end = pmu_addr + 0x100, \ + }, + +#define MALI_GPU_RESOURCE_DMA(dma_addr) \ + { \ + .name = "Mali_DMA", \ + .flags = IORESOURCE_MEM, \ + .start = dma_addr, \ + .end = dma_addr + 0x100, \ + }, + +#define MALI_GPU_RESOURCE_DLBU(dlbu_addr) \ + { \ + .name = "Mali_DLBU", \ + .flags = IORESOURCE_MEM, \ + .start = dlbu_addr, \ + .end = dlbu_addr + 0x100, \ + }, + +#define MALI_GPU_RESOURCE_BCAST(bcast_addr) \ + { \ + .name = "Mali_Broadcast", \ + .flags = IORESOURCE_MEM, \ + .start = bcast_addr, \ + .end = bcast_addr + 0x100, \ + }, + +#define MALI_GPU_RESOURCE_PP_BCAST(pp_addr, pp_irq) \ + { \ + .name = "Mali_PP_Broadcast", \ + .flags = IORESOURCE_MEM, \ + .start = pp_addr, \ + .end = pp_addr + 0x1100, \ + }, \ + { \ + .name = "Mali_PP_Broadcast_IRQ", \ + .flags = IORESOURCE_IRQ, \ + .start = pp_irq, \ + .end = pp_irq, \ + }, \ + +#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \ + { \ + .name = "Mali_PP_MMU_Broadcast", \ + .flags = IORESOURCE_MEM, \ + .start = pp_mmu_bcast_addr, \ + .end = pp_mmu_bcast_addr + 0x100, \ + }, + + struct mali_gpu_utilization_data { + unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */ + unsigned int utilization_gp; /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */ + unsigned int utilization_pp; /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */ + }; + + struct mali_gpu_clk_item { + unsigned int clock; /* unit(MHz) */ + unsigned int vol; + }; + + struct mali_gpu_clock { + struct mali_gpu_clk_item *item; + unsigned int num_of_steps; + }; + + struct device; + struct mali_gpu_device_data { + /* Shared GPU memory */ + unsigned long shared_mem_size; + + /* + * Mali PMU switch delay. + * Only needed if the power gates are connected to the PMU in a high fanout + * network. This value is the number of Mali clock cycles it takes to + * enable the power gates and turn on the power mesh. + * This value will have no effect if a daisy chain implementation is used. + */ + u32 pmu_switch_delay; + + /* Mali Dynamic power domain configuration in sequence from 0-11 + * GP PP0 PP1 PP2 PP3 PP4 PP5 PP6 PP7, L2$0 L2$1 L2$2 + */ + u16 pmu_domain_config[12]; + + /* Dedicated GPU memory range (physical). 
*/
+	unsigned long dedicated_mem_start;
+	unsigned long dedicated_mem_size;
+
+	/* Frame buffer memory to be accessible by Mali GPU (physical) */
+	unsigned long fb_start;
+	unsigned long fb_size;
+
+	/* Max runtime [ms] for jobs */
+	int max_job_runtime;
+
+	/* Report GPU utilization and related control in this interval (specified in ms) */
+	unsigned long control_interval;
+
+	/* Function that will receive periodic GPU utilization numbers */
+	void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+	/* Platform callback for setting the clock step; needed when CONFIG_MALI_DVFS is enabled */
+	int (*set_freq)(int setting_clock_step);
+	/* Function through which the platform reports the clock steps the driver may set; needed when CONFIG_MALI_DVFS is enabled */
+	void (*get_clock_info)(struct mali_gpu_clock **data);
+	/* Function that gets the current clock step; needed when CONFIG_MALI_DVFS is enabled */
+	int (*get_freq)(void);
+	/* Function that initializes the Mali GPU secure mode */
+	int (*secure_mode_init)(void);
+	/* Function that deinitializes the Mali GPU secure mode */
+	void (*secure_mode_deinit)(void);
+	/* Function that resets the GPU and enables secure mode */
+	int (*gpu_reset_and_secure_mode_enable)(void);
+	/* Function that resets the GPU and disables secure mode */
+	int (*gpu_reset_and_secure_mode_disable)(void);
+	/* Platform-specific suspend callback */
+	void (*platform_suspend)(struct device *dev);
+	/* Platform-specific resume callback */
+	void (*platform_resume)(struct device *dev);
+	/* IPA-related interface that the customer needs to register */
+#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
+	struct devfreq_cooling_power *gpu_cooling_ops;
+#endif
+};
+
+/**
+ * Pause the scheduling and power state changes of the Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ */
+void mali_dev_pause(void);
+
+/**
+ * Resume scheduling and allow power changes in the Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+void mali_dev_resume(void);
+
+/** @brief Set the desired number of PP cores to use.
+ *
+ * The internal Mali PMU will be used, if present, to physically power off the PP cores.
+ *
+ * @param num_cores The number of desired cores
+ * @return 0 on success, otherwise error. -EINVAL means an invalid number of cores was specified.
+ */
+int mali_perf_set_num_pp_cores(unsigned int num_cores);
+
+#endif
diff -ENwbur a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_ioctl.h b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_ioctl.h
--- a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_ioctl.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_ioctl.h	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library.
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so.
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+#ifndef __MALI_UTGARD_IOCTL_H__
+#define __MALI_UTGARD_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, u32)
+#define MALI_IOC_GET_API_VERSION_V2 _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_v2_s)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s)
+#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s)
+#define MALI_IOC_GET_USER_SETTINGS _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTINGS, _mali_uk_get_user_settings_s)
+#define MALI_IOC_REQUEST_HIGH_PRIORITY _IOW (MALI_IOC_CORE_BASE, _MALI_UK_REQUEST_HIGH_PRIORITY, _mali_uk_request_high_priority_s)
+#define MALI_IOC_TIMELINE_GET_LATEST_POINT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_GET_LATEST_POINT, _mali_uk_timeline_get_latest_point_s)
+#define MALI_IOC_TIMELINE_WAIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_WAIT, _mali_uk_timeline_wait_s)
+#define MALI_IOC_TIMELINE_CREATE_SYNC_FENCE _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, _mali_uk_timeline_create_sync_fence_s)
+#define MALI_IOC_SOFT_JOB_START _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_START, _mali_uk_soft_job_start_s)
+#define MALI_IOC_SOFT_JOB_SIGNAL _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_SOFT_JOB_SIGNAL, _mali_uk_soft_job_signal_s)
+#define MALI_IOC_PENDING_SUBMIT _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_PENDING_SUBMIT, _mali_uk_pending_submit_s)
+
+#define MALI_IOC_MEM_ALLOC _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ALLOC_MEM, _mali_uk_alloc_mem_s)
+#define MALI_IOC_MEM_FREE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_MEM, _mali_uk_free_mem_s)
+#define MALI_IOC_MEM_BIND
_IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_BIND_MEM, _mali_uk_bind_mem_s) +#define MALI_IOC_MEM_UNBIND _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_UNBIND_MEM, _mali_uk_unbind_mem_s) +#define MALI_IOC_MEM_COW _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MEM, _mali_uk_cow_mem_s) +#define MALI_IOC_MEM_COW_MODIFY_RANGE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_COW_MODIFY_RANGE, _mali_uk_cow_modify_range_s) +#define MALI_IOC_MEM_RESIZE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_RESIZE_MEM, _mali_uk_mem_resize_s) +#define MALI_IOC_MEM_DMA_BUF_GET_SIZE _IOR(MALI_IOC_MEMORY_BASE, _MALI_UK_DMA_BUF_GET_SIZE, _mali_uk_dma_buf_get_size_s) +#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s) +#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s) +#define MALI_IOC_MEM_WRITE_SAFE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MEM_WRITE_SAFE, _mali_uk_mem_write_safe_s) + +#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s) +#define MALI_IOC_PP_AND_GP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_AND_GP_START_JOB, _mali_uk_pp_and_gp_start_job_s) +#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s) +#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s) +#define MALI_IOC_PP_DISABLE_WB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_DISABLE_WB, _mali_uk_pp_disable_wb_s) + +#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s) +#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s) +#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s) +#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s) + +#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s) +#define MALI_IOC_PROFILING_REPORT_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_REPORT_SW_COUNTERS, _mali_uk_sw_counters_report_s) +#define MALI_IOC_PROFILING_MEMORY_USAGE_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_MEMORY_USAGE_GET, _mali_uk_profiling_memory_usage_get_s) +#define MALI_IOC_PROFILING_STREAM_FD_GET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STREAM_FD_GET, _mali_uk_profiling_stream_fd_get_s) +#define MALI_IOC_PROILING_CONTROL_SET _IOR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CONTROL_SET, _mali_uk_profiling_control_set_s) + +#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s) + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_UTGARD_IOCTL_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_profiling_events.h b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_profiling_events.h --- a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_profiling_events.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_profiling_events.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. 
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+ * Class Path Exception
+ * Linking this library statically or dynamically with other modules is making a combined work based on this library.
+ * Thus, the terms and conditions of the GNU General Public License cover the whole combination.
+ * As a special exception, the copyright holders of this library give you permission to link this library with independent modules
+ * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting
+ * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify
+ * this library, you may extend this exception to your version of the library, but you are not obligated to do so.
+ * If you do not wish to do so, delete this exception statement from your version.
+ */
+
+#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
+#define _MALI_UTGARD_PROFILING_EVENTS_H_
+
+/*
+ * The event ID is a 32-bit value consisting of the following fields:
+ * reserved, 4 bits, for future use
+ * event type, 4 bits, cinstr_profiling_event_type_t
+ * event channel, 8 bits, the source of the event.
+ * event data, 16-bit field, data depending on event type
+ */
+
+/**
+ * Specifies what kind of event this is
+ */
+typedef enum {
+	MALI_PROFILING_EVENT_TYPE_SINGLE = 0 << 24,
+	MALI_PROFILING_EVENT_TYPE_START = 1 << 24,
+	MALI_PROFILING_EVENT_TYPE_STOP = 2 << 24,
+	MALI_PROFILING_EVENT_TYPE_SUSPEND = 3 << 24,
+	MALI_PROFILING_EVENT_TYPE_RESUME = 4 << 24,
+} cinstr_profiling_event_type_t;
+
+
+/**
+ * Specifies the channel/source of the event
+ */
+typedef enum {
+	MALI_PROFILING_EVENT_CHANNEL_SOFTWARE = 0 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_GP0 = 1 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP0 = 5 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP1 = 6 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP2 = 7 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP3 = 8 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP4 = 9 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP5 = 10 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP6 = 11 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_PP7 = 12 << 16,
+	MALI_PROFILING_EVENT_CHANNEL_GPU = 21 << 16,
+} cinstr_profiling_event_channel_t;
+
+
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(num) (((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) + (num)) << 16)
+#define MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(num) (((MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) + (num)) << 16)
+
+/**
+ * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from software channel
+ */
+typedef enum {
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_NONE = 0,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_NEW_FRAME = 1,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_FLUSH = 2,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SWAP_BUFFERS = 3,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_FB_EVENT = 4,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE = 5,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE = 6,
+	MALI_PROFILING_EVENT_REASON_SINGLE_SW_READBACK = 7,
+
MALI_PROFILING_EVENT_REASON_SINGLE_SW_WRITEBACK = 8, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_ENTER_API_FUNC = 10, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC = 11, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_DISCARD_ATTACHMENTS = 13, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_TRY_LOCK = 53, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_LOCK = 54, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_UMP_UNLOCK = 55, + MALI_PROFILING_EVENT_REASON_SINGLE_LOCK_CONTENDED = 56, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_MALI_FENCE_DUP = 57, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_SET_PP_JOB_FENCE = 58, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_WAIT_SYNC = 59, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_FENCE_SYNC = 60, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_CREATE_NATIVE_FENCE_SYNC = 61, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FENCE_FLUSH = 62, + MALI_PROFILING_EVENT_REASON_SINGLE_SW_EGL_FLUSH_SERVER_WAITS = 63, +} cinstr_profiling_event_reason_single_sw_t; + +/** + * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel + * to inform whether the core is physical or virtual + */ +typedef enum { + MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL = 0, + MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL = 1, +} cinstr_profiling_event_reason_start_stop_hw_t; + +/** + * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_START/STOP is used from software channel + */ +typedef enum { + /*MALI_PROFILING_EVENT_REASON_START_STOP_SW_NONE = 0,*/ + MALI_PROFILING_EVENT_REASON_START_STOP_SW_MALI = 1, + MALI_PROFILING_EVENT_REASON_START_STOP_SW_CALLBACK_THREAD = 2, + MALI_PROFILING_EVENT_REASON_START_STOP_SW_WORKER_THREAD = 3, + MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF = 4, + MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF = 5, +} cinstr_profiling_event_reason_start_stop_sw_t; + +/** + * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SUSPEND/RESUME is used from software channel + */ +typedef enum { + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_NONE = 0, /* used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PIPELINE_FULL = 1, /* NOT used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC = 26, /* used in some build configurations */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_WAIT = 27, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_IFRAME_SYNC = 28, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_FILTER_CLEANUP = 29, /* used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VG_WAIT_TEXTURE = 30, /* used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_MIPLEVEL = 31, /* used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GLES_WAIT_READPIXELS = 32, /* used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SWAP_IMMEDIATE = 33, /* NOT used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_QUEUE_BUFFER = 34, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_ICS_DEQUEUE_BUFFER = 35, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_UMP_LOCK = 36, /* Not currently used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_GLOBAL_LOCK = 37, /* Not currently used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_X11_SWAP = 38, /* Not currently used */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_MALI_EGL_IMAGE_SYNC_WAIT = 39, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_GP_JOB_HANDLING = 40, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_PP_JOB_HANDLING 
= 41, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_MERGE = 42, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP = 43, + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS = 44, + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC = 45, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_JOBS_WAIT = 46, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOFRAMES_WAIT = 47, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOJOBS_WAIT = 48, /* USED */ + MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_SUBMIT_LIMITER_WAIT = 49, /* USED */ +} cinstr_profiling_event_reason_suspend_resume_sw_t; + +/** + * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from a HW channel (GPx+PPx) + */ +typedef enum { + MALI_PROFILING_EVENT_REASON_SINGLE_HW_NONE = 0, + MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT = 1, + MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH = 2, +} cinstr_profiling_event_reason_single_hw_t; + +/** + * These events are applicable when the type MALI_PROFILING_EVENT_TYPE_SINGLE is used from the GPU channel + */ +typedef enum { + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_NONE = 0, + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE = 1, + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L20_COUNTERS = 2, + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L21_COUNTERS = 3, + MALI_PROFILING_EVENT_REASON_SINGLE_GPU_L22_COUNTERS = 4, +} cinstr_profiling_event_reason_single_gpu_t; + +/** + * These values are applicable for the 3rd data parameter when + * the type MALI_PROFILING_EVENT_TYPE_START is used from the software channel + * with the MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF reason. + */ +typedef enum { + MALI_PROFILING_EVENT_DATA_CORE_GP0 = 1, + MALI_PROFILING_EVENT_DATA_CORE_PP0 = 5, + MALI_PROFILING_EVENT_DATA_CORE_PP1 = 6, + MALI_PROFILING_EVENT_DATA_CORE_PP2 = 7, + MALI_PROFILING_EVENT_DATA_CORE_PP3 = 8, + MALI_PROFILING_EVENT_DATA_CORE_PP4 = 9, + MALI_PROFILING_EVENT_DATA_CORE_PP5 = 10, + MALI_PROFILING_EVENT_DATA_CORE_PP6 = 11, + MALI_PROFILING_EVENT_DATA_CORE_PP7 = 12, + MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU = 22, /* GP0 + 21 */ + MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU = 26, /* PP0 + 21 */ + MALI_PROFILING_EVENT_DATA_CORE_PP1_MMU = 27, /* PP1 + 21 */ + MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU = 28, /* PP2 + 21 */ + MALI_PROFILING_EVENT_DATA_CORE_PP3_MMU = 29, /* PP3 + 21 */ + MALI_PROFILING_EVENT_DATA_CORE_PP4_MMU = 30, /* PP4 + 21 */ + MALI_PROFILING_EVENT_DATA_CORE_PP5_MMU = 31, /* PP5 + 21 */ + MALI_PROFILING_EVENT_DATA_CORE_PP6_MMU = 32, /* PP6 + 21 */ + MALI_PROFILING_EVENT_DATA_CORE_PP7_MMU = 33, /* PP7 + 21 */ + +} cinstr_profiling_event_data_core_t; + +#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num)) +#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU + (num)) +#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num)) +#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU + (num)) + + +#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/ diff -ENwbur a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_profiling_gator_api.h b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_profiling_gator_api.h --- a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_profiling_gator_api.h 1970-01-01 01:00:00.000000000 +0100 +++
b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_profiling_gator_api.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,315 @@ +/* + * Copyright (C) 2013, 2015-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + * Class Path Exception + * Linking this library statically or dynamically with other modules is making a combined work based on this library. + * Thus, the terms and conditions of the GNU General Public License cover the whole combination. + * As a special exception, the copyright holders of this library give you permission to link this library with independent modules + * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting + * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify + * this library, you may extend this exception to your version of the library, but you are not obligated to do so. + * If you do not wish to do so, delete this exception statement from your version. + */ + +#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__ +#define __MALI_UTGARD_PROFILING_GATOR_API_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MALI_PROFILING_API_VERSION 4 + +#define MAX_NUM_L2_CACHE_CORES 3 +#define MAX_NUM_FP_CORES 8 +#define MAX_NUM_VP_CORES 1 + +#define _MALI_SPCIAL_COUNTER_DESCRIPTIONS \ + { \ + "Filmstrip_cnt0", \ + "Frequency", \ + "Voltage", \ + "vertex", \ + "fragment", \ + "Total_alloc_pages", \ + }; + +#define _MALI_MEM_COUTNER_DESCRIPTIONS \ + { \ + "untyped_memory", \ + "vertex_index_buffer", \ + "texture_buffer", \ + "varying_buffer", \ + "render_target", \ + "pbuffer_buffer", \ + "plbu_heap", \ + "pointer_array_buffer", \ + "slave_tilelist", \ + "untyped_gp_cmdlist", \ + "polygon_cmdlist", \ + "texture_descriptor", \ + "render_state_word", \ + "shader", \ + "stream_buffer", \ + "fragment_stack", \ + "uniform", \ + "untyped_frame_pool", \ + "untyped_surface", \ + }; + +/** The list of events supported by the Mali DDK. */ +typedef enum { + /* Vertex processor activity */ + ACTIVITY_VP_0 = 0, + + /* Fragment processor activity */ + ACTIVITY_FP_0, + ACTIVITY_FP_1, + ACTIVITY_FP_2, + ACTIVITY_FP_3, + ACTIVITY_FP_4, + ACTIVITY_FP_5, + ACTIVITY_FP_6, + ACTIVITY_FP_7, + + /* L2 cache counters */ + COUNTER_L2_0_C0, + COUNTER_L2_0_C1, + COUNTER_L2_1_C0, + COUNTER_L2_1_C1, + COUNTER_L2_2_C0, + COUNTER_L2_2_C1, + + /* Vertex processor counters */ + COUNTER_VP_0_C0, + COUNTER_VP_0_C1, + + /* Fragment processor counters */ + COUNTER_FP_0_C0, + COUNTER_FP_0_C1, + COUNTER_FP_1_C0, + COUNTER_FP_1_C1, + COUNTER_FP_2_C0, + COUNTER_FP_2_C1, + COUNTER_FP_3_C0, + COUNTER_FP_3_C1, + COUNTER_FP_4_C0, + COUNTER_FP_4_C1, + COUNTER_FP_5_C0, + COUNTER_FP_5_C1, + COUNTER_FP_6_C0, + COUNTER_FP_6_C1, + COUNTER_FP_7_C0, + COUNTER_FP_7_C1, + + /* + * If more hardware counters are added, the _mali_osk_hw_counter_table + * below should also be updated. 
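+ * For example, counter c (0 or 1) of fragment core n is COUNTER_FP_0_C0 + 2*n + c; the FIRST_HW_COUNTER..LAST_HW_COUNTER range defined below relies on the hardware counters staying contiguous.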
+ */ + + /* EGL software counters */ + COUNTER_EGL_BLIT_TIME, + + /* GLES software counters */ + COUNTER_GLES_DRAW_ELEMENTS_CALLS, + COUNTER_GLES_DRAW_ELEMENTS_NUM_INDICES, + COUNTER_GLES_DRAW_ELEMENTS_NUM_TRANSFORMED, + COUNTER_GLES_DRAW_ARRAYS_CALLS, + COUNTER_GLES_DRAW_ARRAYS_NUM_TRANSFORMED, + COUNTER_GLES_DRAW_POINTS, + COUNTER_GLES_DRAW_LINES, + COUNTER_GLES_DRAW_LINE_LOOP, + COUNTER_GLES_DRAW_LINE_STRIP, + COUNTER_GLES_DRAW_TRIANGLES, + COUNTER_GLES_DRAW_TRIANGLE_STRIP, + COUNTER_GLES_DRAW_TRIANGLE_FAN, + COUNTER_GLES_NON_VBO_DATA_COPY_TIME, + COUNTER_GLES_UNIFORM_BYTES_COPIED_TO_MALI, + COUNTER_GLES_UPLOAD_TEXTURE_TIME, + COUNTER_GLES_UPLOAD_VBO_TIME, + COUNTER_GLES_NUM_FLUSHES, + COUNTER_GLES_NUM_VSHADERS_GENERATED, + COUNTER_GLES_NUM_FSHADERS_GENERATED, + COUNTER_GLES_VSHADER_GEN_TIME, + COUNTER_GLES_FSHADER_GEN_TIME, + COUNTER_GLES_INPUT_TRIANGLES, + COUNTER_GLES_VXCACHE_HIT, + COUNTER_GLES_VXCACHE_MISS, + COUNTER_GLES_VXCACHE_COLLISION, + COUNTER_GLES_CULLED_TRIANGLES, + COUNTER_GLES_CULLED_LINES, + COUNTER_GLES_BACKFACE_TRIANGLES, + COUNTER_GLES_GBCLIP_TRIANGLES, + COUNTER_GLES_GBCLIP_LINES, + COUNTER_GLES_TRIANGLES_DRAWN, + COUNTER_GLES_DRAWCALL_TIME, + COUNTER_GLES_TRIANGLES_COUNT, + COUNTER_GLES_INDEPENDENT_TRIANGLES_COUNT, + COUNTER_GLES_STRIP_TRIANGLES_COUNT, + COUNTER_GLES_FAN_TRIANGLES_COUNT, + COUNTER_GLES_LINES_COUNT, + COUNTER_GLES_INDEPENDENT_LINES_COUNT, + COUNTER_GLES_STRIP_LINES_COUNT, + COUNTER_GLES_LOOP_LINES_COUNT, + + /* Special counter */ + + /* Framebuffer capture pseudo-counter */ + COUNTER_FILMSTRIP, + COUNTER_FREQUENCY, + COUNTER_VOLTAGE, + COUNTER_VP_ACTIVITY, + COUNTER_FP_ACTIVITY, + COUNTER_TOTAL_ALLOC_PAGES, + + /* Memory usage counter */ + COUNTER_MEM_UNTYPED, + COUNTER_MEM_VB_IB, + COUNTER_MEM_TEXTURE, + COUNTER_MEM_VARYING, + COUNTER_MEM_RT, + COUNTER_MEM_PBUFFER, + /* memory usages for gp command */ + COUNTER_MEM_PLBU_HEAP, + COUNTER_MEM_POINTER_ARRAY, + COUNTER_MEM_SLAVE_TILELIST, + COUNTER_MEM_UNTYPE_GP_CMDLIST, + /* memory usages for polygon list command */ + COUNTER_MEM_POLYGON_CMDLIST, + /* memory usages for pp command */ + COUNTER_MEM_TD, + COUNTER_MEM_RSW, + /* other memory usages */ + COUNTER_MEM_SHADER, + COUNTER_MEM_STREAMS, + COUNTER_MEM_FRAGMENT_STACK, + COUNTER_MEM_UNIFORM, + /* Special mem usage, which is used for mem pool allocation */ + COUNTER_MEM_UNTYPE_MEM_POOL, + COUNTER_MEM_UNTYPE_SURFACE, + + NUMBER_OF_EVENTS +} _mali_osk_counter_id; + +#define FIRST_ACTIVITY_EVENT ACTIVITY_VP_0 +#define LAST_ACTIVITY_EVENT ACTIVITY_FP_7 + +#define FIRST_HW_COUNTER COUNTER_L2_0_C0 +#define LAST_HW_COUNTER COUNTER_FP_7_C1 + +#define FIRST_SW_COUNTER COUNTER_EGL_BLIT_TIME +#define LAST_SW_COUNTER COUNTER_GLES_LOOP_LINES_COUNT + +#define FIRST_SPECIAL_COUNTER COUNTER_FILMSTRIP +#define LAST_SPECIAL_COUNTER COUNTER_TOTAL_ALLOC_PAGES + +#define FIRST_MEM_COUNTER COUNTER_MEM_UNTYPED +#define LAST_MEM_COUNTER COUNTER_MEM_UNTYPE_SURFACE + +#define MALI_PROFILING_MEM_COUNTERS_NUM (LAST_MEM_COUNTER - FIRST_MEM_COUNTER + 1) +#define MALI_PROFILING_SPECIAL_COUNTERS_NUM (LAST_SPECIAL_COUNTER - FIRST_SPECIAL_COUNTER + 1) +#define MALI_PROFILING_SW_COUNTERS_NUM (LAST_SW_COUNTER - FIRST_SW_COUNTER + 1) + +/** + * Define the stream header types for the profiling stream. + */ +#define STREAM_HEADER_FRAMEBUFFER 0x05 /* The stream packet header type for framebuffer dumping. */ +#define STREAM_HEADER_COUNTER_VALUE 0x09 /* The stream packet header type for hw/sw/memory counter sampling.
*/ +#define STREAM_HEADER_CORE_ACTIVITY 0x0a /* The stream packet header type for activity counter sampling. */ +#define STREAM_HEADER_SIZE 5 + +/** + * Define the packet header types of profiling control packets. + */ +#define PACKET_HEADER_ERROR 0x80 /* The response packet header type if error. */ +#define PACKET_HEADER_ACK 0x81 /* The response packet header type if OK. */ +#define PACKET_HEADER_COUNTERS_REQUEST 0x82 /* The control packet header type to request counter information from ddk. */ +#define PACKET_HEADER_COUNTERS_ACK 0x83 /* The response packet header type to send out counter information. */ +#define PACKET_HEADER_COUNTERS_ENABLE 0x84 /* The control packet header type to enable counters. */ +#define PACKET_HEADER_START_CAPTURE_VALUE 0x85 /* The control packet header type to start capturing values. */ + +#define PACKET_HEADER_SIZE 5 + +/** + * Structure to pass performance counter data of a Mali core + */ +typedef struct _mali_profiling_core_counters { + u32 source0; + u32 value0; + u32 source1; + u32 value1; +} _mali_profiling_core_counters; + +/** + * Structure to pass performance counter data of Mali L2 cache cores + */ +typedef struct _mali_profiling_l2_counter_values { + struct _mali_profiling_core_counters cores[MAX_NUM_L2_CACHE_CORES]; +} _mali_profiling_l2_counter_values; + +/** + * Structure to pass data defining Mali instance in use: + * + * mali_product_id - Mali product id + * mali_version_major - Mali version major number + * mali_version_minor - Mali version minor number + * num_of_l2_cores - number of L2 cache cores + * num_of_fp_cores - number of fragment processor cores + * num_of_vp_cores - number of vertex processor cores + */ +typedef struct _mali_profiling_mali_version { + u32 mali_product_id; + u32 mali_version_major; + u32 mali_version_minor; + u32 num_of_l2_cores; + u32 num_of_fp_cores; + u32 num_of_vp_cores; +} _mali_profiling_mali_version; + +/** + * Structure to define the mali profiling counter struct. + */ +typedef struct mali_profiling_counter { + char counter_name[40]; + u32 counter_id; + u32 counter_event; + u32 prev_counter_value; + u32 current_counter_value; + u32 key; + int enabled; +} mali_profiling_counter; + +/* + * List of possible actions to be controlled by Streamline. + * The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting. + * We cannot use the enums in mali_uk_types.h because they are unknown inside gator. + */ +#define FBDUMP_CONTROL_ENABLE (1) +#define FBDUMP_CONTROL_RATE (2) +#define SW_COUNTER_ENABLE (3) +#define FBDUMP_CONTROL_RESIZE_FACTOR (4) +#define MEM_COUNTER_ENABLE (5) +#define ANNOTATE_PROFILING_ENABLE (6) + +void _mali_profiling_control(u32 action, u32 value); + +u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values); + +int _mali_profiling_set_event(u32 counter_id, s32 event_id); + +u32 _mali_profiling_get_api_version(void); + +void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values); + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_UTGARD_PROFILING_GATOR_API_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_uk_types.h b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_uk_types.h --- a/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_uk_types.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/include/linux/mali/mali_utgard_uk_types.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,1100 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + * Class Path Exception + * Linking this library statically or dynamically with other modules is making a combined work based on this library. + * Thus, the terms and conditions of the GNU General Public License cover the whole combination. + * As a special exception, the copyright holders of this library give you permission to link this library with independent modules + * to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting + * executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify + * this library, you may extend this exception to your version of the library, but you are not obligated to do so. + * If you do not wish to do so, delete this exception statement from your version. + */ + +/** + * @file mali_utgard_uk_types.h + * Defines the types and constants used in the user-kernel interface + */ + +#ifndef __MALI_UTGARD_UK_TYPES_H__ +#define __MALI_UTGARD_UK_TYPES_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Iteration functions depend on these values being consecutive. */ +#define MALI_UK_TIMELINE_GP 0 +#define MALI_UK_TIMELINE_PP 1 +#define MALI_UK_TIMELINE_SOFT 2 +#define MALI_UK_TIMELINE_MAX 3 + +#define MALI_UK_BIG_VARYING_SIZE (1024*1024*2) + +typedef struct { + u32 points[MALI_UK_TIMELINE_MAX]; + s32 sync_fd; +} _mali_uk_fence_t; + +/** + * @addtogroup uddapi Unified Device Driver (UDD) APIs + * + * @{ + */ + +/** + * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs + * + * @{ + */ + +/** @defgroup _mali_uk_core U/K Core + * @{ */ + +/** Definition of subsystem numbers, to assist in creating a unique identifier + * for each U/K call. + * + * @see _mali_uk_functions */ +typedef enum { + _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */ + _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */ + _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */ + _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */ + _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */ + _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */ +} _mali_uk_subsystem_t; + +/** Within a function group each function has its unique sequence number + * to assist in creating a unique identifier for each U/K call. + * + * An ordered pair of numbers selected from + * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the + * U/K call across all groups of functions, and all functions.
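+ * + * For example, the pair (_MALI_UK_GP_SUBSYSTEM, _MALI_UK_GP_START_JOB) identifies the Vertex Processor start-job call; user space reaches it through MALI_IOC_GP2_START_JOB, built as _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s) in mali_utgard_ioctl.h above.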
*/ +typedef enum { + /** Core functions */ + + _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */ + _MALI_UK_CLOSE, /**< _mali_ukk_close() */ + _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */ + _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */ + _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */ + _MALI_UK_GET_USER_SETTING, /**< _mali_ukk_get_user_setting() *//**< [out] */ + _MALI_UK_GET_USER_SETTINGS, /**< _mali_ukk_get_user_settings() *//**< [out] */ + _MALI_UK_REQUEST_HIGH_PRIORITY, /**< _mali_ukk_request_high_priority() */ + _MALI_UK_TIMELINE_GET_LATEST_POINT, /**< _mali_ukk_timeline_get_latest_point() */ + _MALI_UK_TIMELINE_WAIT, /**< _mali_ukk_timeline_wait() */ + _MALI_UK_TIMELINE_CREATE_SYNC_FENCE, /**< _mali_ukk_timeline_create_sync_fence() */ + _MALI_UK_SOFT_JOB_START, /**< _mali_ukk_soft_job_start() */ + _MALI_UK_SOFT_JOB_SIGNAL, /**< _mali_ukk_soft_job_signal() */ + _MALI_UK_PENDING_SUBMIT, /**< _mali_ukk_pending_submit() */ + + /** Memory functions */ + + _MALI_UK_ALLOC_MEM = 0, /**< _mali_ukk_alloc_mem() */ + _MALI_UK_FREE_MEM, /**< _mali_ukk_free_mem() */ + _MALI_UK_BIND_MEM, /**< _mali_ukk_mem_bind() */ + _MALI_UK_UNBIND_MEM, /**< _mali_ukk_mem_unbind() */ + _MALI_UK_COW_MEM, /**< _mali_ukk_mem_cow() */ + _MALI_UK_COW_MODIFY_RANGE, /**< _mali_ukk_mem_cow_modify_range() */ + _MALI_UK_RESIZE_MEM, /**< _mali_ukk_mem_resize() */ + _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */ + _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */ + _MALI_UK_DMA_BUF_GET_SIZE, /**< _mali_ukk_dma_buf_get_size() */ + _MALI_UK_MEM_WRITE_SAFE, /**< _mali_uku_mem_write_safe() */ + + /** Common functions for each core */ + + _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */ + _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */ + _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */ + + /** Fragment Processor Functions */ + + _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */ + _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */ + _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */ + _MALI_UK_PP_DISABLE_WB, /**< _mali_ukk_pp_job_disable_wb() */ + _MALI_UK_PP_AND_GP_START_JOB, /**< _mali_ukk_pp_and_gp_start_job() */ + + /** Vertex Processor Functions */ + + _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */ + _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */ + _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */ + _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */ + + /** Profiling functions */ + + _MALI_UK_PROFILING_ADD_EVENT = 0, /**< __mali_uku_profiling_add_event() */ + _MALI_UK_PROFILING_REPORT_SW_COUNTERS,/**< __mali_uku_profiling_report_sw_counters() */ + _MALI_UK_PROFILING_MEMORY_USAGE_GET, /**< __mali_uku_profiling_memory_usage_get() */ + _MALI_UK_PROFILING_STREAM_FD_GET, /**< __mali_uku_profiling_stream_fd_get() */ + _MALI_UK_PROFILING_CONTROL_SET, /**< __mali_uku_profiling_control_set() */ + + /** VSYNC reporting functions */ + _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */ +} _mali_uk_functions; + +/** @defgroup _mali_uk_getsysteminfo U/K Get System Info + * @{ */ + +/** +
* Type definition for the core version number. + * Used when returning the version number read from a core + * + * Its format is that of the 32-bit Version register for a particular core. + * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference + * Manual", ARM DDI 0415C, for more information. + */ +typedef u32 _mali_core_version; + +/** @} */ /* end group _mali_uk_core */ + + +/** @defgroup _mali_uk_gp U/K Vertex Processor + * @{ */ + +/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response + * @{ */ + +/** @brief Arguments for _mali_ukk_gp_suspend_response() + * + * When _mali_wait_for_notification() receives notification that a + * Vertex Processor job was suspended, you need to send a response to indicate + * what needs to happen with this job. You can either abort or resume the job. + * + * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or + * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap + * for the job that will resolve the out of memory condition for the job. + * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification; + * this is an identifier for the suspended job + * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If + * you resume it, @c arguments[0] should specify the Mali start address for the new + * heap and @c arguments[1] the Mali end address of the heap. + * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open() + * + */ +typedef enum _maligp_job_suspended_response_code { + _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */ + _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */ +} _maligp_job_suspended_response_code; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */ + _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */ + u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */ +} _mali_uk_gp_suspend_response_s; + +/** @} */ /* end group _mali_uk_gp_suspend_response_s */ + +/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job + * @{ */ + +/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */ +typedef enum { + _MALI_UK_JOB_STATUS_END_SUCCESS = 1 << (16 + 0), + _MALI_UK_JOB_STATUS_END_OOM = 1 << (16 + 1), + _MALI_UK_JOB_STATUS_END_ABORT = 1 << (16 + 2), + _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1 << (16 + 3), + _MALI_UK_JOB_STATUS_END_HANG = 1 << (16 + 4), + _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1 << (16 + 5), + _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1 << (16 + 6), + _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1 << (16 + 7), + _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1 << (16 + 8), + _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1 << (16 + 9) +} _mali_uk_job_status; + +#define MALIGP2_NUM_REGS_FRAME (6) + +/** @brief Arguments for _mali_ukk_gp_start_job() + * + * To start a Vertex Processor job + * - associate the request with a reference to a @c mali_gp_job_info by setting + * user_job_ptr to the address of the @c mali_gp_job_info of the job. + * - set @c priority to the priority of the @c mali_gp_job_info + * - specify a timeout for the job by setting @c watchdog_msecs to the number of + * milliseconds the job is allowed to run.
Specifying a value of 0 selects the + * default timeout in use by the device driver. + * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers. + * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero + * for a non-instrumented build. For an instrumented build you can use up + * to two performance counters. Set the corresponding bit in @c perf_counter_flag + * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify + * the source of what needs to get counted (e.g. number of vertex loader + * cache hits). For source id values, see ARM DDI0415A, Table 3-60. + * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open() + * + * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the + * result of the request (see \ref _mali_uk_start_job_status). If the job could + * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be + * tried again. + * + * After the job has started, @c _mali_wait_for_notification() will be notified + * that the job finished or got suspended. It may get suspended due to + * resource shortage. If it finished (see _mali_ukk_wait_for_notification()) + * the notification will contain a @c _mali_uk_gp_job_finished_s result. If + * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s + * result. + * + * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status), + * the number of milliseconds the job took to render, and values of core registers + * when the job finished (irq status, performance counters, renderer list + * address). A job has finished successfully when its status is + * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering + * the job, or software detected the job is taking more than watchdog_msecs to + * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG. + * If the hardware detected a bus error while accessing memory associated with the + * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT. + * Status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to + * stop the job but the job didn't start on the hardware yet, e.g. when the + * driver shuts down. + * + * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains + * the @c user_job_ptr identifier used to start the job with, the @c reason + * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie + * to identify the core on which the job stalled. This @c cookie will be needed + * when responding to this notification by means of _mali_ukk_gp_suspend_response(). + * The response is either to abort or + * resume the job. If the job got suspended due to an out of memory condition + * you may be able to resolve this by providing more memory and resuming the job. + * + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */ + u32 priority; /**< [in] job priority.
A lower number means higher priority */ + u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */ + u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */ + u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */ + u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */ + u32 frame_builder_id; /**< [in] id of the originating frame builder */ + u32 flush_id; /**< [in] flush id within the originating frame builder */ + _mali_uk_fence_t fence; /**< [in] fence this job must wait on */ + u64 timeline_point_ptr; /**< [in,out] pointer to u32: location where point on gp timeline for this job will be written */ + u32 varying_memsize; /**< [in] size of varying memory to use for deferred bind */ + u32 deferred_mem_num; + u64 deferred_mem_list; /**< [in] memory handle list of varying buffers to use for deferred bind */ +} _mali_uk_gp_start_job_s; + +#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */ +#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */ +#define _MALI_PERFORMANCE_COUNTER_FLAG_HEATMAP_ENABLE (1<<2) /**< Enable per-tile (aka heatmap) generation for a job (using the enabled counter sources) */ + +/** @} */ /* end group _mali_uk_gpstartjob_s */ + +typedef struct { + u64 user_job_ptr; /**< [out] identifier for the job in user space */ + _mali_uk_job_status status; /**< [out] status of finished job */ + u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */ + u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */ + u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */ + u32 pending_big_job_num; +} _mali_uk_gp_job_finished_s; + +typedef struct { + u64 user_job_ptr; /**< [out] identifier for the job in user space */ + u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */ +} _mali_uk_gp_job_suspended_s; + +/** @} */ /* end group _mali_uk_gp */ + + +/** @defgroup _mali_uk_pp U/K Fragment Processor + * @{ */ + +#define _MALI_PP_MAX_SUB_JOBS 8 + +#define _MALI_PP_MAX_FRAME_REGISTERS ((0x058/4)+1) + +#define _MALI_PP_MAX_WB_REGISTERS ((0x02C/4)+1) + +#define _MALI_DLBU_MAX_REGISTERS 4 + +/** Flag for _mali_uk_pp_start_job_s */ +#define _MALI_PP_JOB_FLAG_NO_NOTIFICATION (1<<0) +#define _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE (1<<1) +#define _MALI_PP_JOB_FLAG_PROTECTED (1<<2) + +/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job + * @{ */ + +/** @brief Arguments for _mali_ukk_pp_start_job() + * + * To start a Fragment Processor job + * - associate the request with a reference to a mali_pp_job by setting + * @c user_job_ptr to the address of the @c mali_pp_job of the job. + * - set @c priority to the priority of the mali_pp_job + * - specify a timeout for the job by setting @c watchdog_msecs to the number of + * milliseconds the job is allowed to run. Specifying a value of 0 selects the + * default timeout in use by the device driver. + * - copy the frame registers from the @c mali_pp_job into @c frame_registers. + * For MALI200 you also need to copy the write back 0, 1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero + * for a non-instrumented build. For an instrumented build you can use up + * to two performance counters. Set the corresponding bit in @c perf_counter_flag + * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify + * the source of what needs to get counted (e.g. number of vertex loader + * cache hits). For source id values, see ARM DDI0415A, Table 3-60. + * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open() + * + * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the + * result of the request (see \ref _mali_uk_start_job_status). If the job could + * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be + * tried again. + * + * After the job has started, _mali_wait_for_notification() will be notified + * when the job finished. The notification will contain a + * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr + * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status), + * the number of milliseconds the job took to render, and values of core registers + * when the job finished (irq status, performance counters, renderer list + * address). A job has finished successfully when its status is + * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering + * the job, or software detected the job is taking more than @c watchdog_msecs to + * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG. + * If the hardware detected a bus error while accessing memory associated with the + * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT. + * Status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to + * stop the job but the job didn't start on the hardware yet, e.g. when the + * driver shuts down. + * + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 user_job_ptr; /**< [in] identifier for the job in user space */ + u32 priority; /**< [in] job priority.
A lower number means higher priority */ + u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< [in] core specific registers associated with first sub job, see ARM DDI0415A */ + u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_FRAME registers for sub job 1-7 */ + u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< [in] ADDR_STACK registers for sub job 1-7 */ + u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS]; + u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS]; + u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS]; + u32 dlbu_registers[_MALI_DLBU_MAX_REGISTERS]; /**< [in] Dynamic load balancing unit registers */ + u32 num_cores; /**< [in] Number of cores to set up (valid range: 1-8(M450) or 4(M400)) */ + u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */ + u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */ + u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */ + u32 frame_builder_id; /**< [in] id of the originating frame builder */ + u32 flush_id; /**< [in] flush id within the originating frame builder */ + u32 flags; /**< [in] See _MALI_PP_JOB_FLAG_* for a list of available flags */ + u32 tilesx; /**< [in] number of tiles in the x direction (needed for heatmap generation) */ + u32 tilesy; /**< [in] number of tiles in the y direction (needed for reading the heatmap memory) */ + u32 heatmap_mem; /**< [in] memory address to store counter values per tile (aka heatmap) */ + u32 num_memory_cookies; /**< [in] number of memory cookies attached to job */ + u64 memory_cookies; /**< [in] pointer to array of u32 memory cookies attached to job */ + _mali_uk_fence_t fence; /**< [in] fence this job must wait on */ + u64 timeline_point_ptr; /**< [in,out] pointer to location of u32 where point on pp timeline for this job will be written */ +} _mali_uk_pp_start_job_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 gp_args; /**< [in,out] GP uk arguments (see _mali_uk_gp_start_job_s) */ + u64 pp_args; /**< [in,out] PP uk arguments (see _mali_uk_pp_start_job_s) */ +} _mali_uk_pp_and_gp_start_job_s; + +/** @} */ /* end group _mali_uk_ppstartjob_s */ + +typedef struct { + u64 user_job_ptr; /**< [out] identifier for the job in user space */ + _mali_uk_job_status status; /**< [out] status of finished job */ + u32 perf_counter0[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 0 (see ARM DDI0415A), one for each sub job */ + u32 perf_counter1[_MALI_PP_MAX_SUB_JOBS]; /**< [out] value of performance counter 1 (see ARM DDI0415A), one for each sub job */ + u32 perf_counter_src0; + u32 perf_counter_src1; +} _mali_uk_pp_job_finished_s; + +typedef struct { + u32 number_of_enabled_cores; /**< [out] the new number of enabled cores */ +} _mali_uk_pp_num_cores_changed_s; + + + +/** + * Flags to indicate write-back units + */ +typedef enum { + _MALI_UK_PP_JOB_WB0 = 1, + _MALI_UK_PP_JOB_WB1 = 2, + _MALI_UK_PP_JOB_WB2 = 4, +} _mali_uk_pp_job_wbx_flag; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 fb_id; /**< [in] Frame builder ID of job to disable WB units for */ + u32 wb0_memory; + u32 wb1_memory; + u32 wb2_memory; +} _mali_uk_pp_disable_wb_s; + + +/** @} */ /* end group _mali_uk_pp */ + +/** @defgroup _mali_uk_soft_job U/K Soft Job + * @{ */ + +typedef struct { + u64 ctx; /**<
[in,out] user-kernel context (trashed on output) */ + u64 user_job; /**< [in] identifier for the job in user space */ + u64 job_id_ptr; /**< [in,out] pointer to location of u32 where job id will be written */ + _mali_uk_fence_t fence; /**< [in] fence this job must wait on */ + u32 point; /**< [out] point on soft timeline for this job */ + u32 type; /**< [in] type of soft job */ +} _mali_uk_soft_job_start_s; + +typedef struct { + u64 user_job; /**< [out] identifier for the job in user space */ +} _mali_uk_soft_job_activated_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 job_id; /**< [in] id for soft job */ +} _mali_uk_soft_job_signal_s; + +/** @} */ /* end group _mali_uk_soft_job */ + +typedef struct { + u32 counter_id; + u32 key; + int enable; +} _mali_uk_annotate_profiling_mem_counter_s; + +typedef struct { + u32 sampling_rate; + int enable; +} _mali_uk_annotate_profiling_enable_s; + + +/** @addtogroup _mali_uk_core U/K Core + * @{ */ + +/** @defgroup _mali_uk_waitfornotification_s Wait For Notification + * @{ */ + +/** @brief Notification type encodings + * + * Each Notification type is an ordered pair of (subsystem,id), and is unique. + * + * The encoding of subsystem,id into a 32-bit word is: + * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK) + * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK) + * + * @see _mali_uk_wait_for_notification_s + */ +typedef enum { + /** core notifications */ + + _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20, + _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40, + _MALI_NOTIFICATION_SETTINGS_CHANGED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x80, + _MALI_NOTIFICATION_SOFT_ACTIVATED = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x100, + + /** Fragment Processor notifications */ + + _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10, + _MALI_NOTIFICATION_PP_NUM_CORE_CHANGE = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x20, + + /** Vertex Processor notifications */ + + _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10, + _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20, + + /** Profiling notifications */ + _MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x10, + _MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE = (_MALI_UK_PROFILING_SUBSYSTEM << 16) | 0x20, +} _mali_uk_notification_type; + +/** to assist in splitting up 32-bit notification value in subsystem and id value */ +#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000 +#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16 +#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF +#define _MALI_NOTIFICATION_ID_SHIFT 0 + + +/** @brief Enumeration of possible settings which match mali_setting_t in user space + * + * + */ +typedef enum { + _MALI_UK_USER_SETTING_SW_EVENTS_ENABLE = 0, + _MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, + _MALI_UK_USER_SETTING_DEPTHBUFFER_CAPTURE_ENABLED, + _MALI_UK_USER_SETTING_STENCILBUFFER_CAPTURE_ENABLED, + _MALI_UK_USER_SETTING_PER_TILE_COUNTERS_CAPTURE_ENABLED, + _MALI_UK_USER_SETTING_BUFFER_CAPTURE_COMPOSITOR, + _MALI_UK_USER_SETTING_BUFFER_CAPTURE_WINDOW, + _MALI_UK_USER_SETTING_BUFFER_CAPTURE_OTHER, + _MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, + _MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, + _MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, + _MALI_UK_USER_SETTING_MAX, +} _mali_uk_user_setting_t; + +/* See 
mali_user_settings_db.c */ +extern const char *_mali_uk_user_setting_descriptions[]; +#define _MALI_UK_USER_SETTING_DESCRIPTIONS \ + { \ + "sw_events_enable", \ + "colorbuffer_capture_enable", \ + "depthbuffer_capture_enable", \ + "stencilbuffer_capture_enable", \ + "per_tile_counters_enable", \ + "buffer_capture_compositor", \ + "buffer_capture_window", \ + "buffer_capture_other", \ + "buffer_capture_n_frames", \ + "buffer_capture_resize_factor", \ + "sw_counters_enable", \ + }; + +/** @brief struct to hold the value of a particular setting as seen in the kernel space + */ +typedef struct { + _mali_uk_user_setting_t setting; + u32 value; +} _mali_uk_settings_changed_s; + +/** @brief Arguments for _mali_ukk_wait_for_notification() + * + * On successful return from _mali_ukk_wait_for_notification(), the members of + * this structure will indicate the reason for notification. + * + * Specifically, the source of the notification can be identified by the + * subsystem and id fields of the mali_uk_notification_type in the code.type + * member. The type member is encoded in a way to divide up the types into a + * subsystem field, and a per-subsystem ID field. See + * _mali_uk_notification_type for more information. + * + * Interpreting the data union member depends on the notification type: + * + * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS + * - The kernel side is shutting down. No further + * _mali_uk_wait_for_notification() calls should be made. + * - In this case, the value of the data union member is undefined. + * - This is used to indicate to the user space client that it should close + * the connection to the Mali Device Driver. + * - type == _MALI_NOTIFICATION_PP_FINISHED + * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr + * identifier used to start the job with, the job status, the number of milliseconds the job took to render, + * and values of core registers when the job finished (irq status, performance counters, renderer list + * address). + * - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED. + * - If the hardware detected a timeout while rendering the job, or software detected the job is + * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will + * indicate _MALI_UK_JOB_STATUS_HANG. + * - If the hardware detected a bus error while accessing memory associated with the job, status will + * indicate _MALI_UK_JOB_STATUS_SEG_FAULT. + * - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job + * didn't start on the hardware yet, e.g. when the driver closes. + * - type == _MALI_NOTIFICATION_GP_FINISHED + * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of + * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned. + * The status values have the same meaning as for type == _MALI_NOTIFICATION_PP_FINISHED. + * - type == _MALI_NOTIFICATION_GP_STALLED + * - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr + * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on + * which the job stalled. + * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY + * when the polygon list builder unit has run out of memory.
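+ * + * A minimal dispatch sketch (illustrative only; the mali_handle_* helpers are hypothetical and error handling is omitted): + * + * _mali_uk_wait_for_notification_s args = { 0 }; + * args.ctx = ctx; + * if (_MALI_OSK_ERR_OK == _mali_ukk_wait_for_notification(&args)) { + * switch (args.type) { + * case _MALI_NOTIFICATION_GP_FINISHED: + * mali_handle_gp_finished(&args.data.gp_job_finished); break; + * case _MALI_NOTIFICATION_GP_STALLED: + * mali_handle_gp_stalled(&args.data.gp_job_suspended); break; + * case _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS: + * return; + * default: break; + * } + * }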
+ */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_uk_notification_type type; /**< [out] Type of notification available */ + union { + _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */ + _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */ + _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */ + _mali_uk_settings_changed_s setting_changed;/**< [out] Notification data for _MALI_NOTIFICATION_SETTINGS_CHANGED notification type */ + _mali_uk_soft_job_activated_s soft_job_activated; /**< [out] Notification data for _MALI_NOTIFICATION_SOFT_ACTIVATED notification type */ + _mali_uk_annotate_profiling_mem_counter_s profiling_mem_counter; + _mali_uk_annotate_profiling_enable_s profiling_enable; + } data; +} _mali_uk_wait_for_notification_s; + +/** @brief Arguments for _mali_ukk_post_notification() + * + * Posts the specified notification to the notification queue for this application. + * This is used to send a quit message to the callback thread. + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_uk_notification_type type; /**< [in] Type of notification to post */ +} _mali_uk_post_notification_s; + +/** @} */ /* end group _mali_uk_waitfornotification_s */ + +/** @defgroup _mali_uk_getapiversion_s Get API Version + * @{ */ + +/** helpers for Device Driver API version handling */ + +/** @brief Encode a version ID from a 16-bit input + * + * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */ +#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x)) + +/** @brief Check whether a 32-bit value is likely to be a Device Driver API + * version ID. */ +#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF)) + +/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version + * ID */ +#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF) + +/** @brief Determine whether two 32-bit encoded version IDs match */ +#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y)))) + +/** + * API version define. + * Indicates the version of the kernel API + * The version is a 16bit integer incremented on each API change. + * The 16bit integer is stored twice in a 32bit integer + * For example, for version 1 the value would be 0x00010001 + */ +#define _MALI_API_VERSION 900 +#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION) + +/** + * The API version is a 16-bit integer stored in both the lower and upper 16-bits + * of a 32-bit value. The 16-bit API version value is incremented on each API + * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s. + */ +typedef u32 _mali_uk_api_version; + +/** @brief Arguments for _mali_uk_get_api_version() + * + * The user-side interface version must be written into the version member, + * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of + * the kernel-side interface. + * + * On successful return, the version member will be the API version of the + * kernel-side interface. The _MALI_UK_API_VERSION macro defines the current version + * of the API.
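+ * (Worked example: with _MALI_API_VERSION == 900 == 0x384, _MAKE_VERSION_ID() yields 0x03840384; _IS_VERSION_ID() holds since both halves match, and _GET_VERSION() recovers 0x384.)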
+ * + * The compatible member must be checked to see if the version of the user-side + * interface is compatible with the kernel-side interface, since future versions + * of the interface may be backwards compatible. + */ +typedef struct { + u32 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */ + int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */ +} _mali_uk_get_api_version_s; + +/** @brief Arguments for _mali_uk_get_api_version_v2() + * + * The user-side interface version must be written into the version member, + * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of + * the kernel-side interface. + * + * On successful return, the version member will be the API version of the + * kernel-side interface. The _MALI_UK_API_VERSION macro defines the current version + * of the API. + * + * The compatible member must be checked to see if the version of the user-side + * interface is compatible with the kernel-side interface, since future versions + * of the interface may be backwards compatible. + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */ + int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */ +} _mali_uk_get_api_version_v2_s; + +/** @} */ /* end group _mali_uk_getapiversion_s */ + +/** @defgroup _mali_uk_get_user_settings_s Get user space settings */ + +/** @brief struct to keep the matching values of the user space settings within a certain context + * + * Each member of the settings array corresponds to a matching setting in the user space and its value is the value + * of that particular setting. + * + * All settings are given reference to the context pointed to by the ctx pointer. + * + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 settings[_MALI_UK_USER_SETTING_MAX]; /**< [out] The values for all settings */ +} _mali_uk_get_user_settings_s; + +/** @brief struct to hold the value of a particular setting from the user space within a given context + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_uk_user_setting_t setting; /**< [in] setting to get */ + u32 value; /**< [out] value of setting */ +} _mali_uk_get_user_setting_s; + +/** @brief Arguments for _mali_ukk_request_high_priority() */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ +} _mali_uk_request_high_priority_s; + +/** @brief Arguments for _mali_ukk_pending_submit() */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ +} _mali_uk_pending_submit_s; + +/** @} */ /* end group _mali_uk_core */ + + +/** @defgroup _mali_uk_memory U/K Memory + * @{ */ + +#define _MALI_MEMORY_ALLOCATE_RESIZEABLE (1<<4) /* BUFFER can trim down/grow */ +#define _MALI_MEMORY_ALLOCATE_NO_BIND_GPU (1<<5) /* Not mapped to GPU at allocation; must call bind later */ +#define _MALI_MEMORY_ALLOCATE_SWAPPABLE (1<<6) /* Allocate swappable memory. */ +#define _MALI_MEMORY_ALLOCATE_DEFER_BIND (1<<7) /* Not mapped to GPU at allocation; must call bind later */ +#define _MALI_MEMORY_ALLOCATE_SECURE (1<<8) /* Allocate secure memory.
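*/ +/* Illustrative note (an assumption drawn from the flag comments above): allocations flagged _MALI_MEMORY_ALLOCATE_NO_BIND_GPU or _MALI_MEMORY_ALLOCATE_DEFER_BIND are mapped to the GPU later, e.g. via MALI_IOC_MEM_BIND or the deferred_mem_list of _mali_uk_gp_start_job_s.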
*/ + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 gpu_vaddr; /**< [in] GPU virtual address */ + u32 vsize; /**< [in] virtual size of the allocation */ + u32 psize; /**< [in] physical size of the allocation */ + u32 flags; + u64 backend_handle; /**< [out] backend handle */ + s32 secure_shared_fd; /**< [in] the mem handle for secure mem */ +} _mali_uk_alloc_mem_s; + + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 gpu_vaddr; /**< [in] use as handle to free allocation */ + u32 free_pages_nr; /**< [out] record the number of free pages */ +} _mali_uk_free_mem_s; + + +#define _MALI_MEMORY_BIND_BACKEND_UMP (1<<8) +#define _MALI_MEMORY_BIND_BACKEND_DMA_BUF (1<<9) +#define _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY (1<<10) +#define _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY (1<<11) +#define _MALI_MEMORY_BIND_BACKEND_EXT_COW (1<<12) +#define _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION (1<<13) + + +#define _MALI_MEMORY_BIND_BACKEND_MASK (_MALI_MEMORY_BIND_BACKEND_UMP| \ + _MALI_MEMORY_BIND_BACKEND_DMA_BUF |\ + _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY |\ + _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY |\ + _MALI_MEMORY_BIND_BACKEND_EXT_COW |\ + _MALI_MEMORY_BIND_BACKEND_HAVE_ALLOCATION) + + +#define _MALI_MEMORY_GPU_READ_ALLOCATE (1<<16) + + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 vaddr; /**< [in] mali address to map the physical memory to */ + u32 size; /**< [in] size */ + u32 flags; /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */ + u32 padding; /**< padding for 32/64 struct alignment */ + union { + struct { + u32 secure_id; /**< [in] secure id */ + u32 rights; /**< [in] rights necessary for accessing memory */ + u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */ + } bind_ump; + struct { + u32 mem_fd; /**< [in] Memory descriptor */ + u32 rights; /**< [in] rights necessary for accessing memory */ + u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */ + } bind_dma_buf; + struct { + u32 phys_addr; /**< [in] physical address */ + u32 rights; /**< [in] rights necessary for accessing memory */ + u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */ + } bind_ext_memory; + } mem_union; +} _mali_uk_bind_mem_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 flags; /**< [in] see _MALI_MEMORY_BIND_BACKEND_* */ + u32 vaddr; /**< [in] identifier for mapped memory object in kernel space */ +} _mali_uk_unbind_mem_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 target_handle; /**< [in] handle of the allocation to COW */ + u32 target_offset; /**< [in] offset in target allocation to do COW (to support COW of memory allocated from a memory bank; PAGE_SIZE aligned) */ + u32 target_size; /**< [in] size of target allocation to do COW (to support memory bank; PAGE_SIZE aligned) (in bytes) */ + u32 range_start; /**< [in] reallocate range start offset, offset from the start of allocation (PAGE_SIZE aligned) */ + u32 range_size; /**< [in] reallocate size (PAGE_SIZE aligned) */ + u32 vaddr; /**< [in] mali address for the new allocation */ + u32 backend_handle; /**< [out] backend handle */ + u32 flags; +} _mali_uk_cow_mem_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 range_start; /**< [in] reallocate range start offset, offset from the start of allocation */ + u32 size; /**< [in] reallocate size */ + u32
vaddr; /**< [in] mali address for the new allocation */ + s32 change_pages_nr; /**< [out] record the page number change for the cow operation */ +} _mali_uk_cow_modify_range_s; + + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 mem_fd; /**< [in] Memory descriptor */ + u32 size; /**< [out] size */ +} _mali_uk_dma_buf_get_size_s; + +/** Flag for _mali_uk_map_external_mem_s, _mali_uk_attach_ump_mem_s and _mali_uk_attach_dma_buf_s */ +#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0) + + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 vaddr; /**< [in] the buffer to resize */ + u32 psize; /**< [in] wanted physical size of this memory */ +} _mali_uk_mem_resize_s; + +/** + * @brief Arguments for _mali_uk[uk]_mem_write_safe() + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 src; /**< [in] Pointer to source data */ + u64 dest; /**< [in] Destination Mali buffer */ + u32 size; /**< [in,out] Number of bytes to write/copy on input, number of bytes actually written/copied on output */ +} _mali_uk_mem_write_safe_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 size; /**< [out] size of MMU page table information (registers + page tables) */ +} _mali_uk_query_mmu_page_table_dump_size_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 size; /**< [in] size of buffer to receive mmu page table information */ + u64 buffer; /**< [in,out] buffer to receive mmu page table information */ + u32 register_writes_size; /**< [out] size of MMU register dump */ + u64 register_writes; /**< [out] pointer within buffer where MMU register dump is stored */ + u32 page_table_dump_size; /**< [out] size of MMU page table dump */ + u64 page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */ +} _mali_uk_dump_mmu_page_table_s; + +/** @} */ /* end group _mali_uk_memory */ + + +/** @addtogroup _mali_uk_pp U/K Fragment Processor + * @{ */ + +/** @brief Arguments for _mali_ukk_get_pp_number_of_cores() + * + * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open() + * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores + * will contain the number of Fragment Processor cores in the system. + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 number_of_total_cores; /**< [out] Total number of Fragment Processor cores in the system */ + u32 number_of_enabled_cores; /**< [out] Number of enabled Fragment Processor cores */ +} _mali_uk_get_pp_number_of_cores_s; + +/** @brief Arguments for _mali_ukk_get_pp_core_version() + * + * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open() + * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains + * the version that all Fragment Processor cores are compatible with. 
+ */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */ + u32 padding; +} _mali_uk_get_pp_core_version_s; + +/** @} */ /* end group _mali_uk_pp */ + + +/** @addtogroup _mali_uk_gp U/K Vertex Processor + * @{ */ + +/** @brief Arguments for _mali_ukk_get_gp_number_of_cores() + * + * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open() + * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores + * will contain the number of Vertex Processor cores in the system. + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */ +} _mali_uk_get_gp_number_of_cores_s; + +/** @brief Arguments for _mali_ukk_get_gp_core_version() + * + * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open() + * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains + * the version that all Vertex Processor cores are compatible with. + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */ +} _mali_uk_get_gp_core_version_s; + +/** @} */ /* end group _mali_uk_gp */ + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */ + u32 data[5]; /**< [in] event specific data */ +} _mali_uk_profiling_add_event_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 memory_usage; /**< [out] total memory usage */ + u32 vaddr; /**< [in] mali address for the cow allocation */ + s32 change_pages_nr; /**< [out] record the page number change for the cow operation */ +} _mali_uk_profiling_memory_usage_get_s; + + +/** @addtogroup _mali_uk_memory U/K Memory + * @{ */ + +/** @brief Arguments to _mali_ukk_mem_mmap() + * + * Use of the phys_addr member depends on whether the driver is compiled for + * Mali-MMU or non-MMU: + * - in the non-MMU case, this is the physical address of the memory as seen by + * the CPU (which may be a constant offset from that used by Mali) + * - in the MMU case, this is the Mali Virtual base address of the memory to + * allocate, and the particular physical pages used to back the memory are + * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages + * are not reported to user-space for security reasons. + * + * The cookie member must be stored for use later when freeing the memory by + * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure. + * + * The ukk_private word must be set to zero when calling from user-space. On + * the kernel side, the OS implementation of the U/K interface can use it to + * communicate data to the OS implementation of the OSK layer. In particular, + * _mali_ukk_get_big_block() calls _mali_ukk_mem_mmap() directly, and + * will communicate its own ukk_private word through the ukk_private member + * here. The common code itself will not inspect or modify the ukk_private + * word, and so it may be safely used for whatever purposes necessary to + * integrate Mali Memory handling into the OS. 
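+ * + * A minimal usage sketch (illustrative only; session_ctx, len and mali_vaddr + * are placeholder names, not taken from this driver): + * + *   _mali_uk_mem_mmap_s map_args = {0}; + *   map_args.ctx = session_ctx;            (context from _mali_ukk_open()) + *   map_args.size = len; + *   map_args.phys_addr = mali_vaddr;       (Mali virtual base in the MMU case) + *   _mali_ukk_mem_mmap(&map_args);         (on success, map_args.mapping is set) + * + *   _mali_uk_mem_munmap_s unmap_args = {0}; + *   unmap_args.ctx = session_ctx; + *   unmap_args.mapping = map_args.mapping; + *   unmap_args.size = len;                 (must equal the size passed to mmap) + *   _mali_ukk_mem_munmap(&unmap_args);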
+ * + * The uku_private member is currently reserved for use by the user-side + * implementation of the U/K interface. Its value must be zero. + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + void *mapping; /**< [out] Returns user-space virtual address for the mapping */ + u32 size; /**< [in] Size of the requested mapping */ + u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */ + mali_bool writeable; +} _mali_uk_mem_mmap_s; + +/** @brief Arguments to _mali_ukk_mem_munmap() + * + * The cookie and mapping members must be those returned from the same previous + * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie + * and mapping - that is, it must be the value originally supplied to a call to + * _mali_ukk_mem_mmap that returned the values of mapping and cookie. + * + * An error will be returned if an attempt is made to unmap only part of the + * originally obtained range, or to unmap more than was originally obtained. + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + void *mapping; /**< [in] The mapping returned from mmap call */ + u32 size; /**< [in] The size passed to mmap call */ +} _mali_uk_mem_munmap_s; +/** @} */ /* end group _mali_uk_memory */ + +/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module + * @{ */ + +/** @brief VSYNC events + * + * These events are reported when the DDK starts to wait for vsync and when the + * vsync has occurred and the DDK can continue on the next frame. + */ +typedef enum _mali_uk_vsync_event { + _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0, + _MALI_UK_VSYNC_EVENT_END_WAIT +} _mali_uk_vsync_event; + +/** @brief Arguments to _mali_ukk_vsync_event() + * + */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_uk_vsync_event event; /**< [in] VSYNC event type */ +} _mali_uk_vsync_event_report_s; + +/** @} */ /* end group _mali_uk_vsync */ + +/** @defgroup _mali_uk_sw_counters_report U/K Software Counter Reporting + * @{ */ + +/** @brief Software counter values + * + * Values recorded for each of the software counters during a single render pass. 
+ */ +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 counters; /**< [in] The array of u32 counter values */ + u32 num_counters; /**< [in] The number of elements in the counters array */ +} _mali_uk_sw_counters_report_s; + +/** @} */ /* end group _mali_uk_sw_counters_report */ + +/** @defgroup _mali_uk_timeline U/K Mali Timeline + * @{ */ + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u32 timeline; /**< [in] timeline id */ + u32 point; /**< [out] latest point on timeline */ +} _mali_uk_timeline_get_latest_point_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_uk_fence_t fence; /**< [in] fence */ + u32 timeout; /**< [in] timeout (0 for no wait, -1 for blocking) */ + u32 status; /**< [out] status of fence (1 if signaled, 0 if timeout) */ +} _mali_uk_timeline_wait_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + _mali_uk_fence_t fence; /**< [in] mali fence to create linux sync fence from */ + s32 sync_fd; /**< [out] file descriptor for new linux sync fence */ +} _mali_uk_timeline_create_sync_fence_s; + +/** @} */ /* end group _mali_uk_timeline */ + +/** @} */ /* end group u_k_api */ + +/** @} */ /* end group uddapi */ + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + s32 stream_fd; /**< [in] The profiling kernel base stream fd handle */ +} _mali_uk_profiling_stream_fd_get_s; + +typedef struct { + u64 ctx; /**< [in,out] user-kernel context (trashed on output) */ + u64 control_packet_data; /**< [in] the control packet data for control settings */ + u32 control_packet_size; /**< [in] The control packet size */ + u64 response_packet_data; /**< [out] The response packet data */ + u32 response_packet_size; /**< [in,out] The response packet size */ +} _mali_uk_profiling_control_set_s; + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_UTGARD_UK_TYPES_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/Kbuild b/drivers/gpu/arm/mali400/Kbuild --- a/drivers/gpu/arm/mali400/Kbuild 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/Kbuild 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,261 @@ +# +# Copyright (C) 2010-2011 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the GNU General Public License version 2 +# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained from Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# + +# This file is called by the Linux build system. 
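+# +# As a rough sketch (paths and values here are illustrative, not mandated by +# this file), the tunables below can be overridden on the make command line +# of an out-of-tree build: +# +#   make -C <KDIR> M=drivers/gpu/arm/mali400 USING_GPU_UTILIZATION=1 \ +#        OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=32 modules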
+ +# set up defaults if not defined by the user +TIMESTAMP ?= default +OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16 +USING_GPU_UTILIZATION ?= 0 +PROFILING_SKIP_PP_JOBS ?= 0 +PROFILING_SKIP_PP_AND_GP_JOBS ?= 0 +MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0 +MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0 +MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0 +MALI_UPPER_HALF_SCHEDULING ?= 1 +MALI_ENABLE_CPU_CYCLES ?= 0 + +# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases: +# The ARM proprietary product will only include the license/proprietary directory +# The GPL product will only include the license/gpl directory +ifeq ($(wildcard $(src)/linux/license/gpl/*),) + ccflags-y += -I$(src)/linux/license/proprietary + ifeq ($(CONFIG_MALI400_PROFILING),y) + $(error Profiling is incompatible with non-GPL license) + endif + ifeq ($(CONFIG_PM_RUNTIME),y) + $(error Runtime PM is incompatible with non-GPL license) + endif + ifeq ($(CONFIG_DMA_SHARED_BUFFER),y) + $(error DMA-BUF is incompatible with non-GPL license) + endif + $(error Linux Device integration is incompatible with non-GPL license) +else + ccflags-y += -I$(src)/linux/license/gpl +endif + +ifeq ($(USING_GPU_UTILIZATION), 1) + ifeq ($(USING_DVFS), 1) + $(error USING_GPU_UTILIZATION conflicts with USING_DVFS; read the Integration Guide to choose which one you need) + endif +endif + +ifeq ($(CONFIG_MALI_PLATFORM_S5P4418),y) +export MALI_PLATFORM=nexell +endif + +ifeq ($(CONFIG_MALI_PLATFORM_S5P6818),y) +export MALI_PLATFORM=nexell +endif + +ifeq ($(CONFIG_ARCH_EXYNOS4),y) +export MALI_PLATFORM=exynos4 +endif + +ifeq ($(MALI_PLATFORM_FILES),) +EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1 +export MALI_PLATFORM_FILES_BUILDIN = $(notdir $(wildcard $(src)/platform/$(MALI_PLATFORM)/*.c)) +export MALI_PLATFORM_FILES_ADD_PREFIX = $(addprefix platform/$(MALI_PLATFORM)/,$(MALI_PLATFORM_FILES_BUILDIN)) +endif + +mali-y += \ + linux/mali_osk_atomics.o \ + linux/mali_osk_irq.o \ + linux/mali_osk_wq.o \ + linux/mali_osk_locks.o \ + linux/mali_osk_wait_queue.o \ + linux/mali_osk_low_level_mem.o \ + linux/mali_osk_math.o \ + linux/mali_osk_memory.o \ + linux/mali_osk_misc.o \ + linux/mali_osk_mali.o \ + linux/mali_osk_notification.o \ + linux/mali_osk_time.o \ + linux/mali_osk_timers.o \ + linux/mali_osk_bitmap.o + +mali-y += linux/mali_memory.o linux/mali_memory_os_alloc.o +mali-y += linux/mali_memory_external.o +mali-y += linux/mali_memory_block_alloc.o +mali-y += linux/mali_memory_swap_alloc.o + +mali-y += \ + linux/mali_memory_manager.o \ + linux/mali_memory_virtual.o \ + linux/mali_memory_util.o \ + linux/mali_memory_cow.o \ + linux/mali_memory_defer_bind.o + +mali-y += \ + linux/mali_ukk_mem.o \ + linux/mali_ukk_gp.o \ + linux/mali_ukk_pp.o \ + linux/mali_ukk_core.o \ + linux/mali_ukk_soft_job.o \ + linux/mali_ukk_timeline.o + +mali-$(CONFIG_MALI_DEVFREQ) += \ + linux/mali_devfreq.o \ + common/mali_pm_metrics.o + +# Source files which are always included in a build +mali-y += \ + common/mali_kernel_core.o \ + linux/mali_kernel_linux.o \ + common/mali_session.o \ + linux/mali_device_pause_resume.o \ + common/mali_kernel_vsync.o \ + linux/mali_ukk_vsync.o \ + linux/mali_kernel_sysfs.o \ + common/mali_mmu.o \ + common/mali_mmu_page_directory.o \ + common/mali_mem_validation.o \ + common/mali_hw_core.o \ + common/mali_gp.o \ + common/mali_pp.o \ + common/mali_pp_job.o \ + common/mali_gp_job.o \ + common/mali_soft_job.o \ + common/mali_scheduler.o \ + common/mali_executor.o \ + 
common/mali_group.o \ + common/mali_dlbu.o \ + common/mali_broadcast.o \ + common/mali_pm.o \ + common/mali_pmu.o \ + common/mali_user_settings_db.o \ + common/mali_kernel_utilization.o \ + common/mali_control_timer.o \ + common/mali_l2_cache.o \ + common/mali_timeline.o \ + common/mali_timeline_fence_wait.o \ + common/mali_timeline_sync_fence.o \ + common/mali_spinlock_reentrant.o \ + common/mali_pm_domain.o \ + linux/mali_osk_pm.o \ + linux/mali_pmu_power_up_down.o \ + __malidrv_build_info.o + +ifneq ($(wildcard $(src)/linux/mali_slp_global_lock.c),) + mali-y += linux/mali_slp_global_lock.o +endif + +ifneq ($(MALI_PLATFORM_FILES),) + mali-y += $(MALI_PLATFORM_FILES:.c=.o) +endif + +ifneq ($(MALI_PLATFORM_FILES_ADD_PREFIX),) + mali-y += $(MALI_PLATFORM_FILES_ADD_PREFIX:.c=.o) +endif + +mali-$(CONFIG_MALI400_PROFILING) += linux/mali_ukk_profiling.o +mali-$(CONFIG_MALI400_PROFILING) += linux/mali_osk_profiling.o + +mali-$(CONFIG_MALI400_INTERNAL_PROFILING) += linux/mali_profiling_internal.o timestamp-$(TIMESTAMP)/mali_timestamp.o +ccflags-$(CONFIG_MALI400_INTERNAL_PROFILING) += -I$(src)/timestamp-$(TIMESTAMP) + +mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o +mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_secure.o +mali-$(CONFIG_SYNC) += linux/mali_sync.o +mali-$(CONFIG_MALI_DMA_BUF_FENCE) += linux/mali_dma_fence.o +ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android + +mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o + +mali-$(CONFIG_MALI_DVFS) += common/mali_dvfs_policy.o + +# Tell the Linux build system from which .o file to create the kernel module +obj-$(CONFIG_MALI400) := mali.o + +ccflags-y += $(EXTRA_DEFINES) + +# Set up our defines, which will be passed to gcc +ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP) +ccflags-y += -DMALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED=$(MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED) +ccflags-y += -DMALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS=$(MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS) +ccflags-y += -DMALI_STATE_TRACKING=1 +ccflags-y += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB) +ccflags-y += -DUSING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION) +ccflags-y += -DMALI_ENABLE_CPU_CYCLES=$(MALI_ENABLE_CPU_CYCLES) + +ifeq ($(MALI_UPPER_HALF_SCHEDULING),1) + ccflags-y += -DMALI_UPPER_HALF_SCHEDULING +endif + +# built-in include path is different +ifeq ($(MALI_PLATFORM_FILES),) +ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../ump/include/ +else +ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../../ump/include/ump +endif +ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG + +# Use our defines when compiling +ccflags-y += -I$(src) -I$(src)/include -I$(src)/common -I$(src)/linux -I$(src)/platform -Wno-date-time + +# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available +MALI_RELEASE_NAME=$(shell cat $(src)/.version 2> /dev/null) + +SVN_INFO = (cd $(src); svn info 2>/dev/null) + +ifneq ($(shell $(SVN_INFO) 2>/dev/null),) +# SVN detected +SVN_REV := $(shell $(SVN_INFO) | grep '^Revision: '| sed -e 's/^Revision: //' 2>/dev/null) +DRIVER_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV) +CHANGE_DATE := $(shell $(SVN_INFO) | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-) +CHANGED_REVISION := $(shell $(SVN_INFO) | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-) +REPO_URL := $(shell $(SVN_INFO) | grep '^URL: ' | cut -d: -f2- | cut -b2-) + +else # SVN +GIT_REV := $(shell cd $(src); git describe --always 
2>/dev/null) +ifneq ($(GIT_REV),) +# Git detected +DRIVER_REV := $(MALI_RELEASE_NAME)-$(GIT_REV) +CHANGE_DATE := $(shell cd $(src); git log -1 --format="%ci") +CHANGED_REVISION := $(GIT_REV) +REPO_URL := $(shell cd $(src); git describe --all --always 2>/dev/null) + +else # Git +# No Git or SVN detected +DRIVER_REV := $(MALI_RELEASE_NAME) +CHANGE_DATE := $(MALI_RELEASE_NAME) +CHANGED_REVISION := $(MALI_RELEASE_NAME) +endif +endif + +ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\" + +VERSION_STRINGS := +VERSION_STRINGS += API_VERSION=$(shell cd $(src); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)include/linux/mali/mali_utgard_uk_types.h | cut -d' ' -f 3 ) +VERSION_STRINGS += REPO_URL=$(REPO_URL) +VERSION_STRINGS += REVISION=$(DRIVER_REV) +VERSION_STRINGS += CHANGED_REVISION=$(CHANGED_REVISION) +VERSION_STRINGS += CHANGE_DATE=$(CHANGE_DATE) +VERSION_STRINGS += BUILD_DATE=$(shell date) +ifdef CONFIG_MALI400_DEBUG +VERSION_STRINGS += BUILD=debug +else +VERSION_STRINGS += BUILD=release +endif +VERSION_STRINGS += TARGET_PLATFORM=$(TARGET_PLATFORM) +VERSION_STRINGS += MALI_PLATFORM=$(MALI_PLATFORM) +VERSION_STRINGS += KDIR=$(KDIR) +VERSION_STRINGS += OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB) +VERSION_STRINGS += USING_UMP=$(CONFIG_MALI400_UMP) +VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING) +VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING) +VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION) +VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS) +VERSION_STRINGS += USING_DMA_BUF_FENCE=$(CONFIG_MALI_DMA_BUF_FENCE) +VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING) + +# Create file with Mali driver configuration +$(src)/__malidrv_build_info.c: + @echo 'const char *__malidrv_build_info(void) { return "malidrv: $(VERSION_STRINGS)";}' > $(src)/__malidrv_build_info.c diff -ENwbur a/drivers/gpu/arm/mali400/Kconfig b/drivers/gpu/arm/mali400/Kconfig --- a/drivers/gpu/arm/mali400/Kconfig 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/Kconfig 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,152 @@ +config MALI400 + tristate "Mali-300/400/450 support" + depends on ARM || ARM64 + select DMA_SHARED_BUFFER + ---help--- + This enables support for the ARM Mali-300, Mali-400, and Mali-450 + GPUs. + + To compile this driver as a module, choose M here: the module will be + called mali. + +config MALI450 + bool "Enable Mali-450 support" + depends on MALI400 + ---help--- + This enables support for Mali-450 specific features. + +config MALI470 + bool "Enable Mali-470 support" + depends on MALI400 + ---help--- + This enables support for Mali-470 specific features. + +config MALI400_DEBUG + bool "Enable debug in Mali driver" + depends on MALI400 + ---help--- + This enables extra debug checks and messages in the Mali driver. 
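+ +# A plausible selection for the NanoPi M3 target of this patch (illustrative, +# not a shipped defconfig): CONFIG_MALI400=y, CONFIG_MALI_PLATFORM_S5P6818=y +# (which selects MALI_DT and MALI_SHARED_INTERRUPTS), CONFIG_MALI400_PROFILING=y +# and CONFIG_MALI_DVFS=y.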
+ +choice + prompt "Platform configuration" + depends on (MALI400 || MALI450 || MALI470) + default MALI_PLATFORM_S5P6818 if ARM64 + default MALI_PLATFORM_S5P4418 if ARM + help + Select the SoC platform that contains a Mali Utgard GPU. + +config MALI_PLATFORM_S5P4418 + depends on ARCH_S5P4418 + bool "Nexell S5P4418" + select MALI_SHARED_INTERRUPTS + select MALI_DT + help + Select S5P4418 SoC configuration + +config MALI_PLATFORM_S5P6818 + depends on ARCH_S5P6818 + bool "Nexell S5P6818" + select MALI_SHARED_INTERRUPTS + select MALI_DT + help + Select S5P6818 SoC configuration +endchoice + +config MALI400_PROFILING + bool "Enable Mali profiling" + depends on MALI400 + select TRACEPOINTS + default y + ---help--- + This enables gator profiling of Mali GPU events. + +config MALI400_INTERNAL_PROFILING + bool "Enable internal Mali profiling API" + depends on MALI400_PROFILING + default n + ---help--- + This enables the internal legacy Mali profiling API. + +config MALI400_UMP + bool "Enable UMP support" + depends on MALI400 + ---help--- + This enables support for the UMP memory sharing API in the Mali driver. + +config MALI_DVFS + bool "Enable Mali dynamic frequency change" + depends on MALI400 && !MALI_DEVFREQ + default y + ---help--- + This enables support for dynamically changing the Mali frequency with the goal of lowering power consumption. + +config MALI_DMA_BUF_MAP_ON_ATTACH + bool "Map dma-buf attachments on attach" + depends on MALI400 && DMA_SHARED_BUFFER + default y + ---help--- + This makes the Mali driver map dma-buf attachments after doing + attach. If this is not set, the dma-buf attachments will be mapped + every time the GPU needs to access the buffer. + + Mapping on each access can cause lower performance. + +config MALI_SHARED_INTERRUPTS + bool "Support for shared interrupts" + depends on MALI400 + default n + ---help--- + Adds functionality required to properly support shared interrupts. Without this support, + the device driver will fail during insmod if it detects shared interrupts. This also + works when the GPU is not using shared interrupts, but might have a slight performance + impact. + +config MALI_PMU_PARALLEL_POWER_UP + bool "Power up Mali PMU domains in parallel" + depends on MALI400 + default n + ---help--- + This makes the Mali driver power up all PMU power domains in parallel, instead of + powering up domains one by one, with a slight delay in between. Powering on all power + domains at the same time may cause peak currents higher than what some systems can handle. + These systems must not enable this option. + +config MALI_DT + bool "Use device tree to initialize module" + depends on MALI400 && OF + default n + ---help--- + This enables the Mali driver to use the device tree to get platform resources + and disables the old config method. The Mali driver can run on platforms where + the device tree is enabled in the kernel and the corresponding hardware + description is implemented properly in the board DTS file. + +config MALI_DEVFREQ + bool "Use devfreq to tune frequency" + depends on MALI400 && PM_DEVFREQ + default n + ---help--- + Support devfreq for Mali. + + Using the devfreq framework and, by default, the simpleondemand + governor, the frequency of Mali will be dynamically selected from the + available OPPs. + +config MALI_QUIET + bool "Make Mali driver very quiet" + depends on MALI400 && !MALI400_DEBUG + default n + ---help--- + This forces the Mali driver to never print any messages. + + If unsure, say N. 
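+ +# For the MALI_DEVFREQ option above, the selectable frequencies come from OPPs +# registered for the GPU node, e.g. via a legacy operating-points table in the +# board DTS (an illustrative sketch, not taken from this patch; pairs are +# <kHz uV>): +# +#   operating-points = <400000 1100000>, <300000 1050000>;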
+ +config MALI_DMA_BUF_FENCE + bool "Enable dma-buf fence support" + depends on MALI400 + default n + ---help--- + Choose this option if you want to use fences and reservations for + synchronization of shared dma-buf access between different drivers. + + If unsure, say N. diff -ENwbur a/drivers/gpu/arm/mali400/linux/license/gpl/mali_kernel_license.h b/drivers/gpu/arm/mali400/linux/license/gpl/mali_kernel_license.h --- a/drivers/gpu/arm/mali400/linux/license/gpl/mali_kernel_license.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/license/gpl/mali_kernel_license.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2010, 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_kernel_license.h + * Defines for the macro MODULE_LICENSE. + */ + +#ifndef __MALI_KERNEL_LICENSE_H__ +#define __MALI_KERNEL_LICENSE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MALI_KERNEL_LINUX_LICENSE "GPL" +#define MALI_LICENSE_IS_GPL 1 + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_KERNEL_LICENSE_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_devfreq.c b/drivers/gpu/arm/mali400/linux/mali_devfreq.c --- a/drivers/gpu/arm/mali400/linux/mali_devfreq.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_devfreq.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,310 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_osk_mali.h" +#include "mali_kernel_common.h" + +#include <linux/clk.h> +#include <linux/devfreq.h> +#include <linux/regulator/consumer.h> +#include <linux/regulator/driver.h> +#ifdef CONFIG_DEVFREQ_THERMAL +#include <linux/devfreq_cooling.h> +#endif + +#include <linux/version.h> +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) +#include <linux/pm_opp.h> +#else /* Linux >= 3.13 */ +/* In 3.13 the OPP include header file, types, and functions were all + * renamed. Use the old filename for the include, and define the new names to + * the old, when an old kernel is detected. 
+ */ +#include <linux/opp.h> +#define dev_pm_opp opp +#define dev_pm_opp_get_voltage opp_get_voltage +#define dev_pm_opp_get_opp_count opp_get_opp_count +#define dev_pm_opp_find_freq_ceil opp_find_freq_ceil +#endif /* Linux >= 3.13 */ + +#include "mali_pm_metrics.h" + +static int +mali_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags) +{ + struct mali_device *mdev = dev_get_drvdata(dev); + struct dev_pm_opp *opp; + unsigned long freq = 0; + unsigned long voltage; + int err; + + freq = *target_freq; + + rcu_read_lock(); + opp = devfreq_recommended_opp(dev, &freq, flags); + if (IS_ERR_OR_NULL(opp)) { + rcu_read_unlock(); + MALI_PRINT_ERROR(("Failed to get opp (%ld)\n", PTR_ERR(opp))); + return PTR_ERR(opp); + } + voltage = dev_pm_opp_get_voltage(opp); + rcu_read_unlock(); + + MALI_DEBUG_PRINT(2, ("mali_devfreq_target: set_freq = %lu flags = 0x%x\n", freq, flags)); + /* + * Only update if there is a change of frequency + */ + if (mdev->current_freq == freq) { + *target_freq = freq; + mali_pm_reset_dvfs_utilisation(mdev); + return 0; + } + +#ifdef CONFIG_REGULATOR + if (mdev->regulator && mdev->current_voltage != voltage + && mdev->current_freq < freq) { + err = regulator_set_voltage(mdev->regulator, voltage, voltage); + if (err) { + MALI_PRINT_ERROR(("Failed to increase voltage (%d)\n", err)); + return err; + } + } +#endif + + err = clk_set_rate(mdev->clock, freq); + if (err) { + MALI_PRINT_ERROR(("Failed to set clock %lu (target %lu)\n", freq, *target_freq)); + return err; + } + +#ifdef CONFIG_REGULATOR + if (mdev->regulator && mdev->current_voltage != voltage + && mdev->current_freq > freq) { + err = regulator_set_voltage(mdev->regulator, voltage, voltage); + if (err) { + MALI_PRINT_ERROR(("Failed to decrease voltage (%d)\n", err)); + return err; + } + } +#endif + + *target_freq = freq; + mdev->current_voltage = voltage; + mdev->current_freq = freq; + + mali_pm_reset_dvfs_utilisation(mdev); + + return err; +} + +static int +mali_devfreq_cur_freq(struct device *dev, unsigned long *freq) +{ + struct mali_device *mdev = dev_get_drvdata(dev); + + *freq = mdev->current_freq; + + MALI_DEBUG_PRINT(2, ("mali_devfreq_cur_freq: freq = %lu\n", *freq)); + return 0; +} + +static int +mali_devfreq_status(struct device *dev, struct devfreq_dev_status *stat) +{ + struct mali_device *mdev = dev_get_drvdata(dev); + + stat->current_frequency = mdev->current_freq; + + mali_pm_get_dvfs_utilisation(mdev, + &stat->total_time, &stat->busy_time); + + stat->private_data = NULL; + +#ifdef CONFIG_DEVFREQ_THERMAL + memcpy(&mdev->devfreq->last_status, stat, sizeof(*stat)); +#endif + + return 0; +} + +/* set up platform-specific OPPs in platform.c */ +int __weak setup_opps(void) +{ + return 0; +} + +/* tear down platform-specific OPPs in platform.c */ +int __weak term_opps(struct device *dev) +{ + return 0; +} + +static int mali_devfreq_init_freq_table(struct mali_device *mdev, + struct devfreq_dev_profile *dp) +{ + int err, count; + int i = 0; + unsigned long freq = 0; + struct dev_pm_opp *opp; + + err = setup_opps(); + if (err) + return err; + + rcu_read_lock(); + count = dev_pm_opp_get_opp_count(mdev->dev); + if (count < 0) { + rcu_read_unlock(); + return count; + } + rcu_read_unlock(); + + MALI_DEBUG_PRINT(2, ("mali devfreq table count %d\n", count)); + + dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]), + GFP_KERNEL); + if (!dp->freq_table) + return -ENOMEM; + + rcu_read_lock(); + for (i = 0; i < count; i++, freq++) { + opp = dev_pm_opp_find_freq_ceil(mdev->dev, &freq); + if (IS_ERR(opp)) + break; + + dp->freq_table[i] = 
freq; + MALI_DEBUG_PRINT(2, ("mali devfreq table array[%d] = %lu\n", i, freq)); + } + rcu_read_unlock(); + + if (count != i) + MALI_PRINT_ERROR(("Unable to enumerate all OPPs (%d!=%d)\n", + count, i)); + + dp->max_state = i; + + return 0; +} + +static void mali_devfreq_term_freq_table(struct mali_device *mdev) +{ + struct devfreq_dev_profile *dp = mdev->devfreq->profile; + + kfree(dp->freq_table); + term_opps(mdev->dev); +} + +static void mali_devfreq_exit(struct device *dev) +{ + struct mali_device *mdev = dev_get_drvdata(dev); + + mali_devfreq_term_freq_table(mdev); +} + +int mali_devfreq_init(struct mali_device *mdev) +{ +#ifdef CONFIG_DEVFREQ_THERMAL + struct devfreq_cooling_power *callbacks = NULL; + _mali_osk_device_data data; +#endif + struct devfreq_dev_profile *dp; + int err; + + MALI_DEBUG_PRINT(2, ("Init Mali devfreq\n")); + + if (!mdev->clock) + return -ENODEV; + + mdev->current_freq = clk_get_rate(mdev->clock); + + dp = &mdev->devfreq_profile; + + dp->initial_freq = mdev->current_freq; + dp->polling_ms = 100; + dp->target = mali_devfreq_target; + dp->get_dev_status = mali_devfreq_status; + dp->get_cur_freq = mali_devfreq_cur_freq; + dp->exit = mali_devfreq_exit; + + if (mali_devfreq_init_freq_table(mdev, dp)) + return -EFAULT; + + mdev->devfreq = devfreq_add_device(mdev->dev, dp, + "simple_ondemand", NULL); + if (IS_ERR(mdev->devfreq)) { + mali_devfreq_term_freq_table(mdev); + return PTR_ERR(mdev->devfreq); + } + + err = devfreq_register_opp_notifier(mdev->dev, mdev->devfreq); + if (err) { + MALI_PRINT_ERROR(("Failed to register OPP notifier (%d)\n", err)); + goto opp_notifier_failed; + } + +#ifdef CONFIG_DEVFREQ_THERMAL + /* Initialize last_status; it will be used when the power allocator is first called */ + mdev->devfreq->last_status.current_frequency = mdev->current_freq; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + if (NULL != data.gpu_cooling_ops) { + callbacks = data.gpu_cooling_ops; + MALI_DEBUG_PRINT(2, ("Mali GPU Thermal: Callback handler installed \n")); + } + } + + if (callbacks) { + mdev->devfreq_cooling = of_devfreq_cooling_register_power( + mdev->dev->of_node, + mdev->devfreq, + callbacks); + if (IS_ERR_OR_NULL(mdev->devfreq_cooling)) { + err = PTR_ERR(mdev->devfreq_cooling); + MALI_PRINT_ERROR(("Failed to register cooling device (%d)\n", err)); + goto cooling_failed; + } else { + MALI_DEBUG_PRINT(2, ("Mali GPU Thermal Cooling installed \n")); + } + } +#endif + + return 0; + +#ifdef CONFIG_DEVFREQ_THERMAL +cooling_failed: + devfreq_unregister_opp_notifier(mdev->dev, mdev->devfreq); +#endif /* CONFIG_DEVFREQ_THERMAL */ +opp_notifier_failed: + err = devfreq_remove_device(mdev->devfreq); + if (err) + MALI_PRINT_ERROR(("Failed to terminate devfreq (%d)\n", err)); + else + mdev->devfreq = NULL; + + return err; +} + +void mali_devfreq_term(struct mali_device *mdev) +{ + int err; + + MALI_DEBUG_PRINT(2, ("Term Mali devfreq\n")); + +#ifdef CONFIG_DEVFREQ_THERMAL + devfreq_cooling_unregister(mdev->devfreq_cooling); +#endif + + devfreq_unregister_opp_notifier(mdev->dev, mdev->devfreq); + + err = devfreq_remove_device(mdev->devfreq); + if (err) + MALI_PRINT_ERROR(("Failed to terminate devfreq (%d)\n", err)); + else + mdev->devfreq = NULL; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_devfreq.h b/drivers/gpu/arm/mali400/linux/mali_devfreq.h --- a/drivers/gpu/arm/mali400/linux/mali_devfreq.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_devfreq.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,17 @@ +/* + * Copyright (C) 
2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#ifndef _MALI_DEVFREQ_H_ +#define _MALI_DEVFREQ_H_ + +int mali_devfreq_init(struct mali_device *mdev); + +void mali_devfreq_term(struct mali_device *mdev); + +#endif diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_device_pause_resume.c b/drivers/gpu/arm/mali400/linux/mali_device_pause_resume.c --- a/drivers/gpu/arm/mali400/linux/mali_device_pause_resume.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_device_pause_resume.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,36 @@ +/** + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_device_pause_resume.c + * Implementation of the Mali pause/resume functionality + */ + +#include <linux/module.h> +#include <linux/mali/mali_utgard.h> +#include "mali_pm.h" + +void mali_dev_pause(void) +{ + /* + * Deactivate all groups to prevent the hardware from being touched + * while the mali device is paused + */ + mali_pm_os_suspend(MALI_FALSE); +} + +EXPORT_SYMBOL(mali_dev_pause); + +void mali_dev_resume(void) +{ + mali_pm_os_resume(); +} + +EXPORT_SYMBOL(mali_dev_resume); diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_dma_fence.c b/drivers/gpu/arm/mali400/linux/mali_dma_fence.c --- a/drivers/gpu/arm/mali400/linux/mali_dma_fence.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_dma_fence.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,352 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +#include <linux/version.h> +#include "mali_osk.h" +#include "mali_kernel_common.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include "mali_dma_fence.h" +#include <linux/atomic.h> +#include <linux/workqueue.h> +#endif + +static DEFINE_SPINLOCK(mali_dma_fence_lock); + +static bool mali_dma_fence_enable_signaling(struct dma_fence *fence) +{ + MALI_IGNORE(fence); + return true; +} + +static const char *mali_dma_fence_get_driver_name(struct dma_fence *fence) +{ + MALI_IGNORE(fence); + return "mali"; +} + +static const char *mali_dma_fence_get_timeline_name(struct dma_fence *fence) +{ + MALI_IGNORE(fence); + return "mali_dma_fence"; +} + +static const struct dma_fence_ops mali_dma_fence_ops = { + .get_driver_name = mali_dma_fence_get_driver_name, + .get_timeline_name = mali_dma_fence_get_timeline_name, + .enable_signaling = mali_dma_fence_enable_signaling, + .signaled = NULL, + .wait = dma_fence_default_wait, + .release = NULL +}; + +static void mali_dma_fence_context_cleanup(struct mali_dma_fence_context *dma_fence_context) +{ + u32 i; + + MALI_DEBUG_ASSERT_POINTER(dma_fence_context); + + for (i = 0; i < dma_fence_context->num_dma_fence_waiter; i++) { + if (dma_fence_context->mali_dma_fence_waiters[i]) { + dma_fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence, + &dma_fence_context->mali_dma_fence_waiters[i]->base); + dma_fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence); + kfree(dma_fence_context->mali_dma_fence_waiters[i]); + dma_fence_context->mali_dma_fence_waiters[i] = NULL; + } + } + + if (NULL != dma_fence_context->mali_dma_fence_waiters) + kfree(dma_fence_context->mali_dma_fence_waiters); + + dma_fence_context->mali_dma_fence_waiters = NULL; + dma_fence_context->num_dma_fence_waiter = 0; +} + +static void mali_dma_fence_context_work_func(struct work_struct *work_handle) +{ + struct mali_dma_fence_context *dma_fence_context; + + MALI_DEBUG_ASSERT_POINTER(work_handle); + + dma_fence_context = container_of(work_handle, struct mali_dma_fence_context, work_handle); + + dma_fence_context->cb_func(dma_fence_context->pp_job_ptr); +} + +static void mali_dma_fence_callback(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct mali_dma_fence_waiter *dma_fence_waiter = NULL; + struct mali_dma_fence_context *dma_fence_context = NULL; + + MALI_DEBUG_ASSERT_POINTER(fence); + MALI_DEBUG_ASSERT_POINTER(cb); + + MALI_IGNORE(fence); + + dma_fence_waiter = container_of(cb, struct mali_dma_fence_waiter, base); + dma_fence_context = dma_fence_waiter->parent; + + MALI_DEBUG_ASSERT_POINTER(dma_fence_context); + + if (atomic_dec_and_test(&dma_fence_context->count)) + schedule_work(&dma_fence_context->work_handle); +} + +static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct dma_fence *fence) +{ + int ret = 0; + struct mali_dma_fence_waiter *dma_fence_waiter; + struct mali_dma_fence_waiter **dma_fence_waiters; + + MALI_DEBUG_ASSERT_POINTER(dma_fence_context); + MALI_DEBUG_ASSERT_POINTER(fence); + + dma_fence_waiters = krealloc(dma_fence_context->mali_dma_fence_waiters, + (dma_fence_context->num_dma_fence_waiter + 1) + * sizeof(struct mali_dma_fence_waiter *), + GFP_KERNEL); + + if (NULL == dma_fence_waiters) { + MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to realloc the dma fence waiters.\n")); + return _MALI_OSK_ERR_NOMEM; + } + + dma_fence_context->mali_dma_fence_waiters = dma_fence_waiters; + + dma_fence_waiter = kzalloc(sizeof(struct mali_dma_fence_waiter), GFP_KERNEL); + + if (NULL == dma_fence_waiter) { + MALI_DEBUG_PRINT(1, ("Mali dma 
fence: failed to create mali dma fence waiter.\n")); + return _MALI_OSK_ERR_NOMEM; + } + + dma_fence_get(fence); + + dma_fence_waiter->fence = fence; + dma_fence_waiter->parent = dma_fence_context; + atomic_inc(&dma_fence_context->count); + + ret = dma_fence_add_callback(fence, &dma_fence_waiter->base, + mali_dma_fence_callback); + if (0 > ret) { + dma_fence_put(fence); + kfree(dma_fence_waiter); + atomic_dec(&dma_fence_context->count); + if (-ENOENT == ret) { + /* -ENOENT: the fence has already been signaled, return _MALI_OSK_ERR_OK */ + return _MALI_OSK_ERR_OK; + } + /* Failed to add the fence callback to the fence, return _MALI_OSK_ERR_FAULT */ + MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into fence.\n")); + return _MALI_OSK_ERR_FAULT; + } + + dma_fence_context->mali_dma_fence_waiters[dma_fence_context->num_dma_fence_waiter] = dma_fence_waiter; + dma_fence_context->num_dma_fence_waiter++; + + return _MALI_OSK_ERR_OK; +} + + +struct dma_fence *mali_dma_fence_new(u32 context, u32 seqno) +{ + struct dma_fence *fence = NULL; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + + if (NULL == fence) { + MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create dma fence.\n")); + return fence; + } + + dma_fence_init(fence, + &mali_dma_fence_ops, + &mali_dma_fence_lock, + context, seqno); + + return fence; +} + +void mali_dma_fence_signal_and_put(struct dma_fence **fence) +{ + MALI_DEBUG_ASSERT_POINTER(fence); + MALI_DEBUG_ASSERT_POINTER(*fence); + + dma_fence_signal(*fence); + dma_fence_put(*fence); + *fence = NULL; +} + +void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context, + mali_dma_fence_context_callback_func_t cb_func, + void *pp_job_ptr) +{ + MALI_DEBUG_ASSERT_POINTER(dma_fence_context); + + INIT_WORK(&dma_fence_context->work_handle, mali_dma_fence_context_work_func); + atomic_set(&dma_fence_context->count, 1); + dma_fence_context->num_dma_fence_waiter = 0; + dma_fence_context->mali_dma_fence_waiters = NULL; + dma_fence_context->cb_func = cb_func; + dma_fence_context->pp_job_ptr = pp_job_ptr; +} + +_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context, + struct reservation_object *dma_reservation_object) +{ + _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK; + struct dma_fence *exclusive_fence = NULL; + u32 shared_count = 0, i; + struct dma_fence **shared_fences = NULL; + + MALI_DEBUG_ASSERT_POINTER(dma_fence_context); + MALI_DEBUG_ASSERT_POINTER(dma_reservation_object); + + /* Get all the shared/exclusive fences in the reservation object of the dma-buf */ + ret = reservation_object_get_fences_rcu(dma_reservation_object, &exclusive_fence, + &shared_count, &shared_fences); + if (ret < 0) { + MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to get shared or exclusive dma fences from the reservation object of the dma-buf.\n")); + return _MALI_OSK_ERR_FAULT; + } + + if (exclusive_fence) { + ret = mali_dma_fence_add_callback(dma_fence_context, exclusive_fence); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into exclusive fence.\n")); + mali_dma_fence_context_cleanup(dma_fence_context); + goto ended; + } + } + + + for (i = 0; i < shared_count; i++) { + ret = mali_dma_fence_add_callback(dma_fence_context, shared_fences[i]); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into shared fence [%d].\n", i)); + mali_dma_fence_context_cleanup(dma_fence_context); + break; + } + } + +ended: + + if (exclusive_fence) + 
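+ /* Drop the references taken by reservation_object_get_fences_rcu(); + * each waiter installed above holds its own reference via dma_fence_get(). */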
dma_fence_put(exclusive_fence); + + if (shared_fences) { + for (i = 0; i < shared_count; i++) { + dma_fence_put(shared_fences[i]); + } + kfree(shared_fences); + } + + return ret; +} + + +void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context) +{ + MALI_DEBUG_ASSERT_POINTER(dma_fence_context); + atomic_set(&dma_fence_context->count, 0); + if (dma_fence_context->work_handle.func) { + cancel_work_sync(&dma_fence_context->work_handle); + } + mali_dma_fence_context_cleanup(dma_fence_context); +} + +void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context) +{ + MALI_DEBUG_ASSERT_POINTER(dma_fence_context); + + if (atomic_dec_and_test(&dma_fence_context->count)) + schedule_work(&dma_fence_context->work_handle); +} + + +void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object, + struct reservation_object **dma_reservation_object_list, + u32 *num_dma_reservation_object) +{ + u32 i; + + MALI_DEBUG_ASSERT_POINTER(dma_reservation_object); + MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list); + MALI_DEBUG_ASSERT_POINTER(num_dma_reservation_object); + + for (i = 0; i < *num_dma_reservation_object; i++) { + if (dma_reservation_object_list[i] == dma_reservation_object) + return; + } + + dma_reservation_object_list[*num_dma_reservation_object] = dma_reservation_object; + (*num_dma_reservation_object)++; +} + +int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list, + u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx) +{ + u32 i; + + struct reservation_object *reservation_object_to_slow_lock = NULL; + + MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list); + MALI_DEBUG_ASSERT_POINTER(ww_actx); + + ww_acquire_init(ww_actx, &reservation_ww_class); + +again: + for (i = 0; i < num_dma_reservation_object; i++) { + int ret; + + if (dma_reservation_object_list[i] == reservation_object_to_slow_lock) { + reservation_object_to_slow_lock = NULL; + continue; + } + + ret = ww_mutex_lock(&dma_reservation_object_list[i]->lock, ww_actx); + + if (ret < 0) { + u32 slow_lock_index = i; + + /* unlock all the locks we have already taken. */ + while (i > 0) { + i--; + ww_mutex_unlock(&dma_reservation_object_list[i]->lock); + } + + if (NULL != reservation_object_to_slow_lock) + ww_mutex_unlock(&reservation_object_to_slow_lock->lock); + + if (ret == -EDEADLK) { + reservation_object_to_slow_lock = dma_reservation_object_list[slow_lock_index]; + ww_mutex_lock_slow(&reservation_object_to_slow_lock->lock, ww_actx); + goto again; + } + ww_acquire_fini(ww_actx); + MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to lock all dma reservation objects.\n")); + return ret; + } + } + + ww_acquire_done(ww_actx); + return 0; +} + +void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list, + u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx) +{ + u32 i; + + for (i = 0; i < num_dma_reservation_object; i++) + ww_mutex_unlock(&dma_reservation_object_list[i]->lock); + + ww_acquire_fini(ww_actx); +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_dma_fence.h b/drivers/gpu/arm/mali400/linux/mali_dma_fence.h --- a/drivers/gpu/arm/mali400/linux/mali_dma_fence.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_dma_fence.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_dma_fence.h + * + * Mali interface for Linux dma buf fence objects. + */ + +#ifndef _MALI_DMA_FENCE_H_ +#define _MALI_DMA_FENCE_H_ + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) +#include <linux/dma-fence.h> +#include <linux/reservation.h> +#endif + +struct mali_dma_fence_context; + +/* The mali dma fence context callback function */ +typedef void (*mali_dma_fence_context_callback_func_t)(void *pp_job_ptr); + +struct mali_dma_fence_waiter { + struct dma_fence_cb base; + struct mali_dma_fence_context *parent; + struct dma_fence *fence; +}; + +struct mali_dma_fence_context { + struct work_struct work_handle; + struct mali_dma_fence_waiter **mali_dma_fence_waiters; + u32 num_dma_fence_waiter; + atomic_t count; + void *pp_job_ptr; /* the mali pp job pointer */ + mali_dma_fence_context_callback_func_t cb_func; +}; + +/* Create a dma fence + * @param context The execution context this fence is run on + * @param seqno A linearly increasing sequence number for this context + * @return the new dma fence on success, or NULL on failure. + */ +struct dma_fence *mali_dma_fence_new(u32 context, u32 seqno); + +/* Signal and put dma fence + * @param fence The dma fence to signal and put + */ +void mali_dma_fence_signal_and_put(struct dma_fence **fence); + +/** + * Initialize a mali dma fence context for a pp job. + * @param dma_fence_context The mali dma fence context to initialize. + * @param cb_func The dma fence context callback function to call when all dma fences are released. + * @param pp_job_ptr The pp_job to call the function with. + */ +void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context, + mali_dma_fence_context_callback_func_t cb_func, + void *pp_job_ptr); + +/** + * Add new mali dma fence waiters into a mali dma fence context + * @param dma_fence_context The mali dma fence context + * @param dma_reservation_object the reservation object to create new mali dma fence waiters from + * @return _MALI_OSK_ERR_OK on success, or an error code otherwise. + */ +_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context, + struct reservation_object *dma_reservation_object); + +/** + * Release the dma fence context + * @param dma_fence_context The mali dma fence context. + */ +void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context); + +/** + * Decrease the dma fence context atomic count + * @param dma_fence_context The mali dma fence context. + */ +void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context); + +/** + * Collect all reservation objects + * @param dma_reservation_object The reservation object to add into the reservation object list + * @param dma_reservation_object_list The reservation object list that stores all reservation objects + * @param num_dma_reservation_object The number of reservation objects collected so far + */ +void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object, + struct reservation_object **dma_reservation_object_list, + u32 *num_dma_reservation_object); + +/** + * Use wait/wound mutex locking to lock all reservation objects. 
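+ * On contention the implementation backs off: it releases every lock already + * taken, acquires the contended lock with ww_mutex_lock_slow(), and then + * retries the whole list (the usual wait/wound pattern; see the definition + * earlier in this patch). + * @return 0 on success, otherwise a negative error code.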
+ */ +int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list, + u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx); + +/** + * Use wait/wound mutex locking to unlock all reservation objects. + */ +void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list, + u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx); +#endif diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_kernel_linux.c b/drivers/gpu/arm/mali400/linux/mali_kernel_linux.c --- a/drivers/gpu/arm/mali400/linux/mali_kernel_linux.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_kernel_linux.c 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,1154 @@ +/** + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_kernel_linux.c + * Implementation of the Linux device driver entrypoints + */ +#include <linux/module.h> /* kernel module definitions */ +#include <linux/fs.h> /* file system operations */ +#include <linux/cdev.h> /* character device definitions */ +#include <linux/mm.h> /* memory manager definitions */ +#include <linux/mali/mali_utgard_ioctl.h> +#include <linux/version.h> +#include <linux/device.h> +#include "mali_kernel_license.h" +#include <linux/platform_device.h> +#include <linux/miscdevice.h> +#include <linux/bug.h> +#include <linux/of.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> + +#include <linux/mali/mali_utgard.h> +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_kernel_core.h" +#include "mali_osk.h" +#include "mali_kernel_linux.h" +#include "mali_ukk.h" +#include "mali_ukk_wrappers.h" +#include "mali_kernel_sysfs.h" +#include "mali_pm.h" +#include "mali_kernel_license.h" +#include "mali_memory.h" +#include "mali_memory_dma_buf.h" +#include "mali_memory_manager.h" +#include "mali_memory_swap_alloc.h" +#if defined(CONFIG_MALI400_INTERNAL_PROFILING) +#include "mali_profiling_internal.h" +#endif +#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS) +#include "mali_osk_profiling.h" +#include "mali_dvfs_policy.h" + +static int is_first_resume = 1; +/* Store the clk and vol for boot/insmod and mali_resume */ +static struct mali_gpu_clk_item mali_gpu_clk[2]; +#endif + +/* Streamline support for the Mali driver */ +#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING) +/* Ask Linux to create the tracepoints */ +#define CREATE_TRACE_POINTS +#include "mali_linux_trace.h" + +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_event); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_hw_counter); +EXPORT_TRACEPOINT_SYMBOL_GPL(mali_sw_counters); +#endif /* CONFIG_TRACEPOINTS */ + +#ifdef CONFIG_MALI_DEVFREQ +#include "mali_devfreq.h" +#include "mali_osk_mali.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +#include <linux/pm_opp.h> +#else +/* In 3.13 the OPP include header file, types, and functions were all + * renamed. Use the old filename for the include, and define the new names to + * the old, when an old kernel is detected. 
+ */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) +#include <linux/pm_opp.h> +#else +#include <linux/opp.h> +#endif /* Linux >= 3.13 */ +#define dev_pm_opp_of_add_table of_init_opp_table +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) +#define dev_pm_opp_of_remove_table of_free_opp_table +#endif /* Linux >= 3.19 */ +#endif /* Linux >= 4.4.0 */ +#endif + +/* from the __malidrv_build_info.c file that is generated during build */ +extern const char *__malidrv_build_info(void); + +/* Module parameter to control log level */ +int mali_debug_level = 2; +module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */ +MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output"); + +extern int mali_max_job_runtime; +module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what"); + +extern int mali_l2_max_reads; +module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache"); + +extern unsigned int mali_dedicated_mem_start; +module_param(mali_dedicated_mem_start, uint, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_dedicated_mem_start, "Physical start address of dedicated Mali GPU memory."); + +extern unsigned int mali_dedicated_mem_size; +module_param(mali_dedicated_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory."); + +extern unsigned int mali_shared_mem_size; +module_param(mali_shared_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory."); + +#if defined(CONFIG_MALI400_PROFILING) +extern int mali_boot_profiling; +module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization"); +#endif + +extern int mali_max_pp_cores_group_1; +module_param(mali_max_pp_cores_group_1, int, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_max_pp_cores_group_1, "Limit the number of PP cores to use from first PP group."); + +extern int mali_max_pp_cores_group_2; +module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only)."); + +extern unsigned int mali_mem_swap_out_threshold_value; +module_param(mali_mem_swap_out_threshold_value, uint, S_IRUSR | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_mem_swap_out_threshold_value, "Threshold value used to limit how much swappable memory cached in Mali driver."); + +#if defined(CONFIG_MALI_DVFS) +/** the max FPS, same as the display vsync rate, default 60; can be set as a module parameter at insmod */ +extern int mali_max_system_fps; +module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_max_system_fps, "Max system fps the same as display VSYNC."); + +/** a lower limit on the desired FPS, default 58; can be set as a module parameter at insmod */ +extern int mali_desired_fps; +module_param(mali_desired_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); +MODULE_PARM_DESC(mali_desired_fps, "User-desired FPS, a bit lower than max_system_fps"); +#endif + +#if MALI_ENABLE_CPU_CYCLES +#include <linux/cpumask.h> +#include <linux/timer.h> +#include <asm/smp.h> +static struct timer_list mali_init_cpu_clock_timers[8]; +static u32 
mali_cpu_clock_last_value[8] = {0,}; +#endif + +/* Export symbols from common code: mali_user_settings.c */ +#include "mali_user_settings_db.h" +EXPORT_SYMBOL(mali_set_user_setting); +EXPORT_SYMBOL(mali_get_user_setting); + +static char mali_dev_name[] = "mali"; /* should be const, but the functions we call requires non-cost */ + +/* This driver only supports one Mali device, and this variable stores this single platform device */ +struct platform_device *mali_platform_device = NULL; + +/* This driver only supports one Mali device, and this variable stores the exposed misc device (/dev/mali) */ +static struct miscdevice mali_miscdevice = { 0, }; + +static int mali_miscdevice_register(struct platform_device *pdev); +static void mali_miscdevice_unregister(void); + +static int mali_open(struct inode *inode, struct file *filp); +static int mali_release(struct inode *inode, struct file *filp); +#ifdef HAVE_UNLOCKED_IOCTL +static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#else +static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +#endif + +static int mali_probe(struct platform_device *pdev); +static int mali_remove(struct platform_device *pdev); + +static int mali_driver_suspend_scheduler(struct device *dev); +static int mali_driver_resume_scheduler(struct device *dev); + +#ifdef CONFIG_PM_RUNTIME +static int mali_driver_runtime_suspend(struct device *dev); +static int mali_driver_runtime_resume(struct device *dev); +static int mali_driver_runtime_idle(struct device *dev); +#endif + +#if defined(MALI_FAKE_PLATFORM_DEVICE) +#if defined(CONFIG_MALI_DT) +extern int mali_platform_device_init(struct platform_device *device); +extern int mali_platform_device_deinit(struct platform_device *device); +#else +extern int mali_platform_device_register(void); +extern int mali_platform_device_unregister(void); +#endif +#endif + +/* Linux power management operations provided by the Mali device driver */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)) +struct pm_ext_ops mali_dev_ext_pm_ops = { + .base = + { + .suspend = mali_driver_suspend_scheduler, + .resume = mali_driver_resume_scheduler, + .freeze = mali_driver_suspend_scheduler, + .thaw = mali_driver_resume_scheduler, + }, +}; +#else +static const struct dev_pm_ops mali_dev_pm_ops = { +#ifdef CONFIG_PM_RUNTIME + .runtime_suspend = mali_driver_runtime_suspend, + .runtime_resume = mali_driver_runtime_resume, + .runtime_idle = mali_driver_runtime_idle, +#endif + .suspend = mali_driver_suspend_scheduler, + .resume = mali_driver_resume_scheduler, + .freeze = mali_driver_suspend_scheduler, + .thaw = mali_driver_resume_scheduler, + .poweroff = mali_driver_suspend_scheduler, +}; +#endif + +#ifdef CONFIG_MALI_DT +static struct of_device_id base_dt_ids[] = { + {.compatible = "arm,mali-300"}, + {.compatible = "arm,mali-400"}, + {.compatible = "arm,mali-450"}, + {.compatible = "arm,mali-470"}, + {}, +}; + +MODULE_DEVICE_TABLE(of, base_dt_ids); +#endif + +/* The Mali device driver struct */ +static struct platform_driver mali_platform_driver = { + .probe = mali_probe, + .remove = mali_remove, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)) + .pm = &mali_dev_ext_pm_ops, +#endif + .driver = + { + .name = MALI_GPU_NAME_UTGARD, + .owner = THIS_MODULE, + .bus = &platform_bus_type, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)) + .pm = &mali_dev_pm_ops, +#endif +#ifdef CONFIG_MALI_DT + .of_match_table = of_match_ptr(base_dt_ids), +#endif + }, +}; + +/* Linux misc device 
operations (/dev/mali) */ +struct file_operations mali_fops = { + .owner = THIS_MODULE, + .open = mali_open, + .release = mali_release, +#ifdef HAVE_UNLOCKED_IOCTL + .unlocked_ioctl = mali_ioctl, +#else + .ioctl = mali_ioctl, +#endif + .compat_ioctl = mali_ioctl, + .mmap = mali_mmap +}; + +#if MALI_ENABLE_CPU_CYCLES +void mali_init_cpu_time_counters(int reset, int enable_divide_by_64) +{ + /* The CPU assembly reference used is: ARM Architecture Reference Manual ARMv7-AR C.b */ + u32 write_value; + + /* See B4.1.116 PMCNTENSET, Performance Monitors Count Enable Set register, VMSA */ + /* setting p15 c9 c12 1 to 0x8000000f==CPU_CYCLE_ENABLE |EVENT_3_ENABLE|EVENT_2_ENABLE|EVENT_1_ENABLE|EVENT_0_ENABLE */ + asm volatile("mcr p15, 0, %0, c9, c12, 1" :: "r"(0x8000000f)); + + + /* See B4.1.117 PMCR, Performance Monitors Control Register. Writing to p15, c9, c12, 0 */ + write_value = 1 << 0; /* Bit 0 set. Enable counters */ + if (reset) { + write_value |= 1 << 1; /* Reset event counters */ + write_value |= 1 << 2; /* Reset cycle counter */ + } + if (enable_divide_by_64) { + write_value |= 1 << 3; /* Enable the Clock divider by 64 */ + } + write_value |= 1 << 4; /* Export enable. Not needed */ + asm volatile("MCR p15, 0, %0, c9, c12, 0\t\n" :: "r"(write_value)); + + /* PMOVSR Overflow Flag Status Register - Clear Clock and Event overflows */ + asm volatile("MCR p15, 0, %0, c9, c12, 3\t\n" :: "r"(0x8000000f)); + + + /* See B4.1.124 PMUSERENR - setting p15 c9 c14 to 1" */ + /* User mode access to the Performance Monitors enabled. */ + /* Lets User space read cpu clock cycles */ + asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(1)); +} + +/** A timer function that configures the cycle clock counter on current CPU. + * The function \a mali_init_cpu_time_counters_on_all_cpus sets up this + * function to trigger on all Cpus during module load. + */ +static void mali_init_cpu_clock_timer_func(unsigned long data) +{ + int reset_counters, enable_divide_clock_counter_by_64; + int current_cpu = raw_smp_processor_id(); + unsigned int sample0; + unsigned int sample1; + + MALI_IGNORE(data); + + reset_counters = 1; + enable_divide_clock_counter_by_64 = 0; + mali_init_cpu_time_counters(reset_counters, enable_divide_clock_counter_by_64); + + sample0 = mali_get_cpu_cyclecount(); + sample1 = mali_get_cpu_cyclecount(); + + MALI_DEBUG_PRINT(3, ("Init Cpu %d cycle counter- First two samples: %08x %08x \n", current_cpu, sample0, sample1)); +} + +/** A timer functions for storing current time on all cpus. + * Used for checking if the clocks have similar values or if they are drifting. + */ +static void mali_print_cpu_clock_timer_func(unsigned long data) +{ + int current_cpu = raw_smp_processor_id(); + unsigned int sample0; + + MALI_IGNORE(data); + sample0 = mali_get_cpu_cyclecount(); + if (current_cpu < 8) { + mali_cpu_clock_last_value[current_cpu] = sample0; + } +} + +/** Init the performance registers on all CPUs to count clock cycles. + * For init \a print_only should be 0. + * If \a print_only is 1, it will intead print the current clock value of all CPUs. 
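+ *
+ * Usage note (editorial): mali_module_init() below calls this twice --
+ * first with print_only == 0 to program the counters on every online
+ * CPU, then with print_only == 1 to read the values back.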
+ */ +void mali_init_cpu_time_counters_on_all_cpus(int print_only) +{ + int i = 0; + int cpu_number; + int jiffies_trigger; + int jiffies_wait; + + jiffies_wait = 2; + jiffies_trigger = jiffies + jiffies_wait; + + for (i = 0 ; i < 8 ; i++) { + init_timer(&mali_init_cpu_clock_timers[i]); + if (print_only) mali_init_cpu_clock_timers[i].function = mali_print_cpu_clock_timer_func; + else mali_init_cpu_clock_timers[i].function = mali_init_cpu_clock_timer_func; + mali_init_cpu_clock_timers[i].expires = jiffies_trigger ; + } + cpu_number = cpumask_first(cpu_online_mask); + for (i = 0 ; i < 8 ; i++) { + int next_cpu; + add_timer_on(&mali_init_cpu_clock_timers[i], cpu_number); + next_cpu = cpumask_next(cpu_number, cpu_online_mask); + if (next_cpu >= nr_cpu_ids) break; + cpu_number = next_cpu; + } + + while (jiffies_wait) jiffies_wait = schedule_timeout_uninterruptible(jiffies_wait); + + for (i = 0 ; i < 8 ; i++) { + del_timer_sync(&mali_init_cpu_clock_timers[i]); + } + + if (print_only) { + if ((0 == mali_cpu_clock_last_value[2]) && (0 == mali_cpu_clock_last_value[3])) { + /* Diff can be printed if we want to check if the clocks are in sync + int diff = mali_cpu_clock_last_value[0] - mali_cpu_clock_last_value[1];*/ + MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1])); + } else { + MALI_DEBUG_PRINT(2, ("CPU cycle counters readout all: %08x %08x %08x %08x\n", mali_cpu_clock_last_value[0], mali_cpu_clock_last_value[1], mali_cpu_clock_last_value[2], mali_cpu_clock_last_value[3])); + } + } +} +#endif + +int mali_module_init(void) +{ + int err = 0; + + MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n", _MALI_API_VERSION)); + MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__)); + MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING)); + +#if MALI_ENABLE_CPU_CYCLES + mali_init_cpu_time_counters_on_all_cpus(0); + MALI_DEBUG_PRINT(2, ("CPU cycle counter setup complete\n")); + /* Printing the current cpu counters */ + mali_init_cpu_time_counters_on_all_cpus(1); +#endif + + /* Initialize module wide settings */ +#ifdef MALI_FAKE_PLATFORM_DEVICE +#ifndef CONFIG_MALI_DT + MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n")); + err = mali_platform_device_register(); + if (0 != err) { + return err; + } +#endif +#endif + + MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n")); + + err = platform_driver_register(&mali_platform_driver); + + if (0 != err) { + MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err)); +#ifdef MALI_FAKE_PLATFORM_DEVICE +#ifndef CONFIG_MALI_DT + mali_platform_device_unregister(); +#endif +#endif + mali_platform_device = NULL; + return err; + } + +#if defined(CONFIG_MALI400_INTERNAL_PROFILING) + err = _mali_internal_profiling_init(mali_boot_profiling ? 
MALI_TRUE : MALI_FALSE);
+	if (0 != err) {
+		/* No biggie if we weren't able to initialize the profiling */
+		MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+	}
+#endif
+
+	/* Tracing the current frequency and voltage from boot/insmod */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Just call mali_get_current_gpu_clk_item() to record the current clk info. */
+	mali_get_current_gpu_clk_item(&mali_gpu_clk[0]);
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+				      MALI_PROFILING_EVENT_CHANNEL_GPU |
+				      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+				      mali_gpu_clk[0].clock,
+				      mali_gpu_clk[0].vol / 1000,
+				      0, 0, 0);
+#endif
+
+	MALI_PRINT(("Mali device driver loaded\n"));
+
+	return 0; /* Success */
+}
+
+void mali_module_exit(void)
+{
+	MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n", _MALI_API_VERSION));
+
+	MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
+
+	platform_driver_unregister(&mali_platform_driver);
+
+#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifndef CONFIG_MALI_DT
+	MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
+	mali_platform_device_unregister();
+#endif
+#endif
+
+	/* Tracing the current frequency and voltage from rmmod */
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+				      MALI_PROFILING_EVENT_CHANNEL_GPU |
+				      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+				      0,
+				      0,
+				      0, 0, 0);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+	_mali_internal_profiling_term();
+#endif
+
+	MALI_PRINT(("Mali device driver unloaded\n"));
+}
+
+#ifdef CONFIG_MALI_DEVFREQ
+struct mali_device *mali_device_alloc(void)
+{
+	return kzalloc(sizeof(struct mali_device), GFP_KERNEL);
+}
+
+void mali_device_free(struct mali_device *mdev)
+{
+	kfree(mdev);
+}
+#endif
+
+static int mali_probe(struct platform_device *pdev)
+{
+	int err;
+#ifdef CONFIG_MALI_DEVFREQ
+	struct mali_device *mdev;
+#endif
+
+	MALI_DEBUG_PRINT(2, ("mali_probe(): Called for platform device %s\n", pdev->name));
+
+	if (NULL != mali_platform_device) {
+		/* Already connected to a device, return error */
+		MALI_PRINT_ERROR(("mali_probe(): The Mali driver is already connected with a Mali device."));
+		return -EEXIST;
+	}
+
+	mali_platform_device = pdev;
+
+#ifdef CONFIG_MALI_DT
+	/* If we use DT to initialize our DDK, we have to prepare some
+	 * things first.
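+	 * A device-tree node that would bind against base_dt_ids above
+	 * looks roughly like this (illustrative sketch only; the unit
+	 * address is made up and a real node carries more properties):
+	 *
+	 *	gpu@c0070000 {
+	 *		compatible = "arm,mali-400";
+	 *	};
+	 *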
+	 */
+	err = mali_platform_device_init(mali_platform_device);
+	if (0 != err) {
+		MALI_PRINT_ERROR(("mali_probe(): Failed to initialize platform device."));
+		mali_platform_device = NULL;
+		return -EFAULT;
+	}
+#endif
+
+#ifdef CONFIG_MALI_DEVFREQ
+	mdev = mali_device_alloc();
+	if (!mdev) {
+		MALI_PRINT_ERROR(("Can't allocate mali device private data\n"));
+		return -ENOMEM;
+	}
+
+	mdev->dev = &pdev->dev;
+	dev_set_drvdata(mdev->dev, mdev);
+
+	/* Initialize clock and regulator */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+	&& defined(CONFIG_REGULATOR)
+	mdev->regulator = regulator_get_optional(mdev->dev, "mali");
+	if (IS_ERR_OR_NULL(mdev->regulator)) {
+		MALI_DEBUG_PRINT(2, ("Continuing without Mali regulator control\n"));
+		mdev->regulator = NULL;
+		/* Allow probe to continue without regulator */
+	}
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) && defined(CONFIG_OF) \
+	&& defined(CONFIG_PM_OPP)
+	/* Register the OPPs if they are available in device tree */
+	if (dev_pm_opp_of_add_table(mdev->dev) < 0)
+		MALI_DEBUG_PRINT(3, ("OPP table not found\n"));
+#endif
+
+	/* Need to name the gpu clock "clk_mali" in the device tree */
+	mdev->clock = clk_get(mdev->dev, "clk_mali");
+	if (IS_ERR_OR_NULL(mdev->clock)) {
+		MALI_DEBUG_PRINT(2, ("Continuing without Mali clock control\n"));
+		mdev->clock = NULL;
+		/* Allow probe to continue without clock. */
+	} else {
+		err = clk_prepare_enable(mdev->clock);
+		if (err) {
+			MALI_PRINT_ERROR(("Failed to prepare and enable clock (%d)\n", err));
+			goto clock_prepare_failed;
+		}
+	}
+
+	/* Initialize the PM metrics support */
+	if (mali_pm_metrics_init(mdev) < 0) {
+		MALI_DEBUG_PRINT(2, ("mali pm metrics init failed\n"));
+		goto pm_metrics_init_failed;
+	}
+
+	if (mali_devfreq_init(mdev) < 0) {
+		MALI_DEBUG_PRINT(2, ("mali devfreq init failed\n"));
+		goto devfreq_init_failed;
+	}
+#endif
+
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_wq_init()) {
+		/* Initialize the Mali GPU HW specified by pdev */
+		if (_MALI_OSK_ERR_OK == mali_initialize_subsystems()) {
+			/* Register a misc device (so we are accessible from user space) */
+			err = mali_miscdevice_register(pdev);
+			if (0 == err) {
+				/* Setup sysfs entries */
+				err = mali_sysfs_register(mali_dev_name);
+
+				if (0 == err) {
+					MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+
+					return 0;
+				} else {
+					MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
+				}
+				mali_miscdevice_unregister();
+			} else {
+				MALI_PRINT_ERROR(("mali_probe(): failed to register Mali misc device."));
+			}
+			mali_terminate_subsystems();
+		} else {
+			MALI_PRINT_ERROR(("mali_probe(): Failed to initialize Mali device driver."));
+		}
+		_mali_osk_wq_term();
+	}
+
+#ifdef CONFIG_MALI_DEVFREQ
+	mali_devfreq_term(mdev);
+devfreq_init_failed:
+	mali_pm_metrics_term(mdev);
+pm_metrics_init_failed:
+	clk_disable_unprepare(mdev->clock);
+clock_prepare_failed:
+	clk_put(mdev->clock);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \
+	&& defined(CONFIG_PM_OPP)
+	dev_pm_opp_of_remove_table(mdev->dev);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+	&& defined(CONFIG_REGULATOR)
+	regulator_put(mdev->regulator);
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+	mali_device_free(mdev);
+#endif
+
+#ifdef CONFIG_MALI_DT
+	mali_platform_device_deinit(mali_platform_device);
+#endif
+	mali_platform_device = NULL;
+	return -EFAULT;
+}
+
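+/*
+ * Editorial note on the error path above (not ARM's original comment):
+ * the unwind labels release resources in the reverse order of
+ * acquisition -- devfreq, PM metrics, clock, OPP table, regulator, then
+ * the mali_device itself -- so that a failure at any step only undoes
+ * what had already been set up when that step ran.
+ */
+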
+static int mali_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_MALI_DEVFREQ
+	struct mali_device *mdev = dev_get_drvdata(&pdev->dev);
+#endif
+
+	MALI_DEBUG_PRINT(2, ("mali_remove() called for platform device %s\n", pdev->name));
+	mali_sysfs_unregister();
+	mali_miscdevice_unregister();
+	mali_terminate_subsystems();
+	_mali_osk_wq_term();
+
+#ifdef CONFIG_MALI_DEVFREQ
+	mali_devfreq_term(mdev);
+
+	mali_pm_metrics_term(mdev);
+
+	if (mdev->clock) {
+		clk_disable_unprepare(mdev->clock);
+		clk_put(mdev->clock);
+		mdev->clock = NULL;
+	}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && defined(CONFIG_OF) \
+	&& defined(CONFIG_PM_OPP)
+	dev_pm_opp_of_remove_table(mdev->dev);
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)) && defined(CONFIG_OF) \
+	&& defined(CONFIG_REGULATOR)
+	regulator_put(mdev->regulator);
+#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
+	mali_device_free(mdev);
+#endif
+
+#ifdef CONFIG_MALI_DT
+	mali_platform_device_deinit(mali_platform_device);
+#endif
+	mali_platform_device = NULL;
+	return 0;
+}
+
+static int mali_miscdevice_register(struct platform_device *pdev)
+{
+	int err;
+
+	mali_miscdevice.minor = MISC_DYNAMIC_MINOR;
+	mali_miscdevice.name = mali_dev_name;
+	mali_miscdevice.fops = &mali_fops;
+	mali_miscdevice.parent = get_device(&pdev->dev);
+
+	err = misc_register(&mali_miscdevice);
+	if (0 != err) {
+		MALI_PRINT_ERROR(("Failed to register misc device, misc_register() returned %d\n", err));
+	}
+
+	return err;
+}
+
+static void mali_miscdevice_unregister(void)
+{
+	misc_deregister(&mali_miscdevice);
+}
+
+static int mali_driver_suspend_scheduler(struct device *dev)
+{
+	struct mali_gpu_device_data *device_data =
+		mali_platform_device->dev.platform_data;
+#ifdef CONFIG_MALI_DEVFREQ
+	struct mali_device *mdev = dev_get_drvdata(dev);
+	if (!mdev)
+		return -ENODEV;
+#endif
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	devfreq_suspend_device(mdev->devfreq);
+#endif
+
+	mali_pm_os_suspend(MALI_TRUE);
+	/* Tracing the frequency and voltage after mali is suspended */
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+				      MALI_PROFILING_EVENT_CHANNEL_GPU |
+				      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+				      0,
+				      0,
+				      0, 0, 0);
+
+	if (device_data->platform_suspend)
+		device_data->platform_suspend(dev);
+
+	return 0;
+}
+
+static int mali_driver_resume_scheduler(struct device *dev)
+{
+	struct mali_gpu_device_data *device_data =
+		mali_platform_device->dev.platform_data;
+#ifdef CONFIG_MALI_DEVFREQ
+	struct mali_device *mdev = dev_get_drvdata(dev);
+	if (!mdev)
+		return -ENODEV;
+#endif
+
+	/* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Just call mali_get_current_gpu_clk_item() once, to record the current clk info. */
+	if (is_first_resume == 1) {
+		mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+		is_first_resume = 0;
+	}
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+				      MALI_PROFILING_EVENT_CHANNEL_GPU |
+				      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+				      mali_gpu_clk[1].clock,
+				      mali_gpu_clk[1].vol / 1000,
+				      0, 0, 0);
+#endif
+	if (device_data->platform_resume)
+		device_data->platform_resume(dev);
+
+	mali_pm_os_resume();
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	devfreq_resume_device(mdev->devfreq);
+#endif
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int mali_driver_runtime_suspend(struct device *dev)
+{
+	struct mali_gpu_device_data *device_data =
+		mali_platform_device->dev.platform_data;
+#ifdef CONFIG_MALI_DEVFREQ
+	struct mali_device *mdev = dev_get_drvdata(dev);
+	if (!mdev)
+		return -ENODEV;
+#endif
+
+	if (MALI_TRUE == mali_pm_runtime_suspend()) {
+		/* Tracing the frequency and voltage after mali is suspended */
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+					      MALI_PROFILING_EVENT_CHANNEL_GPU |
+					      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+					      0,
+					      0,
+					      0, 0, 0);
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+		MALI_DEBUG_PRINT(4, ("devfreq_suspend_device: stop devfreq monitor\n"));
+		devfreq_suspend_device(mdev->devfreq);
+#endif
+
+		if (device_data->platform_suspend)
+			device_data->platform_suspend(dev);
+
+		return 0;
+	} else {
+		return -EBUSY;
+	}
+}
+
+static int mali_driver_runtime_resume(struct device *dev)
+{
+	struct mali_gpu_device_data *device_data =
+		mali_platform_device->dev.platform_data;
+#ifdef CONFIG_MALI_DEVFREQ
+	struct mali_device *mdev = dev_get_drvdata(dev);
+	if (!mdev)
+		return -ENODEV;
+#endif
+
+	/* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Just call mali_get_current_gpu_clk_item() once, to record the current clk info. */
+	if (is_first_resume == 1) {
+		mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+		is_first_resume = 0;
+	}
+	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+				      MALI_PROFILING_EVENT_CHANNEL_GPU |
+				      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+				      mali_gpu_clk[1].clock,
+				      mali_gpu_clk[1].vol / 1000,
+				      0, 0, 0);
+#endif
+	if (device_data->platform_resume)
+		device_data->platform_resume(dev);
+
+	mali_pm_runtime_resume();
+
+#if defined(CONFIG_MALI_DEVFREQ) && \
+	(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+	MALI_DEBUG_PRINT(4, ("devfreq_resume_device: start devfreq monitor\n"));
+	devfreq_resume_device(mdev->devfreq);
+#endif
+	return 0;
+}
+
+static int mali_driver_runtime_idle(struct device *dev)
+{
+	/* Nothing to do */
+	return 0;
+}
+#endif
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+	struct mali_session_data *session_data;
+	_mali_osk_errcode_t err;
+
+	/* input validation */
+	if (mali_miscdevice.minor != iminor(inode)) {
+		MALI_PRINT_ERROR(("mali_open() Minor does not match\n"));
+		return -ENODEV;
+	}
+
+	/* allocate a struct to track this session */
+	err = _mali_ukk_open((void **)&session_data);
+	if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+	/* initialize file pointer */
+	filp->f_pos = 0;
+
+	/* link in our session data */
+	filp->private_data = (void *)session_data;
+
+	filp->f_mapping = mali_mem_swap_get_global_swap_file()->f_mapping;
+
+	return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+	_mali_osk_errcode_t err;
+
+	/* input validation */
+	if (mali_miscdevice.minor != iminor(inode)) {
+		MALI_PRINT_ERROR(("mali_release() Minor does not match\n"));
+		return -ENODEV;
+	}
+
+	err = _mali_ukk_close((void **)&filp->private_data);
+	if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+	return 0;
+}
+
+int map_errcode(_mali_osk_errcode_t err)
+{
+	switch (err) {
+	case _MALI_OSK_ERR_OK:
+		return 0;
+	case _MALI_OSK_ERR_FAULT:
+		return -EFAULT;
+	case _MALI_OSK_ERR_INVALID_FUNC:
+		return -ENOTTY;
+	case _MALI_OSK_ERR_INVALID_ARGS:
+		return -EINVAL;
+	case _MALI_OSK_ERR_NOMEM:
+		return -ENOMEM;
+	case _MALI_OSK_ERR_TIMEOUT:
+		return -ETIMEDOUT;
+	case
_MALI_OSK_ERR_RESTARTSYSCALL: + return -ERESTARTSYS; + case _MALI_OSK_ERR_ITEM_NOT_FOUND: + return -ENOENT; + default: + return -EFAULT; + } +} + +#ifdef HAVE_UNLOCKED_IOCTL +static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +#else +static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) +#endif +{ + int err; + struct mali_session_data *session_data; + +#ifndef HAVE_UNLOCKED_IOCTL + /* inode not used */ + (void)inode; +#endif + + MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg)); + + session_data = (struct mali_session_data *)filp->private_data; + if (NULL == session_data) { + MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n")); + return -ENOTTY; + } + + if (NULL == (void *)arg) { + MALI_DEBUG_PRINT(7, ("arg was NULL\n")); + return -ENOTTY; + } + + switch (cmd) { + case MALI_IOC_WAIT_FOR_NOTIFICATION: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_wait_for_notification_s), sizeof(u64))); + err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg); + break; + + case MALI_IOC_GET_API_VERSION_V2: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_api_version_v2_s), sizeof(u64))); + err = get_api_version_v2_wrapper(session_data, (_mali_uk_get_api_version_v2_s __user *)arg); + break; + + case MALI_IOC_GET_API_VERSION: + err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg); + break; + + case MALI_IOC_POST_NOTIFICATION: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_post_notification_s), sizeof(u64))); + err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg); + break; + + case MALI_IOC_GET_USER_SETTINGS: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_user_settings_s), sizeof(u64))); + err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg); + break; + + case MALI_IOC_REQUEST_HIGH_PRIORITY: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_request_high_priority_s), sizeof(u64))); + err = request_high_priority_wrapper(session_data, (_mali_uk_request_high_priority_s __user *)arg); + break; + + case MALI_IOC_PENDING_SUBMIT: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pending_submit_s), sizeof(u64))); + err = pending_submit_wrapper(session_data, (_mali_uk_pending_submit_s __user *)arg); + break; + +#if defined(CONFIG_MALI400_PROFILING) + case MALI_IOC_PROFILING_ADD_EVENT: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_add_event_s), sizeof(u64))); + err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg); + break; + + case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_sw_counters_report_s), sizeof(u64))); + err = profiling_report_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_report_s __user *)arg); + break; + + case MALI_IOC_PROFILING_STREAM_FD_GET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_stream_fd_get_s), sizeof(u64))); + err = profiling_get_stream_fd_wrapper(session_data, (_mali_uk_profiling_stream_fd_get_s __user *)arg); + break; + + case MALI_IOC_PROILING_CONTROL_SET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_control_set_s), sizeof(u64))); + err = profiling_control_set_wrapper(session_data, (_mali_uk_profiling_control_set_s __user *)arg); + break; +#else + + case MALI_IOC_PROFILING_ADD_EVENT: /* FALL-THROUGH */ + case MALI_IOC_PROFILING_REPORT_SW_COUNTERS: /* FALL-THROUGH */ + MALI_DEBUG_PRINT(2, ("Profiling not supported\n")); + err = -ENOTTY; + break; +#endif + + case 
MALI_IOC_PROFILING_MEMORY_USAGE_GET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_profiling_memory_usage_get_s), sizeof(u64))); + err = mem_usage_get_wrapper(session_data, (_mali_uk_profiling_memory_usage_get_s __user *)arg); + break; + + case MALI_IOC_MEM_ALLOC: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_alloc_mem_s), sizeof(u64))); + err = mem_alloc_wrapper(session_data, (_mali_uk_alloc_mem_s __user *)arg); + break; + + case MALI_IOC_MEM_FREE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_free_mem_s), sizeof(u64))); + err = mem_free_wrapper(session_data, (_mali_uk_free_mem_s __user *)arg); + break; + + case MALI_IOC_MEM_BIND: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_bind_mem_s), sizeof(u64))); + err = mem_bind_wrapper(session_data, (_mali_uk_bind_mem_s __user *)arg); + break; + + case MALI_IOC_MEM_UNBIND: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_unbind_mem_s), sizeof(u64))); + err = mem_unbind_wrapper(session_data, (_mali_uk_unbind_mem_s __user *)arg); + break; + + case MALI_IOC_MEM_COW: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_mem_s), sizeof(u64))); + err = mem_cow_wrapper(session_data, (_mali_uk_cow_mem_s __user *)arg); + break; + + case MALI_IOC_MEM_COW_MODIFY_RANGE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_cow_modify_range_s), sizeof(u64))); + err = mem_cow_modify_range_wrapper(session_data, (_mali_uk_cow_modify_range_s __user *)arg); + break; + + case MALI_IOC_MEM_RESIZE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_resize_s), sizeof(u64))); + err = mem_resize_mem_wrapper(session_data, (_mali_uk_mem_resize_s __user *)arg); + break; + + case MALI_IOC_MEM_WRITE_SAFE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_mem_write_safe_s), sizeof(u64))); + err = mem_write_safe_wrapper(session_data, (_mali_uk_mem_write_safe_s __user *)arg); + break; + + case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_query_mmu_page_table_dump_size_s), sizeof(u64))); + err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg); + break; + + case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dump_mmu_page_table_s), sizeof(u64))); + err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg); + break; + + case MALI_IOC_MEM_DMA_BUF_GET_SIZE: +#ifdef CONFIG_DMA_SHARED_BUFFER + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_dma_buf_get_size_s), sizeof(u64))); + err = mali_dma_buf_get_size(session_data, (_mali_uk_dma_buf_get_size_s __user *)arg); +#else + MALI_DEBUG_PRINT(2, ("DMA-BUF not supported\n")); + err = -ENOTTY; +#endif + break; + + case MALI_IOC_PP_START_JOB: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_start_job_s), sizeof(u64))); + err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg); + break; + + case MALI_IOC_PP_AND_GP_START_JOB: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_and_gp_start_job_s), sizeof(u64))); + err = pp_and_gp_start_job_wrapper(session_data, (_mali_uk_pp_and_gp_start_job_s __user *)arg); + break; + + case MALI_IOC_PP_NUMBER_OF_CORES_GET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_number_of_cores_s), sizeof(u64))); + err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg); + break; + + case MALI_IOC_PP_CORE_VERSION_GET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_pp_core_version_s), sizeof(u64))); + err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg); + break; + + case MALI_IOC_PP_DISABLE_WB: + 
BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_pp_disable_wb_s), sizeof(u64))); + err = pp_disable_wb_wrapper(session_data, (_mali_uk_pp_disable_wb_s __user *)arg); + break; + + case MALI_IOC_GP2_START_JOB: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_start_job_s), sizeof(u64))); + err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg); + break; + + case MALI_IOC_GP2_NUMBER_OF_CORES_GET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_number_of_cores_s), sizeof(u64))); + err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg); + break; + + case MALI_IOC_GP2_CORE_VERSION_GET: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_gp_core_version_s), sizeof(u64))); + err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg); + break; + + case MALI_IOC_GP2_SUSPEND_RESPONSE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_gp_suspend_response_s), sizeof(u64))); + err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg); + break; + + case MALI_IOC_VSYNC_EVENT_REPORT: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_vsync_event_report_s), sizeof(u64))); + err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg); + break; + + case MALI_IOC_TIMELINE_GET_LATEST_POINT: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_get_latest_point_s), sizeof(u64))); + err = timeline_get_latest_point_wrapper(session_data, (_mali_uk_timeline_get_latest_point_s __user *)arg); + break; + case MALI_IOC_TIMELINE_WAIT: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_wait_s), sizeof(u64))); + err = timeline_wait_wrapper(session_data, (_mali_uk_timeline_wait_s __user *)arg); + break; + case MALI_IOC_TIMELINE_CREATE_SYNC_FENCE: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_timeline_create_sync_fence_s), sizeof(u64))); + err = timeline_create_sync_fence_wrapper(session_data, (_mali_uk_timeline_create_sync_fence_s __user *)arg); + break; + case MALI_IOC_SOFT_JOB_START: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_start_s), sizeof(u64))); + err = soft_job_start_wrapper(session_data, (_mali_uk_soft_job_start_s __user *)arg); + break; + case MALI_IOC_SOFT_JOB_SIGNAL: + BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_soft_job_signal_s), sizeof(u64))); + err = soft_job_signal_wrapper(session_data, (_mali_uk_soft_job_signal_s __user *)arg); + break; + + default: + MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg)); + err = -ENOTTY; + }; + + return err; +} + + +module_init(mali_module_init); +module_exit(mali_module_exit); + +MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE); +MODULE_AUTHOR("ARM Ltd."); +MODULE_VERSION(SVN_REV_STRING); diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_kernel_linux.h b/drivers/gpu/arm/mali400/linux/mali_kernel_linux.h --- a/drivers/gpu/arm/mali400/linux/mali_kernel_linux.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_kernel_linux.h 2018-05-06 08:49:49.178695419 +0200 @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/cdev.h>     /* character device definitions */
+#include <linux/idr.h>
+#include <linux/rbtree.h>
+#include "mali_kernel_license.h"
+#include "mali_osk_types.h"
+#include <linux/version.h>
+
+extern struct platform_device *mali_platform_device;
+
+/* After 3.19.0 the kernel dropped the CONFIG_PM_RUNTIME define, so define it ourselves. */
+#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+#define CONFIG_PM_RUNTIME 1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_kernel_sysfs.c b/drivers/gpu/arm/mali400/linux/mali_kernel_sysfs.c
--- a/drivers/gpu/arm/mali400/linux/mali_kernel_sysfs.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_kernel_sysfs.c	2018-05-06 08:49:49.178695419 +0200
@@ -0,0 +1,1410 @@
+/**
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/device.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_common.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_sysfs.h"
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+#include <linux/slab.h>
+#include "mali_osk_profiling.h"
+#endif
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_group.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_kernel_core.h"
+#include "mali_user_settings_db.h"
+#include "mali_profiling_internal.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+
+#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | src)
+#define PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(src, sub_job) ((1 << 24) | (1 << 16) | (sub_job << 8) | src)
+#define PRIVATE_DATA_COUNTER_IS_PP(a) ((((a) >> 24) & 0xFF) ? MALI_TRUE : MALI_FALSE)
+#define PRIVATE_DATA_COUNTER_GET_SRC(a) (a & 0xFF)
+#define PRIVATE_DATA_COUNTER_IS_SUB_JOB(a) ((((a) >> 16) & 0xFF) ?
MALI_TRUE : MALI_FALSE) +#define PRIVATE_DATA_COUNTER_GET_SUB_JOB(a) (((a) >> 8) & 0xFF) + +#define POWER_BUFFER_SIZE 3 + +static struct dentry *mali_debugfs_dir = NULL; + +typedef enum { + _MALI_DEVICE_SUSPEND, + _MALI_DEVICE_RESUME, + _MALI_DEVICE_DVFS_PAUSE, + _MALI_DEVICE_DVFS_RESUME, + _MALI_MAX_EVENTS +} _mali_device_debug_power_events; + +static const char *const mali_power_events[_MALI_MAX_EVENTS] = { + [_MALI_DEVICE_SUSPEND] = "suspend", + [_MALI_DEVICE_RESUME] = "resume", + [_MALI_DEVICE_DVFS_PAUSE] = "dvfs_pause", + [_MALI_DEVICE_DVFS_RESUME] = "dvfs_resume", +}; + +static mali_bool power_always_on_enabled = MALI_FALSE; + +static int open_copy_private_data(struct inode *inode, struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + +static ssize_t group_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) +{ + int r; + char buffer[64]; + struct mali_group *group; + + group = (struct mali_group *)filp->private_data; + MALI_DEBUG_ASSERT_POINTER(group); + + r = snprintf(buffer, 64, "%u\n", + mali_executor_group_is_disabled(group) ? 0 : 1); + + return simple_read_from_buffer(buf, count, offp, buffer, r); +} + +static ssize_t group_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp) +{ + int r; + char buffer[64]; + unsigned long val; + struct mali_group *group; + + group = (struct mali_group *)filp->private_data; + MALI_DEBUG_ASSERT_POINTER(group); + + if (count >= sizeof(buffer)) { + return -ENOMEM; + } + + if (copy_from_user(&buffer[0], buf, count)) { + return -EFAULT; + } + buffer[count] = '\0'; + + r = kstrtoul(&buffer[0], 10, &val); + if (0 != r) { + return -EINVAL; + } + + switch (val) { + case 1: + mali_executor_group_enable(group); + break; + case 0: + mali_executor_group_disable(group); + break; + default: + return -EINVAL; + break; + } + + *offp += count; + return count; +} + +static const struct file_operations group_enabled_fops = { + .owner = THIS_MODULE, + .open = open_copy_private_data, + .read = group_enabled_read, + .write = group_enabled_write, +}; + +static ssize_t hw_core_base_addr_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) +{ + int r; + char buffer[64]; + struct mali_hw_core *hw_core; + + hw_core = (struct mali_hw_core *)filp->private_data; + MALI_DEBUG_ASSERT_POINTER(hw_core); + + r = snprintf(buffer, 64, "0x%lX\n", hw_core->phys_addr); + + return simple_read_from_buffer(buf, count, offp, buffer, r); +} + +static const struct file_operations hw_core_base_addr_fops = { + .owner = THIS_MODULE, + .open = open_copy_private_data, + .read = hw_core_base_addr_read, +}; + +static ssize_t profiling_counter_src_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data); + u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data); + mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data); + u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data); + char buf[64]; + int r; + u32 val; + + if (MALI_TRUE == is_pp) { + /* PP counter */ + if (MALI_TRUE == is_sub_job) { + /* Get counter for a particular sub job */ + if (0 == src_id) { + val = mali_pp_job_get_pp_counter_sub_job_src0(sub_job); + } else { + val = mali_pp_job_get_pp_counter_sub_job_src1(sub_job); + } + } else { + /* Get default counter for all PP sub jobs */ + if (0 == src_id) { + val = mali_pp_job_get_pp_counter_global_src0(); + } else { + val = 
mali_pp_job_get_pp_counter_global_src1(); + } + } + } else { + /* GP counter */ + if (0 == src_id) { + val = mali_gp_job_get_gp_counter_src0(); + } else { + val = mali_gp_job_get_gp_counter_src1(); + } + } + + if (MALI_HW_CORE_NO_COUNTER == val) { + r = snprintf(buf, 64, "-1\n"); + } else { + r = snprintf(buf, 64, "%u\n", val); + } + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t profiling_counter_src_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + u32 is_pp = PRIVATE_DATA_COUNTER_IS_PP((uintptr_t)filp->private_data); + u32 src_id = PRIVATE_DATA_COUNTER_GET_SRC((uintptr_t)filp->private_data); + mali_bool is_sub_job = PRIVATE_DATA_COUNTER_IS_SUB_JOB((uintptr_t)filp->private_data); + u32 sub_job = PRIVATE_DATA_COUNTER_GET_SUB_JOB((uintptr_t)filp->private_data); + char buf[64]; + long val; + int ret; + + if (cnt >= sizeof(buf)) { + return -EINVAL; + } + + if (copy_from_user(&buf, ubuf, cnt)) { + return -EFAULT; + } + + buf[cnt] = 0; + + ret = kstrtol(buf, 10, &val); + if (ret < 0) { + return ret; + } + + if (val < 0) { + /* any negative input will disable counter */ + val = MALI_HW_CORE_NO_COUNTER; + } + + if (MALI_TRUE == is_pp) { + /* PP counter */ + if (MALI_TRUE == is_sub_job) { + /* Set counter for a particular sub job */ + if (0 == src_id) { + mali_pp_job_set_pp_counter_sub_job_src0(sub_job, (u32)val); + } else { + mali_pp_job_set_pp_counter_sub_job_src1(sub_job, (u32)val); + } + } else { + /* Set default counter for all PP sub jobs */ + if (0 == src_id) { + mali_pp_job_set_pp_counter_global_src0((u32)val); + } else { + mali_pp_job_set_pp_counter_global_src1((u32)val); + } + } + } else { + /* GP counter */ + if (0 == src_id) { + mali_gp_job_set_gp_counter_src0((u32)val); + } else { + mali_gp_job_set_gp_counter_src1((u32)val); + } + } + + *ppos += cnt; + return cnt; +} + +static const struct file_operations profiling_counter_src_fops = { + .owner = THIS_MODULE, + .open = open_copy_private_data, + .read = profiling_counter_src_read, + .write = profiling_counter_src_write, +}; + +static ssize_t l2_l2x_counter_srcx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) +{ + char buf[64]; + int r; + u32 val; + struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data; + + if (0 == src_id) { + val = mali_l2_cache_core_get_counter_src0(l2_core); + } else { + val = mali_l2_cache_core_get_counter_src1(l2_core); + } + + if (MALI_HW_CORE_NO_COUNTER == val) { + r = snprintf(buf, 64, "-1\n"); + } else { + r = snprintf(buf, 64, "%u\n", val); + } + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t l2_l2x_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) +{ + struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data; + char buf[64]; + long val; + int ret; + + if (cnt >= sizeof(buf)) { + return -EINVAL; + } + + if (copy_from_user(&buf, ubuf, cnt)) { + return -EFAULT; + } + + buf[cnt] = 0; + + ret = kstrtol(buf, 10, &val); + if (ret < 0) { + return ret; + } + + if (val < 0) { + /* any negative input will disable counter */ + val = MALI_HW_CORE_NO_COUNTER; + } + + mali_l2_cache_core_set_counter_src(l2_core, src_id, (u32)val); + + *ppos += cnt; + return cnt; +} + +static ssize_t l2_all_counter_srcx_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) +{ + char buf[64]; + long val; + int ret; + u32 l2_id; + struct mali_l2_cache_core *l2_cache; + + if 
(cnt >= sizeof(buf)) { + return -EINVAL; + } + + if (copy_from_user(&buf, ubuf, cnt)) { + return -EFAULT; + } + + buf[cnt] = 0; + + ret = kstrtol(buf, 10, &val); + if (ret < 0) { + return ret; + } + + if (val < 0) { + /* any negative input will disable counter */ + val = MALI_HW_CORE_NO_COUNTER; + } + + l2_id = 0; + l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id); + while (NULL != l2_cache) { + mali_l2_cache_core_set_counter_src(l2_cache, src_id, (u32)val); + + /* try next L2 */ + l2_id++; + l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id); + } + + *ppos += cnt; + return cnt; +} + +static ssize_t l2_l2x_counter_src0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 0); +} + +static ssize_t l2_l2x_counter_src1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + return l2_l2x_counter_srcx_read(filp, ubuf, cnt, ppos, 1); +} + +static ssize_t l2_l2x_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 0); +} + +static ssize_t l2_l2x_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + return l2_l2x_counter_srcx_write(filp, ubuf, cnt, ppos, 1); +} + +static ssize_t l2_all_counter_src0_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 0); +} + +static ssize_t l2_all_counter_src1_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + return l2_all_counter_srcx_write(filp, ubuf, cnt, ppos, 1); +} + +static const struct file_operations l2_l2x_counter_src0_fops = { + .owner = THIS_MODULE, + .open = open_copy_private_data, + .read = l2_l2x_counter_src0_read, + .write = l2_l2x_counter_src0_write, +}; + +static const struct file_operations l2_l2x_counter_src1_fops = { + .owner = THIS_MODULE, + .open = open_copy_private_data, + .read = l2_l2x_counter_src1_read, + .write = l2_l2x_counter_src1_write, +}; + +static const struct file_operations l2_all_counter_src0_fops = { + .owner = THIS_MODULE, + .write = l2_all_counter_src0_write, +}; + +static const struct file_operations l2_all_counter_src1_fops = { + .owner = THIS_MODULE, + .write = l2_all_counter_src1_write, +}; + +static ssize_t l2_l2x_counter_valx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id) +{ + char buf[64]; + int r; + u32 src0 = 0; + u32 val0 = 0; + u32 src1 = 0; + u32 val1 = 0; + u32 val = -1; + struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data; + + mali_l2_cache_core_get_counter_values(l2_core, &src0, &val0, &src1, &val1); + + if (0 == src_id) { + if (MALI_HW_CORE_NO_COUNTER != val0) { + val = val0; + } + } else { + if (MALI_HW_CORE_NO_COUNTER != val1) { + val = val1; + } + } + + r = snprintf(buf, 64, "%u\n", val); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t l2_l2x_counter_val0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 0); +} + +static ssize_t l2_l2x_counter_val1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 1); +} + +static const struct file_operations l2_l2x_counter_val0_fops = { + .owner = THIS_MODULE, + .open = open_copy_private_data, + .read = l2_l2x_counter_val0_read, +}; + +static const struct 
file_operations l2_l2x_counter_val1_fops = { + .owner = THIS_MODULE, + .open = open_copy_private_data, + .read = l2_l2x_counter_val1_read, +}; + +static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + unsigned long val; + int ret; + char buf[32]; + + cnt = min(cnt, sizeof(buf) - 1); + if (copy_from_user(buf, ubuf, cnt)) { + return -EFAULT; + } + buf[cnt] = '\0'; + + ret = kstrtoul(buf, 10, &val); + if (0 != ret) { + return ret; + } + + /* Update setting (not exactly thread safe) */ + if (1 == val && MALI_FALSE == power_always_on_enabled) { + power_always_on_enabled = MALI_TRUE; + _mali_osk_pm_dev_ref_get_sync(); + } else if (0 == val && MALI_TRUE == power_always_on_enabled) { + power_always_on_enabled = MALI_FALSE; + _mali_osk_pm_dev_ref_put(); + } + + *ppos += cnt; + return cnt; +} + +static ssize_t power_always_on_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + if (MALI_TRUE == power_always_on_enabled) { + return simple_read_from_buffer(ubuf, cnt, ppos, "1\n", 2); + } else { + return simple_read_from_buffer(ubuf, cnt, ppos, "0\n", 2); + } +} + +static const struct file_operations power_always_on_fops = { + .owner = THIS_MODULE, + .read = power_always_on_read, + .write = power_always_on_write, +}; + +static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]) - 1)) { + mali_pm_os_suspend(MALI_TRUE); + } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]) - 1)) { + mali_pm_os_resume(); + } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]) - 1)) { + mali_dev_pause(); + } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]) - 1)) { + mali_dev_resume(); + } + *ppos += cnt; + return cnt; +} + +static loff_t power_power_events_seek(struct file *file, loff_t offset, int orig) +{ + file->f_pos = offset; + return 0; +} + +static const struct file_operations power_power_events_fops = { + .owner = THIS_MODULE, + .write = power_power_events_write, + .llseek = power_power_events_seek, +}; + +#if MALI_STATE_TRACKING +static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v) +{ + u32 len = 0; + u32 size; + char *buf; + + size = seq_get_buf(seq_file, &buf); + + if (!size) { + return -ENOMEM; + } + + /* Create the internal state dump. 
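+	 * (Editorial note: seq_get_buf() above hands over the raw seq_file
+	 * buffer; the dump is assembled into it with snprintf() and
+	 * _mali_kernel_core_dump_state(), and the length actually used is
+	 * published with seq_commit() below.)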
+	 */
+	len = snprintf(buf + len, size - len, "Mali device driver %s\n", SVN_REV_STRING);
+	len += snprintf(buf + len, size - len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+	len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+	seq_commit(seq_file, len);
+
+	return 0;
+}
+
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+static const struct file_operations mali_seq_internal_state_fops = {
+	.owner = THIS_MODULE,
+	.open = mali_seq_internal_state_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	r = snprintf(buf, 64, "%u\n", _mali_internal_profiling_is_recording() ? 1 : 0);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	unsigned long val;
+	int ret;
+
+	if (cnt >= sizeof(buf)) {
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt)) {
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret < 0) {
+		return ret;
+	}
+
+	if (val != 0) {
+		u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+		/* check if we are already recording */
+		if (MALI_TRUE == _mali_internal_profiling_is_recording()) {
+			MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+			return -EFAULT;
+		}
+
+		/* check if we need to clear out an old recording first */
+		if (MALI_TRUE == _mali_internal_profiling_have_recording()) {
+			if (_MALI_OSK_ERR_OK != _mali_internal_profiling_clear()) {
+				MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+				return -EFAULT;
+			}
+		}
+
+		/* start recording profiling data */
+		if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) {
+			MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+			return -EFAULT;
+		}
+
+		MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+	} else {
+		/* stop recording profiling data */
+		u32 count = 0;
+		if (_MALI_OSK_ERR_OK != _mali_internal_profiling_stop(&count)) {
+			MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+			return -EFAULT;
+		}
+
+		MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+static const struct file_operations profiling_record_fops = {
+	.owner = THIS_MODULE,
+	.read = profiling_record_read,
+	.write = profiling_record_write,
+};
+
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+	loff_t *spos;
+
+	/* check if we have data available */
+	if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+		return NULL;
+	}
+
+	spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
+	if (NULL == spos) {
+		return NULL;
+	}
+
+	*spos = *pos;
+	return spos;
+}
+
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	loff_t *spos = v;
+
+	/* check if we have data available */
+	if (MALI_TRUE != _mali_internal_profiling_have_recording()) {
+		return NULL;
+	}
+
+	/* check if the next entry actually is available */
+	if (_mali_internal_profiling_get_count() <= (u32)(*spos + 1)) {
+		return NULL;
+	}
+
+	*pos = ++*spos;
+	return spos;
+}
+
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+	kfree(v);
+}
+
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+	loff_t *spos = v;
+	u32 index;
+	u64 timestamp;
+	u32 event_id;
+	u32 data[5];
+
+	index = (u32) * spos;
+
+	/* Retrieve all events */
+	if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+		seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+		return 0;
+	}
+
+	return 0;
+}
+
+static int profiling_events_show_human_readable(struct seq_file *seq_file, void *v)
+{
+#define MALI_EVENT_ID_IS_HW(event_id) (((event_id & 0x00FF0000) >= MALI_PROFILING_EVENT_CHANNEL_GP0) && ((event_id & 0x00FF0000) <= MALI_PROFILING_EVENT_CHANNEL_PP7))
+
+	static u64 start_time = 0;
+	loff_t *spos = v;
+	u32 index;
+	u64 timestamp;
+	u32 event_id;
+	u32 data[5];
+
+	index = (u32) * spos;
+
+	/* Retrieve all events */
+	if (_MALI_OSK_ERR_OK == _mali_internal_profiling_get_event(index, &timestamp, &event_id, data)) {
+		seq_printf(seq_file, "%llu %u %u %u %u %u %u # ", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+
+		if (0 == index) {
+			start_time = timestamp;
+		}
+
+		seq_printf(seq_file, "[%06u] ", index);
+
+		switch (event_id & 0x0F000000) {
+		case MALI_PROFILING_EVENT_TYPE_SINGLE:
+			seq_printf(seq_file, "SINGLE | ");
+			break;
+		case MALI_PROFILING_EVENT_TYPE_START:
+			seq_printf(seq_file, "START | ");
+			break;
+		case MALI_PROFILING_EVENT_TYPE_STOP:
+			seq_printf(seq_file, "STOP | ");
+			break;
+		case MALI_PROFILING_EVENT_TYPE_SUSPEND:
+			seq_printf(seq_file, "SUSPEND | ");
+			break;
+		case MALI_PROFILING_EVENT_TYPE_RESUME:
+			seq_printf(seq_file, "RESUME | ");
+			break;
+		default:
+			seq_printf(seq_file, "0x%01X | ", (event_id & 0x0F000000) >> 24);
+			break;
+		}
+
+		switch (event_id & 0x00FF0000) {
+		case MALI_PROFILING_EVENT_CHANNEL_SOFTWARE:
+			seq_printf(seq_file, "SW | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_GP0:
+			seq_printf(seq_file, "GP0 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_PP0:
+			seq_printf(seq_file, "PP0 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_PP1:
+			seq_printf(seq_file, "PP1 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_PP2:
+			seq_printf(seq_file, "PP2 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_PP3:
+			seq_printf(seq_file, "PP3 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_PP4:
+			seq_printf(seq_file, "PP4 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_PP5:
+			seq_printf(seq_file, "PP5 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_PP6:
+			seq_printf(seq_file, "PP6 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_PP7:
+			seq_printf(seq_file, "PP7 | ");
+			break;
+		case MALI_PROFILING_EVENT_CHANNEL_GPU:
+			seq_printf(seq_file, "GPU | ");
+			break;
+		default:
+			seq_printf(seq_file, "0x%02X | ", (event_id & 0x00FF0000) >> 16);
+			break;
+		}
+
+		if (MALI_EVENT_ID_IS_HW(event_id)) {
+			if (((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_START) || ((event_id & 0x0F000000) == MALI_PROFILING_EVENT_TYPE_STOP)) {
+				switch (event_id & 0x0000FFFF) {
+				case MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL:
+					seq_printf(seq_file, "PHYSICAL | ");
+					break;
+				case MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL:
+					seq_printf(seq_file, "VIRTUAL | ");
+					break;
+				default:
+					seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+					break;
+				}
+			} else {
+				seq_printf(seq_file, "0x%04X | ", event_id & 0x0000FFFF);
+			}
+		} else {
+			seq_printf(seq_file, "0x%04X | ",
event_id & 0x0000FFFF); + } + + seq_printf(seq_file, "T0 + 0x%016llX\n", timestamp - start_time); + + return 0; + } + + return 0; +} + +static const struct seq_operations profiling_events_seq_ops = { + .start = profiling_events_start, + .next = profiling_events_next, + .stop = profiling_events_stop, + .show = profiling_events_show +}; + +static int profiling_events_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &profiling_events_seq_ops); +} + +static const struct file_operations profiling_events_fops = { + .owner = THIS_MODULE, + .open = profiling_events_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static const struct seq_operations profiling_events_human_readable_seq_ops = { + .start = profiling_events_start, + .next = profiling_events_next, + .stop = profiling_events_stop, + .show = profiling_events_show_human_readable +}; + +static int profiling_events_human_readable_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &profiling_events_human_readable_seq_ops); +} + +static const struct file_operations profiling_events_human_readable_fops = { + .owner = THIS_MODULE, + .open = profiling_events_human_readable_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +#endif + +static int memory_debugfs_show(struct seq_file *s, void *private_data) +{ +#ifdef MALI_MEM_SWAP_TRACKING + seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s %-10s \n"\ + "=================================================================================================================================\n", + "Name (:bytes)", "pid", "mali_mem", "max_mali_mem", + "external_mem", "ump_mem", "dma_mem", "swap_mem"); +#else + seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s \n"\ + "========================================================================================================================\n", + "Name (:bytes)", "pid", "mali_mem", "max_mali_mem", + "external_mem", "ump_mem", "dma_mem"); +#endif + mali_session_memory_tracking(s); + return 0; +} + +static int memory_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, memory_debugfs_show, inode->i_private); +} + +static const struct file_operations memory_usage_fops = { + .owner = THIS_MODULE, + .open = memory_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + size_t r; + u32 uval = _mali_ukk_utilization_gp_pp(); + + r = snprintf(buf, 64, "%u\n", uval); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t utilization_gp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + size_t r; + u32 uval = _mali_ukk_utilization_gp(); + + r = snprintf(buf, 64, "%u\n", uval); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t utilization_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + size_t r; + u32 uval = _mali_ukk_utilization_pp(); + + r = snprintf(buf, 64, "%u\n", uval); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + + +static const struct file_operations utilization_gp_pp_fops = { + .owner = THIS_MODULE, + .read = utilization_gp_pp_read, +}; + +static const struct file_operations utilization_gp_fops = { + .owner = THIS_MODULE, + .read = utilization_gp_read, +}; + +static const struct file_operations 
utilization_pp_fops = { + .owner = THIS_MODULE, + .read = utilization_pp_read, +}; + +static ssize_t user_settings_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + unsigned long val; + int ret; + _mali_uk_user_setting_t setting; + char buf[32]; + + cnt = min(cnt, sizeof(buf) - 1); + if (copy_from_user(buf, ubuf, cnt)) { + return -EFAULT; + } + buf[cnt] = '\0'; + + ret = kstrtoul(buf, 10, &val); + if (0 != ret) { + return ret; + } + + /* Update setting */ + setting = (_mali_uk_user_setting_t)(filp->private_data); + mali_set_user_setting(setting, val); + + *ppos += cnt; + return cnt; +} + +static ssize_t user_settings_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + size_t r; + u32 value; + _mali_uk_user_setting_t setting; + + setting = (_mali_uk_user_setting_t)(filp->private_data); + value = mali_get_user_setting(setting); + + r = snprintf(buf, 64, "%u\n", value); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static const struct file_operations user_settings_fops = { + .owner = THIS_MODULE, + .open = open_copy_private_data, + .read = user_settings_read, + .write = user_settings_write, +}; + +static int mali_sysfs_user_settings_register(void) +{ + struct dentry *mali_user_settings_dir = debugfs_create_dir("userspace_settings", mali_debugfs_dir); + + if (mali_user_settings_dir != NULL) { + long i; + for (i = 0; i < _MALI_UK_USER_SETTING_MAX; i++) { + debugfs_create_file(_mali_uk_user_setting_descriptions[i], + 0600, mali_user_settings_dir, (void *)i, + &user_settings_fops); + } + } + + return 0; +} + +static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp) +{ + int ret; + char buffer[32]; + unsigned long val; + + if (count >= sizeof(buffer)) { + return -ENOMEM; + } + + if (copy_from_user(&buffer[0], buf, count)) { + return -EFAULT; + } + buffer[count] = '\0'; + + ret = kstrtoul(&buffer[0], 10, &val); + if (0 != ret) { + return -EINVAL; + } + + ret = mali_executor_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */ + if (ret) { + return ret; + } + + *offp += count; + return count; +} + +static ssize_t pp_num_cores_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) +{ + int r; + char buffer[64]; + + r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_enabled()); + + return simple_read_from_buffer(buf, count, offp, buffer, r); +} + +static const struct file_operations pp_num_cores_enabled_fops = { + .owner = THIS_MODULE, + .write = pp_num_cores_enabled_write, + .read = pp_num_cores_enabled_read, + .llseek = default_llseek, +}; + +static ssize_t pp_num_cores_total_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) +{ + int r; + char buffer[64]; + + r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_total()); + + return simple_read_from_buffer(buf, count, offp, buffer, r); +} + +static const struct file_operations pp_num_cores_total_fops = { + .owner = THIS_MODULE, + .read = pp_num_cores_total_read, +}; + +static ssize_t pp_core_scaling_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp) +{ + int ret; + char buffer[32]; + unsigned long val; + + if (count >= sizeof(buffer)) { + return -ENOMEM; + } + + if (copy_from_user(&buffer[0], buf, count)) { + return -EFAULT; + } + buffer[count] = '\0'; + + ret = kstrtoul(&buffer[0], 10, &val); + if (0 != ret) { + return -EINVAL; + } + + switch (val) { + case 1: + 
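+		/* re-enable dynamic PP core scaling; e.g. (assuming the driver's
+		 * debugfs directory was created with the default name "mali"):
+		 *   echo 1 > /sys/kernel/debug/mali/pp/core_scaling_enabled */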
mali_executor_core_scaling_enable(); + break; + case 0: + mali_executor_core_scaling_disable(); + break; + default: + return -EINVAL; + break; + } + + *offp += count; + return count; +} + +static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) +{ + return simple_read_from_buffer(buf, count, offp, mali_executor_core_scaling_is_enabled() ? "1\n" : "0\n", 2); +} +static const struct file_operations pp_core_scaling_enabled_fops = { + .owner = THIS_MODULE, + .write = pp_core_scaling_enabled_write, + .read = pp_core_scaling_enabled_read, + .llseek = default_llseek, +}; + +static ssize_t version_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) +{ + int r = 0; + char buffer[64]; + + switch (mali_kernel_core_get_product_id()) { + case _MALI_PRODUCT_ID_MALI200: + r = snprintf(buffer, 64, "Mali-200\n"); + break; + case _MALI_PRODUCT_ID_MALI300: + r = snprintf(buffer, 64, "Mali-300\n"); + break; + case _MALI_PRODUCT_ID_MALI400: + r = snprintf(buffer, 64, "Mali-400 MP\n"); + break; + case _MALI_PRODUCT_ID_MALI450: + r = snprintf(buffer, 64, "Mali-450 MP\n"); + break; + case _MALI_PRODUCT_ID_MALI470: + r = snprintf(buffer, 64, "Mali-470 MP\n"); + break; + case _MALI_PRODUCT_ID_UNKNOWN: + return -EINVAL; + break; + }; + + return simple_read_from_buffer(buf, count, offp, buffer, r); +} + +static const struct file_operations version_fops = { + .owner = THIS_MODULE, + .read = version_read, +}; + +#if defined(DEBUG) +static int timeline_debugfs_show(struct seq_file *s, void *private_data) +{ + struct mali_session_data *session, *tmp; + u32 session_seq = 1; + + seq_printf(s, "timeline system info: \n=================\n\n"); + + mali_session_lock(); + MALI_SESSION_FOREACH(session, tmp, link) { + seq_printf(s, "session %d <%p> start:\n", session_seq, session); + mali_timeline_debug_print_system(session->timeline_system, s); + seq_printf(s, "session %d end\n\n\n", session_seq++); + } + mali_session_unlock(); + + return 0; +} + +static int timeline_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, timeline_debugfs_show, inode->i_private); +} + +static const struct file_operations timeline_dump_fops = { + .owner = THIS_MODULE, + .open = timeline_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; +#endif + +int mali_sysfs_register(const char *mali_dev_name) +{ + mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL); + if (ERR_PTR(-ENODEV) == mali_debugfs_dir) { + /* Debugfs not supported. 
*/ + mali_debugfs_dir = NULL; + } else { + if (NULL != mali_debugfs_dir) { + /* Debugfs directory created successfully; create files now */ + struct dentry *mali_power_dir; + struct dentry *mali_gp_dir; + struct dentry *mali_pp_dir; + struct dentry *mali_l2_dir; + struct dentry *mali_profiling_dir; + + debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops); + + mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir); + if (mali_power_dir != NULL) { + debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops); + debugfs_create_file("power_events", 0200, mali_power_dir, NULL, &power_power_events_fops); + } + + mali_gp_dir = debugfs_create_dir("gp", mali_debugfs_dir); + if (mali_gp_dir != NULL) { + u32 num_groups; + long i; + + num_groups = mali_group_get_glob_num_groups(); + for (i = 0; i < num_groups; i++) { + struct mali_group *group = mali_group_get_glob_group(i); + + struct mali_gp_core *gp_core = mali_group_get_gp_core(group); + if (NULL != gp_core) { + struct dentry *mali_gp_gpx_dir; + mali_gp_gpx_dir = debugfs_create_dir("gp0", mali_gp_dir); + if (NULL != mali_gp_gpx_dir) { + debugfs_create_file("base_addr", 0400, mali_gp_gpx_dir, &gp_core->hw_core, &hw_core_base_addr_fops); + debugfs_create_file("enabled", 0600, mali_gp_gpx_dir, group, &group_enabled_fops); + } + break; /* no need to look for any other GP cores */ + } + + } + } + + mali_pp_dir = debugfs_create_dir("pp", mali_debugfs_dir); + if (mali_pp_dir != NULL) { + u32 num_groups; + long i; + + debugfs_create_file("num_cores_total", 0400, mali_pp_dir, NULL, &pp_num_cores_total_fops); + debugfs_create_file("num_cores_enabled", 0600, mali_pp_dir, NULL, &pp_num_cores_enabled_fops); + debugfs_create_file("core_scaling_enabled", 0600, mali_pp_dir, NULL, &pp_core_scaling_enabled_fops); + + num_groups = mali_group_get_glob_num_groups(); + for (i = 0; i < num_groups; i++) { + struct mali_group *group = mali_group_get_glob_group(i); + + struct mali_pp_core *pp_core = mali_group_get_pp_core(group); + if (NULL != pp_core) { + char buf[16]; + struct dentry *mali_pp_ppx_dir; + _mali_osk_snprintf(buf, sizeof(buf), "pp%u", mali_pp_core_get_id(pp_core)); + mali_pp_ppx_dir = debugfs_create_dir(buf, mali_pp_dir); + if (NULL != mali_pp_ppx_dir) { + debugfs_create_file("base_addr", 0400, mali_pp_ppx_dir, &pp_core->hw_core, &hw_core_base_addr_fops); + if (!mali_group_is_virtual(group)) { + debugfs_create_file("enabled", 0600, mali_pp_ppx_dir, group, &group_enabled_fops); + } + } + } + } + } + + mali_l2_dir = debugfs_create_dir("l2", mali_debugfs_dir); + if (mali_l2_dir != NULL) { + struct dentry *mali_l2_all_dir; + u32 l2_id; + struct mali_l2_cache_core *l2_cache; + + mali_l2_all_dir = debugfs_create_dir("all", mali_l2_dir); + if (mali_l2_all_dir != NULL) { + debugfs_create_file("counter_src0", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src0_fops); + debugfs_create_file("counter_src1", 0200, mali_l2_all_dir, NULL, &l2_all_counter_src1_fops); + } + + l2_id = 0; + l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id); + while (NULL != l2_cache) { + char buf[16]; + struct dentry *mali_l2_l2x_dir; + _mali_osk_snprintf(buf, sizeof(buf), "l2%u", l2_id); + mali_l2_l2x_dir = debugfs_create_dir(buf, mali_l2_dir); + if (NULL != mali_l2_l2x_dir) { + debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops); + debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops); + debugfs_create_file("counter_val0", 0600, mali_l2_l2x_dir, 
l2_cache, &l2_l2x_counter_val0_fops); + debugfs_create_file("counter_val1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val1_fops); + debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops); + } + + /* try next L2 */ + l2_id++; + l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id); + } + } + + debugfs_create_file("gpu_memory", 0444, mali_debugfs_dir, NULL, &memory_usage_fops); + + debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops); + debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops); + debugfs_create_file("utilization_pp", 0400, mali_debugfs_dir, NULL, &utilization_pp_fops); + + mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir); + if (mali_profiling_dir != NULL) { + u32 max_sub_jobs; + long i; + struct dentry *mali_profiling_gp_dir; + struct dentry *mali_profiling_pp_dir; +#if defined(CONFIG_MALI400_INTERNAL_PROFILING) + struct dentry *mali_profiling_proc_dir; +#endif + /* + * Create directory where we can set GP HW counters. + */ + mali_profiling_gp_dir = debugfs_create_dir("gp", mali_profiling_dir); + if (mali_profiling_gp_dir != NULL) { + debugfs_create_file("counter_src0", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(0), &profiling_counter_src_fops); + debugfs_create_file("counter_src1", 0600, mali_profiling_gp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_GP(1), &profiling_counter_src_fops); + } + + /* + * Create directory where we can set PP HW counters. + * Possible override with specific HW counters for a particular sub job + * (Disable core scaling before using the override!) + */ + mali_profiling_pp_dir = debugfs_create_dir("pp", mali_profiling_dir); + if (mali_profiling_pp_dir != NULL) { + debugfs_create_file("counter_src0", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(0), &profiling_counter_src_fops); + debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops); + } + + max_sub_jobs = mali_executor_get_num_cores_total(); + for (i = 0; i < max_sub_jobs; i++) { + char buf[16]; + struct dentry *mali_profiling_pp_x_dir; + _mali_osk_snprintf(buf, sizeof(buf), "%u", i); + mali_profiling_pp_x_dir = debugfs_create_dir(buf, mali_profiling_pp_dir); + if (NULL != mali_profiling_pp_x_dir) { + debugfs_create_file("counter_src0", + 0600, mali_profiling_pp_x_dir, + (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(0, i), + &profiling_counter_src_fops); + debugfs_create_file("counter_src1", + 0600, mali_profiling_pp_x_dir, + (void *)PRIVATE_DATA_COUNTER_MAKE_PP_SUB_JOB(1, i), + &profiling_counter_src_fops); + } + } + +#if defined(CONFIG_MALI400_INTERNAL_PROFILING) + mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir); + if (mali_profiling_proc_dir != NULL) { + struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir); + if (mali_profiling_proc_default_dir != NULL) { + debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, (void *)_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, &user_settings_fops); + } + } + debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops); + debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops); + debugfs_create_file("events_human_readable", 0400, mali_profiling_dir, NULL, &profiling_events_human_readable_fops); +#endif + } + +#if MALI_STATE_TRACKING + 
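+			/* textual dump of the complete internal driver state;
+			 * only available when built with MALI_STATE_TRACKING */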
debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops); +#endif + +#if defined(DEBUG) + debugfs_create_file("timeline_dump", 0400, mali_debugfs_dir, NULL, &timeline_dump_fops); +#endif + if (mali_sysfs_user_settings_register()) { + /* Failed to create the debugfs entries for the user settings DB. */ + MALI_DEBUG_PRINT(2, ("Failed to create user setting debugfs files. Ignoring...\n")); + } + } + } + + /* Success! */ + return 0; +} + +int mali_sysfs_unregister(void) +{ + if (NULL != mali_debugfs_dir) { + debugfs_remove_recursive(mali_debugfs_dir); + } + return 0; +} + +#else /* MALI_LICENSE_IS_GPL */ + +/* Dummy implementations for non-GPL */ + +int mali_sysfs_register(struct mali_dev *device, dev_t dev, const char *mali_dev_name) +{ + return 0; +} + +int mali_sysfs_unregister(void) +{ + return 0; +} + +#endif /* MALI_LICENSE_IS_GPL */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_kernel_sysfs.h b/drivers/gpu/arm/mali400/linux/mali_kernel_sysfs.h --- a/drivers/gpu/arm/mali400/linux/mali_kernel_sysfs.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_kernel_sysfs.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2011-2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_KERNEL_SYSFS_H__ +#define __MALI_KERNEL_SYSFS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#define MALI_PROC_DIR "driver/mali" + +int mali_sysfs_register(const char *mali_dev_name); +int mali_sysfs_unregister(void); + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_KERNEL_LINUX_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_linux_trace.h b/drivers/gpu/arm/mali400/linux/mali_linux_trace.h --- a/drivers/gpu/arm/mali400/linux/mali_linux_trace.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_linux_trace.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ) +#define MALI_LINUX_TRACE_H + +#include + +#include +#include + +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE mali_linux_trace + +/** + * Define the tracepoint used to communicate the status of a GPU. Called + * when a GPU turns on or turns off. + * + * @param event_id The type of the event. This parameter is a bitfield + * encoding the type of the event. + * + * @param d0 First data parameter. + * @param d1 Second data parameter. + * @param d2 Third data parameter. + * @param d3 Fourth data parameter. + * @param d4 Fifth data parameter. 
+ */
+TRACE_EVENT(mali_timeline_event,
+
+	    TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1,
+		     unsigned int d2, unsigned int d3, unsigned int d4),
+
+	    TP_ARGS(event_id, d0, d1, d2, d3, d4),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int, event_id)
+		    __field(unsigned int, d0)
+		    __field(unsigned int, d1)
+		    __field(unsigned int, d2)
+		    __field(unsigned int, d3)
+		    __field(unsigned int, d4)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->event_id = event_id;
+		    __entry->d0 = d0;
+		    __entry->d1 = d1;
+		    __entry->d2 = d2;
+		    __entry->d3 = d3;
+		    __entry->d4 = d4;
+	    ),
+
+	    TP_printk("event=%d", __entry->event_id)
+	   );
+
+/**
+ * Define a tracepoint used to register the value of a hardware counter.
+ * Hardware counters belonging to the vertex or fragment processor are
+ * reported via this tracepoint each frame, whilst L2 cache hardware
+ * counters are reported continuously.
+ *
+ * @param counter_id The counter ID.
+ * @param value The value of the counter.
+ */
+TRACE_EVENT(mali_hw_counter,
+
+	    TP_PROTO(unsigned int counter_id, unsigned int value),
+
+	    TP_ARGS(counter_id, value),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int, counter_id)
+		    __field(unsigned int, value)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->counter_id = counter_id;
+		    __entry->value = value;
+	    ),
+
+	    TP_printk("event %d = %d", __entry->counter_id, __entry->value)
+	   );
+
+/**
+ * Define a tracepoint used to send a bundle of software counters.
+ *
+ * @param pid The process id the counters belong to.
+ * @param tid The thread id the counters belong to.
+ * @param surface_id The rendering surface the counters relate to.
+ * @param counters The bundle of counters.
+ */
+TRACE_EVENT(mali_sw_counters,
+
+	    TP_PROTO(pid_t pid, pid_t tid, void *surface_id, unsigned int *counters),
+
+	    TP_ARGS(pid, tid, surface_id, counters),
+
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(pid_t, tid)
+		    __field(void *, surface_id)
+		    __field(unsigned int *, counters)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->pid = pid;
+		    __entry->tid = tid;
+		    __entry->surface_id = surface_id;
+		    __entry->counters = counters;
+	    ),
+
+	    TP_printk("counters were %s", __entry->counters == NULL ? "NULL" : "not NULL")
+	   );
+
+/**
+ * Define a tracepoint used to gather core activity for systrace
+ * @param pid The process id from which the core activity originates
+ * @param active If the core is active (1) or not (0)
+ * @param core_type The type of core active, either GP (1) or PP (0)
+ * @param core_id The core id that is active for the core_type
+ * @param frame_builder_id The frame builder id associated with this core activity
+ * @param flush_id The flush id associated with this core activity
+ */
+TRACE_EVENT(mali_core_active,
+
+	    TP_PROTO(pid_t pid, unsigned int active, unsigned int core_type, unsigned int core_id, unsigned int frame_builder_id, unsigned int flush_id),
+
+	    TP_ARGS(pid, active, core_type, core_id, frame_builder_id, flush_id),
+
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(unsigned int, active)
+		    __field(unsigned int, core_type)
+		    __field(unsigned int, core_id)
+		    __field(unsigned int, frame_builder_id)
+		    __field(unsigned int, flush_id)
+	    ),
+
+	    TP_fast_assign(
+		    __entry->pid = pid;
+		    __entry->active = active;
+		    __entry->core_type = core_type;
+		    __entry->core_id = core_id;
+		    __entry->frame_builder_id = frame_builder_id;
+		    __entry->flush_id = flush_id;
+	    ),
+
+	    TP_printk("%s|%d|%s%i:%x|%d", __entry->active ? "S" : "F", __entry->pid, __entry->core_type ? "GP" : "PP", __entry->core_id, __entry->flush_id, __entry->frame_builder_id)
+	   );
+
+#endif /* MALI_LINUX_TRACE_H */
+
+/* This part must exist outside the header guard.
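+ * define_trace.h sets TRACE_HEADER_MULTI_READ and re-includes this header,
+ * expanding the TRACE_EVENT() macros above into the real tracepoint
+ * definitions.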
*/ +#include + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_block_alloc.c b/drivers/gpu/arm/mali400/linux/mali_memory_block_alloc.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_block_alloc.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_block_alloc.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,362 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_memory.h" +#include "mali_memory_block_alloc.h" +#include "mali_osk.h" +#include + + +static mali_block_allocator *mali_mem_block_gobal_allocator = NULL; + +unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item) +{ + return (item->phy_addr & ~(MALI_BLOCK_REF_MASK)); +} + + +unsigned long _mali_blk_item_get_pfn(mali_block_item *item) +{ + return (item->phy_addr / MALI_BLOCK_SIZE); +} + + +u32 mali_mem_block_get_ref_count(mali_page_node *node) +{ + MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK); + return (node->blk_it->phy_addr & MALI_BLOCK_REF_MASK); +} + + +/* Increase the refence count +* It not atomic, so it need to get sp_lock before call this function +*/ + +u32 mali_mem_block_add_ref(mali_page_node *node) +{ + MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK); + MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) < MALI_BLOCK_MAX_REF_COUNT); + return (node->blk_it->phy_addr++ & MALI_BLOCK_REF_MASK); +} + +/* Decase the refence count +* It not atomic, so it need to get sp_lock before call this function +*/ +u32 mali_mem_block_dec_ref(mali_page_node *node) +{ + MALI_DEBUG_ASSERT(node->type == MALI_PAGE_NODE_BLOCK); + MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(node) > 0); + return (node->blk_it->phy_addr-- & MALI_BLOCK_REF_MASK); +} + + +static mali_block_allocator *mali_mem_block_allocator_create(u32 base_address, u32 size) +{ + mali_block_allocator *info; + u32 usable_size; + u32 num_blocks; + mali_page_node *m_node; + mali_block_item *mali_blk_items = NULL; + int i = 0; + + usable_size = size & ~(MALI_BLOCK_SIZE - 1); + MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size)); + MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size)); + num_blocks = usable_size / MALI_BLOCK_SIZE; + MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks)); + + if (usable_size == 0) { + MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size)); + return NULL; + } + + info = _mali_osk_calloc(1, sizeof(mali_block_allocator)); + if (NULL != info) { + INIT_LIST_HEAD(&info->free); + spin_lock_init(&info->sp_lock); + info->total_num = num_blocks; + mali_blk_items = _mali_osk_calloc(1, sizeof(mali_block_item) * num_blocks); + + if (mali_blk_items) { + info->items = mali_blk_items; + /* add blocks(4k size) to free list*/ + for (i = 0 ; i < num_blocks ; i++) { + /* add block information*/ + mali_blk_items[i].phy_addr = base_address + (i * MALI_BLOCK_SIZE); + /* add to free list */ + m_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK); + if (m_node == NULL) + goto fail; + 
_mali_page_node_add_block_item(m_node, &(mali_blk_items[i])); + list_add_tail(&m_node->list, &info->free); + atomic_add(1, &info->free_num); + } + return info; + } + } +fail: + mali_mem_block_allocator_destroy(); + return NULL; +} + +void mali_mem_block_allocator_destroy(void) +{ + struct mali_page_node *m_page, *m_tmp; + mali_block_allocator *info = mali_mem_block_gobal_allocator; + MALI_DEBUG_ASSERT_POINTER(info); + MALI_DEBUG_PRINT(4, ("Memory block destroy !\n")); + + if (NULL == info) + return; + + list_for_each_entry_safe(m_page, m_tmp , &info->free, list) { + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK); + list_del(&m_page->list); + kfree(m_page); + } + + _mali_osk_free(info->items); + _mali_osk_free(info); +} + +u32 mali_mem_block_release(mali_mem_backend *mem_bkend) +{ + mali_mem_allocation *alloc = mem_bkend->mali_allocation; + u32 free_pages_nr = 0; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK); + + /* Unmap the memory from the mali virtual address space. */ + mali_mem_block_mali_unmap(alloc); + mutex_lock(&mem_bkend->mutex); + free_pages_nr = mali_mem_block_free(&mem_bkend->block_mem); + mutex_unlock(&mem_bkend->mutex); + return free_pages_nr; +} + + +int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size) +{ + struct mali_page_node *m_page, *m_tmp; + size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE; + mali_block_allocator *info = mali_mem_block_gobal_allocator; + MALI_DEBUG_ASSERT_POINTER(info); + + MALI_DEBUG_PRINT(4, ("BLOCK Mem: Allocate size = 0x%x\n", size)); + /*do some init */ + INIT_LIST_HEAD(&block_mem->pfns); + + spin_lock(&info->sp_lock); + /*check if have enough space*/ + if (atomic_read(&info->free_num) > page_count) { + list_for_each_entry_safe(m_page, m_tmp , &info->free, list) { + if (page_count > 0) { + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK); + MALI_DEBUG_ASSERT(mali_mem_block_get_ref_count(m_page) == 0); + list_move(&m_page->list, &block_mem->pfns); + block_mem->count++; + atomic_dec(&info->free_num); + _mali_page_node_ref(m_page); + } else { + break; + } + page_count--; + } + } else { + /* can't allocate from BLOCK memory*/ + spin_unlock(&info->sp_lock); + return -1; + } + + spin_unlock(&info->sp_lock); + return 0; +} + +u32 mali_mem_block_free(mali_mem_block_mem *block_mem) +{ + u32 free_pages_nr = 0; + + free_pages_nr = mali_mem_block_free_list(&block_mem->pfns); + MALI_DEBUG_PRINT(4, ("BLOCK Mem free : allocated size = 0x%x, free size = 0x%x\n", block_mem->count * _MALI_OSK_MALI_PAGE_SIZE, + free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE)); + block_mem->count = 0; + MALI_DEBUG_ASSERT(list_empty(&block_mem->pfns)); + + return free_pages_nr; +} + + +u32 mali_mem_block_free_list(struct list_head *list) +{ + struct mali_page_node *m_page, *m_tmp; + mali_block_allocator *info = mali_mem_block_gobal_allocator; + u32 free_pages_nr = 0; + + if (info) { + spin_lock(&info->sp_lock); + list_for_each_entry_safe(m_page, m_tmp , list, list) { + if (1 == _mali_page_node_get_ref_count(m_page)) { + free_pages_nr++; + } + mali_mem_block_free_node(m_page); + } + spin_unlock(&info->sp_lock); + } + return free_pages_nr; +} + +/* free the node,*/ +void mali_mem_block_free_node(struct mali_page_node *node) +{ + mali_block_allocator *info = mali_mem_block_gobal_allocator; + + /* only handle BLOCK node */ + if (node->type == MALI_PAGE_NODE_BLOCK && info) { + /*Need to make this atomic?*/ + if (1 == _mali_page_node_get_ref_count(node)) { + /*Move to free list*/ + _mali_page_node_unref(node); + list_move_tail(&node->list, 
&info->free); + atomic_add(1, &info->free_num); + } else { + _mali_page_node_unref(node); + list_del(&node->list); + kfree(node); + } + } +} + +/* unref the node, but not free it */ +_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node) +{ + mali_block_allocator *info = mali_mem_block_gobal_allocator; + mali_page_node *new_node; + + /* only handle BLOCK node */ + if (node->type == MALI_PAGE_NODE_BLOCK && info) { + /*Need to make this atomic?*/ + if (1 == _mali_page_node_get_ref_count(node)) { + /* allocate a new node, Add to free list, keep the old node*/ + _mali_page_node_unref(node); + new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK); + if (new_node) { + memcpy(new_node, node, sizeof(mali_page_node)); + list_add(&new_node->list, &info->free); + atomic_add(1, &info->free_num); + } else + return _MALI_OSK_ERR_FAULT; + + } else { + _mali_page_node_unref(node); + } + } + return _MALI_OSK_ERR_OK; +} + + +int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props) +{ + struct mali_page_directory *pagedir = session->page_directory; + struct mali_page_node *m_page; + dma_addr_t phys; + u32 virt = vaddr; + u32 prop = props; + + list_for_each_entry(m_page, &block_mem->pfns, list) { + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK); + phys = _mali_page_node_get_dma_addr(m_page); +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) + /* Verify that the "physical" address is 32-bit and + * usable for Mali, when on a system with bus addresses + * wider than 32-bit. */ + MALI_DEBUG_ASSERT(0 == (phys >> 32)); +#endif + mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop); + virt += MALI_MMU_PAGE_SIZE; + } + + return 0; +} + +void mali_mem_block_mali_unmap(mali_mem_allocation *alloc) +{ + struct mali_session_data *session; + MALI_DEBUG_ASSERT_POINTER(alloc); + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + mali_session_memory_lock(session); + mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, + alloc->flags); + mali_session_memory_unlock(session); +} + + +int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma) +{ + int ret; + mali_mem_block_mem *block_mem = &mem_bkend->block_mem; + unsigned long addr = vma->vm_start; + struct mali_page_node *m_page; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_BLOCK); + + list_for_each_entry(m_page, &block_mem->pfns, list) { + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_BLOCK); + ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page)); + + if (unlikely(0 != ret)) { + return -EFAULT; + } + addr += _MALI_OSK_MALI_PAGE_SIZE; + + } + + return 0; +} + + +_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size) +{ + mali_block_allocator *allocator; + + /* Do the low level linux operation first */ + + /* Request ownership of the memory */ + if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(start, size, "Dedicated Mali GPU memory")) { + MALI_DEBUG_PRINT(1, ("Failed to request memory region for frame buffer (0x%08X - 0x%08X)\n", start, start + size - 1)); + return _MALI_OSK_ERR_FAULT; + } + + /* Create generic block allocator object to handle it */ + allocator = mali_mem_block_allocator_create(start, size); + + if (NULL == allocator) { + MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n")); + _mali_osk_mem_unreqregion(start, size); + MALI_ERROR(_MALI_OSK_ERR_FAULT); + } + + mali_mem_block_gobal_allocator = (mali_block_allocator *)allocator; 
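+	/* from this point on mali_memory_have_dedicated_memory() reports
+	 * MALI_TRUE and block allocations are served from this region */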
+ + return _MALI_OSK_ERR_OK; +} + +mali_bool mali_memory_have_dedicated_memory(void) +{ + return mali_mem_block_gobal_allocator ? MALI_TRUE : MALI_FALSE; +} + +u32 mali_mem_block_allocator_stat(void) +{ + mali_block_allocator *allocator = mali_mem_block_gobal_allocator; + MALI_DEBUG_ASSERT_POINTER(allocator); + + return (allocator->total_num - atomic_read(&allocator->free_num)) * _MALI_OSK_MALI_PAGE_SIZE; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_block_alloc.h b/drivers/gpu/arm/mali400/linux/mali_memory_block_alloc.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_block_alloc.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_block_alloc.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2010, 2013, 2015-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_BLOCK_ALLOCATOR_H__ +#define __MALI_BLOCK_ALLOCATOR_H__ + +#include "mali_session.h" +#include "mali_memory.h" +#include + +#include "mali_memory_types.h" + +#define MALI_BLOCK_SIZE (PAGE_SIZE) /* 4 kB, manage BLOCK memory as page size */ +#define MALI_BLOCK_REF_MASK (0xFFF) +#define MALI_BLOCK_MAX_REF_COUNT (0xFFF) + + + +typedef struct mali_block_allocator { + /* + * In free list, each node's ref_count is 0, + * ref_count added when allocated or referenced in COW + */ + mali_block_item *items; /* information for each block item*/ + struct list_head free; /*free list of mali_memory_node*/ + spinlock_t sp_lock; /*lock for reference count & free list opertion*/ + u32 total_num; /* Number of total pages*/ + atomic_t free_num; /*number of free pages*/ +} mali_block_allocator; + +unsigned long _mali_blk_item_get_phy_addr(mali_block_item *item); +unsigned long _mali_blk_item_get_pfn(mali_block_item *item); +u32 mali_mem_block_get_ref_count(mali_page_node *node); +u32 mali_mem_block_add_ref(mali_page_node *node); +u32 mali_mem_block_dec_ref(mali_page_node *node); +u32 mali_mem_block_release(mali_mem_backend *mem_bkend); +int mali_mem_block_alloc(mali_mem_block_mem *block_mem, u32 size); +int mali_mem_block_mali_map(mali_mem_block_mem *block_mem, struct mali_session_data *session, u32 vaddr, u32 props); +void mali_mem_block_mali_unmap(mali_mem_allocation *alloc); + +int mali_mem_block_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma); +_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size); +mali_bool mali_memory_have_dedicated_memory(void); +u32 mali_mem_block_free(mali_mem_block_mem *block_mem); +u32 mali_mem_block_free_list(struct list_head *list); +void mali_mem_block_free_node(struct mali_page_node *node); +void mali_mem_block_allocator_destroy(void); +_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node); +u32 mali_mem_block_allocator_stat(void); + +#endif /* __MALI_BLOCK_ALLOCATOR_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory.c b/drivers/gpu/arm/mali400/linux/mali_memory.c --- a/drivers/gpu/arm/mali400/linux/mali_memory.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory.c 2018-05-06 08:49:49.182695581 
+0200 @@ -0,0 +1,531 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mali_osk.h" +#include "mali_executor.h" + +#include "mali_memory.h" +#include "mali_memory_os_alloc.h" +#include "mali_memory_block_alloc.h" +#include "mali_memory_util.h" +#include "mali_memory_virtual.h" +#include "mali_memory_manager.h" +#include "mali_memory_cow.h" +#include "mali_memory_swap_alloc.h" +#include "mali_memory_defer_bind.h" +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include "mali_memory_secure.h" +#endif + +extern unsigned int mali_dedicated_mem_size; +extern unsigned int mali_shared_mem_size; + +#define MALI_VM_NUM_FAULT_PREFETCH (0x8) + +static void mali_mem_vma_open(struct vm_area_struct *vma) +{ + mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data; + MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma)); + + /* If need to share the allocation, add ref_count here */ + mali_allocation_ref(alloc); + return; +} +static void mali_mem_vma_close(struct vm_area_struct *vma) +{ + /* If need to share the allocation, unref ref_count here */ + mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data; + + mali_allocation_unref(&alloc); + vma->vm_private_data = NULL; +} + +static int mali_mem_vma_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + mali_mem_allocation *alloc = (mali_mem_allocation *)vma->vm_private_data; + mali_mem_backend *mem_bkend = NULL; + int ret; + int prefetch_num = MALI_VM_NUM_FAULT_PREFETCH; + + unsigned long address = vmf->address; + MALI_DEBUG_ASSERT(alloc->backend_handle); + MALI_DEBUG_ASSERT((unsigned long)alloc->cpu_mapping.addr <= address); + + /* Get backend memory & Map on CPU */ + mutex_lock(&mali_idr_mutex); + if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) { + MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n")); + mutex_unlock(&mali_idr_mutex); + return VM_FAULT_SIGBUS; + } + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(mem_bkend->type == alloc->type); + + if ((mem_bkend->type == MALI_MEM_COW && (MALI_MEM_BACKEND_FLAG_SWAP_COWED != + (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) && + (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE)) { + /*check if use page fault to do COW*/ + MALI_DEBUG_PRINT(4, ("mali_vma_fault: do cow allocate on demand!, address=0x%x\n", address)); + mutex_lock(&mem_bkend->mutex); + ret = mali_mem_cow_allocate_on_demand(mem_bkend, + (address - vma->vm_start) / PAGE_SIZE); + mutex_unlock(&mem_bkend->mutex); + + if (ret != _MALI_OSK_ERR_OK) { + return VM_FAULT_OOM; + } + prefetch_num = 1; + + /* handle COW modified range cpu mapping + we zap the mapping in cow_modify_range, it will trigger page fault + when CPU access it, so here we map it to CPU*/ + mutex_lock(&mem_bkend->mutex); + ret = mali_mem_cow_cpu_map_pages_locked(mem_bkend, vma, address, prefetch_num); + mutex_unlock(&mem_bkend->mutex); + + if (unlikely(ret != _MALI_OSK_ERR_OK)) { + return VM_FAULT_SIGBUS; + } + } else if 
((mem_bkend->type == MALI_MEM_SWAP) || + (mem_bkend->type == MALI_MEM_COW && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) { + u32 offset_in_bkend = (address - vma->vm_start) / PAGE_SIZE; + int ret = _MALI_OSK_ERR_OK; + + mutex_lock(&mem_bkend->mutex); + if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE) { + ret = mali_mem_swap_cow_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page); + } else { + ret = mali_mem_swap_allocate_page_on_demand(mem_bkend, offset_in_bkend, &vmf->page); + } + mutex_unlock(&mem_bkend->mutex); + + if (ret != _MALI_OSK_ERR_OK) { + MALI_DEBUG_PRINT(2, ("Mali swap memory page fault process failed, address=0x%x\n", address)); + return VM_FAULT_OOM; + } else { + return VM_FAULT_LOCKED; + } + } else { + MALI_PRINT_ERROR(("Mali vma fault! It never happen, indicating some logic errors in caller.\n")); + /*NOT support yet or OOM*/ + return VM_FAULT_OOM; + } + return VM_FAULT_NOPAGE; +} + +static struct vm_operations_struct mali_kernel_vm_ops = { + .open = mali_mem_vma_open, + .close = mali_mem_vma_close, + .fault = mali_mem_vma_fault, +}; + + +/** @ map mali allocation to CPU address +* +* Supported backend types: +* --MALI_MEM_OS +* -- need to add COW? + *Not supported backend types: +* -_MALI_MEMORY_BIND_BACKEND_UMP +* -_MALI_MEMORY_BIND_BACKEND_DMA_BUF +* -_MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY +* +*/ +int mali_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct mali_session_data *session; + mali_mem_allocation *mali_alloc = NULL; + u32 mali_addr = vma->vm_pgoff << PAGE_SHIFT; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_backend *mem_bkend = NULL; + int ret = -EFAULT; + + session = (struct mali_session_data *)filp->private_data; + if (NULL == session) { + MALI_PRINT_ERROR(("mmap called without any session data available\n")); + return -EFAULT; + } + + MALI_DEBUG_PRINT(4, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X vma->flags 0x%08x\n", + (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), + (unsigned int)(vma->vm_end - vma->vm_start), vma->vm_flags)); + + /* Operations used on any memory system */ + /* do not need to anything in vm open/close now */ + + /* find mali allocation structure by vaddress*/ + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); + if (likely(mali_vma_node)) { + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start); + if (unlikely(mali_addr != mali_vma_node->vm_node.start)) { + /* only allow to use start address for mmap */ + MALI_DEBUG_PRINT(1, ("mali_addr != mali_vma_node->vm_node.start\n")); + return -EFAULT; + } + } else { + MALI_DEBUG_ASSERT(NULL == mali_vma_node); + return -EFAULT; + } + + mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start; + + if (mali_alloc->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) { + MALI_DEBUG_PRINT(1, ("ERROR : trying to access varying memory by CPU!\n")); + return -EFAULT; + } + + /* Get backend memory & Map on CPU */ + mutex_lock(&mali_idr_mutex); + if (!(mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle))) { + MALI_DEBUG_PRINT(1, ("Can't find memory backend in mmap!\n")); + mutex_unlock(&mali_idr_mutex); + return -EFAULT; + } + mutex_unlock(&mali_idr_mutex); + + if (!(MALI_MEM_SWAP == mali_alloc->type || + (MALI_MEM_COW == mali_alloc->type && (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) { + /* Set some bits which indicate that, the memory is IO memory, meaning + * that no 
paging is to be performed and the memory should not be + * included in crash dumps. And that the memory is reserved, meaning + * that it's present and can never be paged out (see also previous + * entry) + */ + vma->vm_flags |= VM_IO; + vma->vm_flags |= VM_DONTCOPY; + vma->vm_flags |= VM_PFNMAP; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0) + vma->vm_flags |= VM_RESERVED; +#else + vma->vm_flags |= VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND; +#endif + } else if (MALI_MEM_SWAP == mali_alloc->type) { + vma->vm_pgoff = mem_bkend->start_idx; + } + + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + vma->vm_ops = &mali_kernel_vm_ops; + + mali_alloc->cpu_mapping.addr = (void __user *)vma->vm_start; + + /* If it's a copy-on-write mapping, map to read only */ + if (!(vma->vm_flags & VM_WRITE)) { + MALI_DEBUG_PRINT(4, ("mmap allocation with read only !\n")); + /* add VM_WRITE for do_page_fault will check this when a write fault */ + vma->vm_flags |= VM_WRITE | VM_READ; + vma->vm_page_prot = PAGE_READONLY; + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE; + goto out; + } + + if (mem_bkend->type == MALI_MEM_OS) { + ret = mali_mem_os_cpu_map(mem_bkend, vma); + } else if (mem_bkend->type == MALI_MEM_COW && + (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) { + ret = mali_mem_cow_cpu_map(mem_bkend, vma); + } else if (mem_bkend->type == MALI_MEM_BLOCK) { + ret = mali_mem_block_cpu_map(mem_bkend, vma); + } else if ((mem_bkend->type == MALI_MEM_SWAP) || (mem_bkend->type == MALI_MEM_COW && + (MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)))) { + /*For swappable memory, CPU page table will be created by page fault handler. */ + ret = 0; + } else if (mem_bkend->type == MALI_MEM_SECURE) { +#if defined(CONFIG_DMA_SHARED_BUFFER) + ret = mali_mem_secure_cpu_map(mem_bkend, vma); +#else + MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory\n")); + return -EFAULT; +#endif + } else { + /* Not support yet*/ + MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory! 
\n")); + return -EFAULT; + } + + if (ret != 0) { + MALI_DEBUG_PRINT(1, ("ret != 0\n")); + return -EFAULT; + } +out: + MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == mali_alloc->magic); + + vma->vm_private_data = (void *)mali_alloc; + mali_alloc->cpu_mapping.vma = vma; + + mali_allocation_ref(mali_alloc); + + return 0; +} + +_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor) +{ + u32 size = descriptor->psize; + struct mali_session_data *session = descriptor->session; + + MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic); + + /* Map dma-buf into this session's page tables */ + + if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) { + size += MALI_MMU_PAGE_SIZE; + } + + return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start, size); +} + +_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size) +{ + u32 old_size = descriptor->psize; + struct mali_session_data *session = descriptor->session; + + MALI_DEBUG_ASSERT(MALI_MEM_ALLOCATION_VALID_MAGIC == descriptor->magic); + + if (descriptor->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) { + new_size += MALI_MMU_PAGE_SIZE; + } + + if (new_size > old_size) { + MALI_DEBUG_ASSERT(new_size <= descriptor->mali_vma_node.vm_node.size); + return mali_mmu_pagedir_map(session->page_directory, descriptor->mali_vma_node.vm_node.start + old_size, new_size - old_size); + } + return _MALI_OSK_ERR_OK; +} + +void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags) +{ + if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) { + size += MALI_MMU_PAGE_SIZE; + } + + /* Umap and flush L2 */ + mali_mmu_pagedir_unmap(session->page_directory, vaddr, size); + mali_executor_zap_all_active(session); +} + +u32 _mali_ukk_report_memory_usage(void) +{ + u32 sum = 0; + + if (MALI_TRUE == mali_memory_have_dedicated_memory()) { + sum += mali_mem_block_allocator_stat(); + } + + sum += mali_mem_os_stat(); + + return sum; +} + +u32 _mali_ukk_report_total_memory_size(void) +{ + return mali_dedicated_mem_size + mali_shared_mem_size; +} + + +/** + * Per-session memory descriptor mapping table sizes + */ +#define MALI_MEM_DESCRIPTORS_INIT 64 +#define MALI_MEM_DESCRIPTORS_MAX 65536 + +_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session_data) +{ + MALI_DEBUG_PRINT(5, ("Memory session begin\n")); + + session_data->memory_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, + _MALI_OSK_LOCK_ORDER_MEM_SESSION); + + if (NULL == session_data->memory_lock) { + MALI_ERROR(_MALI_OSK_ERR_FAULT); + } + + session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0); + if (NULL == session_data->cow_lock) { + _mali_osk_mutex_term(session_data->memory_lock); + MALI_ERROR(_MALI_OSK_ERR_FAULT); + } + + mali_memory_manager_init(&session_data->allocation_mgr); + + MALI_DEBUG_PRINT(5, ("MMU session begin: success\n")); + MALI_SUCCESS; +} + +void mali_memory_session_end(struct mali_session_data *session) +{ + MALI_DEBUG_PRINT(3, ("MMU session end\n")); + + if (NULL == session) { + MALI_DEBUG_PRINT(1, ("No session data found during session end\n")); + return; + } + /* free allocation */ + mali_free_session_allocations(session); + /* do some check in unint*/ + mali_memory_manager_uninit(&session->allocation_mgr); + + /* Free the lock */ + _mali_osk_mutex_term(session->memory_lock); + _mali_osk_mutex_term(session->cow_lock); + return; +} + +_mali_osk_errcode_t mali_memory_initialize(void) +{ + 
_mali_osk_errcode_t err; + + idr_init(&mali_backend_idr); + mutex_init(&mali_idr_mutex); + + err = mali_mem_swap_init(); + if (err != _MALI_OSK_ERR_OK) { + return err; + } + err = mali_mem_os_init(); + if (_MALI_OSK_ERR_OK == err) { + err = mali_mem_defer_bind_manager_init(); + } + + return err; +} + +void mali_memory_terminate(void) +{ + mali_mem_swap_term(); + mali_mem_defer_bind_manager_destory(); + mali_mem_os_term(); + if (mali_memory_have_dedicated_memory()) { + mali_mem_block_allocator_destroy(); + } +} + + +struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type) +{ + mali_page_node *page_node = NULL; + + page_node = kzalloc(sizeof(mali_page_node), GFP_KERNEL); + MALI_DEBUG_ASSERT(NULL != page_node); + + if (page_node) { + page_node->type = type; + INIT_LIST_HEAD(&page_node->list); + } + + return page_node; +} + +void _mali_page_node_ref(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + /* add ref to this page */ + get_page(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + mali_mem_block_add_ref(node); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + atomic_inc(&node->swap_it->ref_count); + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n")); + } +} + +void _mali_page_node_unref(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + /* unref to this page */ + put_page(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + mali_mem_block_dec_ref(node); + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n")); + } +} + + +void _mali_page_node_add_page(struct mali_page_node *node, struct page *page) +{ + MALI_DEBUG_ASSERT(MALI_PAGE_NODE_OS == node->type); + node->page = page; +} + + +void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item) +{ + MALI_DEBUG_ASSERT(MALI_PAGE_NODE_SWAP == node->type); + node->swap_it = item; +} + +void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item) +{ + MALI_DEBUG_ASSERT(MALI_PAGE_NODE_BLOCK == node->type); + node->blk_it = item; +} + + +int _mali_page_node_get_ref_count(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + /* get ref count of this page */ + return page_count(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + return mali_mem_block_get_ref_count(node); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + return atomic_read(&node->swap_it->ref_count); + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n")); + } + return -1; +} + + +dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + return page_private(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + return _mali_blk_item_get_phy_addr(node->blk_it); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + return node->swap_it->dma_addr; + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n")); + } + return 0; +} + + +unsigned long _mali_page_node_get_pfn(struct mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + return page_to_pfn(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + /* get phy addr for BLOCK page*/ + return _mali_blk_item_get_pfn(node->blk_it); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + return page_to_pfn(node->swap_it->page); + } else { + MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! 
\n")); + } + return 0; +} + + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_cow.c b/drivers/gpu/arm/mali400/linux/mali_memory_cow.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_cow.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_cow.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,776 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_ARM +#include +#endif +#include + +#include "mali_memory.h" +#include "mali_kernel_common.h" +#include "mali_uk_types.h" +#include "mali_osk.h" +#include "mali_kernel_linux.h" +#include "mali_memory_cow.h" +#include "mali_memory_block_alloc.h" +#include "mali_memory_swap_alloc.h" + +/** +* allocate pages for COW backend and flush cache +*/ +static struct page *mali_mem_cow_alloc_page(void) + +{ + mali_mem_os_mem os_mem; + struct mali_page_node *node; + struct page *new_page; + + int ret = 0; + /* allocate pages from os mem */ + ret = mali_mem_os_alloc_pages(&os_mem, _MALI_OSK_MALI_PAGE_SIZE); + + if (ret) { + return NULL; + } + + MALI_DEBUG_ASSERT(1 == os_mem.count); + + node = _MALI_OSK_CONTAINER_OF(os_mem.pages.next, struct mali_page_node, list); + new_page = node->page; + node->page = NULL; + list_del(&node->list); + kfree(node); + + return new_page; +} + + +static struct list_head *_mali_memory_cow_get_node_list(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size) +{ + MALI_DEBUG_ASSERT(MALI_MEM_OS == target_bk->type || MALI_MEM_COW == target_bk->type || + MALI_MEM_BLOCK == target_bk->type || MALI_MEM_SWAP == target_bk->type); + + if (MALI_MEM_OS == target_bk->type) { + MALI_DEBUG_ASSERT(&target_bk->os_mem); + MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->os_mem.count); + return &target_bk->os_mem.pages; + } else if (MALI_MEM_COW == target_bk->type) { + MALI_DEBUG_ASSERT(&target_bk->cow_mem); + MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->cow_mem.count); + return &target_bk->cow_mem.pages; + } else if (MALI_MEM_BLOCK == target_bk->type) { + MALI_DEBUG_ASSERT(&target_bk->block_mem); + MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->block_mem.count); + return &target_bk->block_mem.pfns; + } else if (MALI_MEM_SWAP == target_bk->type) { + MALI_DEBUG_ASSERT(&target_bk->swap_mem); + MALI_DEBUG_ASSERT(((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE) <= target_bk->swap_mem.count); + return &target_bk->swap_mem.pages; + } + + return NULL; +} + +/** +* Do COW for os memory - support do COW for memory from bank memory +* The range_start/size can be zero, which means it will call cow_modify_range +* latter. 
+* This function allocate new pages for COW backend from os mem for a modified range +* It will keep the page which not in the modified range and Add ref to it +* +* @target_bk - target allocation's backend(the allocation need to do COW) +* @target_offset - the offset in target allocation to do COW(for support COW a memory allocated from memory_bank, 4K align) +* @target_size - size of target allocation to do COW (for support memory bank) +* @backend -COW backend +* @range_start - offset of modified range (4K align) +* @range_size - size of modified range +*/ +_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size) +{ + mali_mem_cow *cow = &backend->cow_mem; + struct mali_page_node *m_page, *m_tmp, *page_node; + int target_page = 0; + struct page *new_page; + struct list_head *pages = NULL; + + pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size); + + if (NULL == pages) { + MALI_DEBUG_PRINT_ERROR(("No memory page need to cow ! \n")); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT(0 == cow->count); + + INIT_LIST_HEAD(&cow->pages); + mutex_lock(&target_bk->mutex); + list_for_each_entry_safe(m_page, m_tmp, pages, list) { + /* add page from (target_offset,target_offset+size) to cow backend */ + if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) && + (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) { + + /* allocate a new page node, alway use OS memory for COW */ + page_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS); + + if (NULL == page_node) { + mutex_unlock(&target_bk->mutex); + goto error; + } + + INIT_LIST_HEAD(&page_node->list); + + /* check if in the modified range*/ + if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) && + (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) { + /* need to allocate a new page */ + /* To simplify the case, All COW memory is allocated from os memory ?*/ + new_page = mali_mem_cow_alloc_page(); + + if (NULL == new_page) { + kfree(page_node); + mutex_unlock(&target_bk->mutex); + goto error; + } + + _mali_page_node_add_page(page_node, new_page); + } else { + /*Add Block memory case*/ + if (m_page->type != MALI_PAGE_NODE_BLOCK) { + _mali_page_node_add_page(page_node, m_page->page); + } else { + page_node->type = MALI_PAGE_NODE_BLOCK; + _mali_page_node_add_block_item(page_node, m_page->blk_it); + } + + /* add ref to this page */ + _mali_page_node_ref(m_page); + } + + /* add it to COW backend page list */ + list_add_tail(&page_node->list, &cow->pages); + cow->count++; + } + target_page++; + } + mutex_unlock(&target_bk->mutex); + return _MALI_OSK_ERR_OK; +error: + mali_mem_cow_release(backend, MALI_FALSE); + return _MALI_OSK_ERR_FAULT; +} + +_mali_osk_errcode_t mali_memory_cow_swap_memory(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size) +{ + mali_mem_cow *cow = &backend->cow_mem; + struct mali_page_node *m_page, *m_tmp, *page_node; + int target_page = 0; + struct mali_swap_item *swap_item; + struct list_head *pages = NULL; + + pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size); + if (NULL == pages) { + MALI_DEBUG_PRINT_ERROR(("No swap memory page need to cow ! 
\n")); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_ASSERT(0 == cow->count); + + INIT_LIST_HEAD(&cow->pages); + mutex_lock(&target_bk->mutex); + + backend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN; + + list_for_each_entry_safe(m_page, m_tmp, pages, list) { + /* add page from (target_offset,target_offset+size) to cow backend */ + if ((target_page >= target_offset / _MALI_OSK_MALI_PAGE_SIZE) && + (target_page < ((target_size + target_offset) / _MALI_OSK_MALI_PAGE_SIZE))) { + + /* allocate a new page node, use swap memory for COW memory swap cowed flag. */ + page_node = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP); + + if (NULL == page_node) { + mutex_unlock(&target_bk->mutex); + goto error; + } + + /* check if in the modified range*/ + if ((cow->count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) && + (cow->count < (range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) { + /* need to allocate a new page */ + /* To simplify the case, All COW memory is allocated from os memory ?*/ + swap_item = mali_mem_swap_alloc_swap_item(); + + if (NULL == swap_item) { + kfree(page_node); + mutex_unlock(&target_bk->mutex); + goto error; + } + + swap_item->idx = mali_mem_swap_idx_alloc(); + + if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) { + MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW.\n")); + kfree(page_node); + kfree(swap_item); + mutex_unlock(&target_bk->mutex); + goto error; + } + + _mali_page_node_add_swap_item(page_node, swap_item); + } else { + _mali_page_node_add_swap_item(page_node, m_page->swap_it); + + /* add ref to this page */ + _mali_page_node_ref(m_page); + } + + list_add_tail(&page_node->list, &cow->pages); + cow->count++; + } + target_page++; + } + mutex_unlock(&target_bk->mutex); + + return _MALI_OSK_ERR_OK; +error: + mali_mem_swap_release(backend, MALI_FALSE); + return _MALI_OSK_ERR_FAULT; + +} + + +_mali_osk_errcode_t _mali_mem_put_page_node(mali_page_node *node) +{ + if (node->type == MALI_PAGE_NODE_OS) { + return mali_mem_os_put_page(node->page); + } else if (node->type == MALI_PAGE_NODE_BLOCK) { + return mali_mem_block_unref_node(node); + } else if (node->type == MALI_PAGE_NODE_SWAP) { + return _mali_mem_swap_put_page_node(node); + } else + MALI_DEBUG_ASSERT(0); + return _MALI_OSK_ERR_FAULT; +} + + +/** +* Modify a range of a exist COW backend +* @backend -COW backend +* @range_start - offset of modified range (4K align) +* @range_size - size of modified range(in byte) +*/ +_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend, + u32 range_start, + u32 range_size) +{ + mali_mem_allocation *alloc = NULL; + struct mali_session_data *session; + mali_mem_cow *cow = &backend->cow_mem; + struct mali_page_node *m_page, *m_tmp; + LIST_HEAD(pages); + struct page *new_page; + u32 count = 0; + s32 change_pages_nr = 0; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK; + + if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + + alloc = backend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type); + MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count); + + mutex_lock(&backend->mutex); + + /* free pages*/ + list_for_each_entry_safe(m_page, m_tmp, &cow->pages, list) { + + /* check if in the modified range*/ + if ((count >= range_start / _MALI_OSK_MALI_PAGE_SIZE) && + (count < 
(range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE)) { + if (MALI_PAGE_NODE_SWAP != m_page->type) { + new_page = mali_mem_cow_alloc_page(); + + if (NULL == new_page) { + goto error; + } + if (1 != _mali_page_node_get_ref_count(m_page)) + change_pages_nr++; + /* unref old page*/ + _mali_osk_mutex_wait(session->cow_lock); + if (_mali_mem_put_page_node(m_page)) { + __free_page(new_page); + _mali_osk_mutex_signal(session->cow_lock); + goto error; + } + _mali_osk_mutex_signal(session->cow_lock); + /* add new page*/ + /* always use OS for COW*/ + m_page->type = MALI_PAGE_NODE_OS; + _mali_page_node_add_page(m_page, new_page); + } else { + struct mali_swap_item *swap_item; + + swap_item = mali_mem_swap_alloc_swap_item(); + + if (NULL == swap_item) { + goto error; + } + + swap_item->idx = mali_mem_swap_idx_alloc(); + + if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == swap_item->idx) { + MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW modify range.\n")); + kfree(swap_item); + goto error; + } + + if (1 != _mali_page_node_get_ref_count(m_page)) { + change_pages_nr++; + } + + if (_mali_mem_put_page_node(m_page)) { + mali_mem_swap_free_swap_item(swap_item); + goto error; + } + + _mali_page_node_add_swap_item(m_page, swap_item); + } + } + count++; + } + cow->change_pages_nr = change_pages_nr; + + MALI_DEBUG_ASSERT(MALI_MEM_COW == alloc->type); + + /* ZAP cpu mapping(modified range), and do cpu mapping here if need */ + if (NULL != alloc->cpu_mapping.vma) { + MALI_DEBUG_ASSERT(0 != alloc->backend_handle); + MALI_DEBUG_ASSERT(NULL != alloc->cpu_mapping.vma); + MALI_DEBUG_ASSERT(alloc->cpu_mapping.vma->vm_end - alloc->cpu_mapping.vma->vm_start >= range_size); + + if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) { + zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size); + + ret = mali_mem_cow_cpu_map_pages_locked(backend, alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size / _MALI_OSK_MALI_PAGE_SIZE); + + if (unlikely(ret != _MALI_OSK_ERR_OK)) { + MALI_DEBUG_PRINT(2, ("mali_memory_cow_modify_range: cpu mapping failed !\n")); + ret = _MALI_OSK_ERR_FAULT; + } + } else { + /* used to trigger page fault for swappable cowed memory. */ + alloc->cpu_mapping.vma->vm_flags |= VM_PFNMAP; + alloc->cpu_mapping.vma->vm_flags |= VM_MIXEDMAP; + + zap_vma_ptes(alloc->cpu_mapping.vma, alloc->cpu_mapping.vma->vm_start + range_start, range_size); + /* delete this flag to let swappble is ummapped regard to stauct page not page frame. 
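Whenever pages are swapped out from under a live CPU mapping, the stale PTEs must be removed first, otherwise userspace would keep writing through to the old, possibly still shared, pages; the code above therefore zaps the modified range and reinserts PFNs for the new pages. A condensed kernel-style sketch of that zap-then-reinsert pattern (error unwinding trimmed; vm_insert_pfn is the pre-4.20 API this driver uses):

#include <linux/mm.h>

/* Sketch: replace the CPU view of [start, start + size) with new pages;
 * 'pfns' would come from the freshly allocated COW pages. */
static int remap_modified_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long size,
				const unsigned long *pfns)
{
	unsigned long addr;
	int i = 0, ret;

	/* Throw away the stale PTEs for the modified range. */
	zap_vma_ptes(vma, start, size);

	/* Reinsert one PTE per page, now pointing at the private copies. */
	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
		ret = vm_insert_pfn(vma, addr, pfns[i++]);
		if (ret)
			return ret;
	}
	return 0;
}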
*/ + alloc->cpu_mapping.vma->vm_flags &= ~VM_PFNMAP; + alloc->cpu_mapping.vma->vm_flags &= ~VM_MIXEDMAP; + } + } + +error: + mutex_unlock(&backend->mutex); + return ret; + +} + + +/** +* Allocate pages for COW backend +* @alloc -allocation for COW allocation +* @target_bk - target allocation's backend(the allocation need to do COW) +* @target_offset - the offset in target allocation to do COW(for support COW a memory allocated from memory_bank, 4K align) +* @target_size - size of target allocation to do COW (for support memory bank)(in byte) +* @backend -COW backend +* @range_start - offset of modified range (4K align) +* @range_size - size of modified range(in byte) +*/ +_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size) +{ + struct mali_session_data *session = backend->mali_allocation->session; + + MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS); + + /* size & offset must be a multiple of the system page size */ + if (target_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + if (range_size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + if (target_offset % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + if (range_start % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + + /* check backend type */ + MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type); + + switch (target_bk->type) { + case MALI_MEM_OS: + case MALI_MEM_BLOCK: + return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size); + break; + case MALI_MEM_COW: + if (backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) { + return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size); + } else { + return mali_memory_cow_os_memory(target_bk, target_offset, target_size, backend, range_start, range_size); + } + break; + case MALI_MEM_SWAP: + return mali_memory_cow_swap_memory(target_bk, target_offset, target_size, backend, range_start, range_size); + break; + case MALI_MEM_EXTERNAL: + /*NOT support yet*/ + MALI_DEBUG_PRINT_ERROR(("External physical memory not supported ! \n")); + return _MALI_OSK_ERR_UNSUPPORTED; + break; + case MALI_MEM_DMA_BUF: + /*NOT support yet*/ + MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported ! \n")); + return _MALI_OSK_ERR_UNSUPPORTED; + break; + case MALI_MEM_UMP: + /*NOT support yet*/ + MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported ! \n")); + return _MALI_OSK_ERR_UNSUPPORTED; + break; + default: + /*Not support yet*/ + MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported ! 
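mali_memory_do_cow rejects any offset or size that is not a multiple of the 4 KiB page size with _MALI_OSK_ERR_INVALID_ARGS, so callers must align their byte ranges first. A small sketch of such caller-side alignment using the kernel's PAGE_MASK/PAGE_ALIGN helpers (the wrapper itself is hypothetical):

#include <linux/types.h>
#include <linux/mm.h>   /* PAGE_SIZE, PAGE_MASK, PAGE_ALIGN */

/* Illustrative helper: widen an arbitrary byte range to page granularity
 * before requesting COW, so the alignment checks in mali_memory_do_cow pass. */
static void cow_align_range(u32 byte_start, u32 byte_len,
			    u32 *range_start, u32 *range_size)
{
	u32 first = byte_start & PAGE_MASK;            /* round start down */
	u32 last  = PAGE_ALIGN(byte_start + byte_len); /* round end up */

	*range_start = first;
	*range_size  = last - first;
}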
\n")); + return _MALI_OSK_ERR_UNSUPPORTED; + break; + } + return _MALI_OSK_ERR_OK; +} + + +/** +* Map COW backend memory to mali +* Support OS/BLOCK for mali_page_node +*/ +int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size) +{ + mali_mem_allocation *cow_alloc; + struct mali_page_node *m_page; + struct mali_session_data *session; + struct mali_page_directory *pagedir; + u32 virt, start; + + cow_alloc = mem_bkend->mali_allocation; + virt = cow_alloc->mali_vma_node.vm_node.start; + start = virt; + + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + MALI_DEBUG_ASSERT_POINTER(cow_alloc); + + session = cow_alloc->session; + pagedir = session->page_directory; + MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS); + list_for_each_entry(m_page, &mem_bkend->cow_mem.pages, list) { + if ((virt - start >= range_start) && (virt - start < range_start + range_size)) { + dma_addr_t phys = _mali_page_node_get_dma_addr(m_page); +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) + MALI_DEBUG_ASSERT(0 == (phys >> 32)); +#endif + mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, + MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT); + } + virt += MALI_MMU_PAGE_SIZE; + } + return 0; +} + +/** +* Map COW backend to cpu +* support OS/BLOCK memory +*/ +int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma) +{ + mali_mem_cow *cow = &mem_bkend->cow_mem; + struct mali_page_node *m_page; + int ret; + unsigned long addr = vma->vm_start; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW); + + list_for_each_entry(m_page, &cow->pages, list) { + /* We should use vm_insert_page, but it does a dcache + * flush which makes it way slower than remap_pfn_range or vm_insert_pfn. + ret = vm_insert_page(vma, addr, page); + */ + ret = vm_insert_pfn(vma, addr, _mali_page_node_get_pfn(m_page)); + + if (unlikely(0 != ret)) { + return ret; + } + addr += _MALI_OSK_MALI_PAGE_SIZE; + } + + return 0; +} + +/** +* Map some pages(COW backend) to CPU vma@vaddr +*@ mem_bkend - COW backend +*@ vma +*@ vaddr -start CPU vaddr mapped to +*@ num - max number of pages to map to CPU vaddr +*/ +_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend, + struct vm_area_struct *vma, + unsigned long vaddr, + int num) +{ + mali_mem_cow *cow = &mem_bkend->cow_mem; + struct mali_page_node *m_page; + int ret; + int offset; + int count ; + unsigned long vstart = vma->vm_start; + count = 0; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_COW); + MALI_DEBUG_ASSERT(0 == vaddr % _MALI_OSK_MALI_PAGE_SIZE); + MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE); + offset = (vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE; + + list_for_each_entry(m_page, &cow->pages, list) { + if ((count >= offset) && (count < offset + num)) { + ret = vm_insert_pfn(vma, vaddr, _mali_page_node_get_pfn(m_page)); + + if (unlikely(0 != ret)) { + if (count == offset) { + return _MALI_OSK_ERR_FAULT; + } else { + /* ret is EBUSY when page isn't in modify range, but now it's OK*/ + return _MALI_OSK_ERR_OK; + } + } + vaddr += _MALI_OSK_MALI_PAGE_SIZE; + } + count++; + } + return _MALI_OSK_ERR_OK; +} + +/** +* Release COW backend memory +* free it directly(put_page--unref page), not put into pool +*/ +u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped) +{ + mali_mem_allocation *alloc; + struct mali_session_data *session; + u32 free_pages_nr = 0; + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + MALI_DEBUG_ASSERT(MALI_MEM_COW == 
mem_bkend->type); + alloc = mem_bkend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) { + /* Unmap the memory from the mali virtual address space. */ + if (MALI_TRUE == is_mali_mapped) + mali_mem_os_mali_unmap(alloc); + /* free cow backend list*/ + _mali_osk_mutex_wait(session->cow_lock); + free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE); + _mali_osk_mutex_signal(session->cow_lock); + + free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages); + + MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages)); + } else { + free_pages_nr = mali_mem_swap_release(mem_bkend, is_mali_mapped); + } + + + MALI_DEBUG_PRINT(4, ("COW Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->cow_mem.count * _MALI_OSK_MALI_PAGE_SIZE, + free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE)); + + mem_bkend->cow_mem.count = 0; + return free_pages_nr; +} + + +/* Dst node could os node or swap node. */ +void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node) +{ + void *dst, *src; + struct page *dst_page; + dma_addr_t dma_addr; + + MALI_DEBUG_ASSERT(src_node != NULL); + MALI_DEBUG_ASSERT(dst_node != NULL); + MALI_DEBUG_ASSERT(dst_node->type == MALI_PAGE_NODE_OS + || dst_node->type == MALI_PAGE_NODE_SWAP); + + if (dst_node->type == MALI_PAGE_NODE_OS) { + dst_page = dst_node->page; + } else { + dst_page = dst_node->swap_it->page; + } + + dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(dst_node), + _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL); + + /* map it , and copy the content*/ + dst = kmap_atomic(dst_page); + + if (src_node->type == MALI_PAGE_NODE_OS || + src_node->type == MALI_PAGE_NODE_SWAP) { + struct page *src_page; + + if (src_node->type == MALI_PAGE_NODE_OS) { + src_page = src_node->page; + } else { + src_page = src_node->swap_it->page; + } + + /* Clear and invaliate cache */ + /* In ARM architecture, speculative read may pull stale data into L1 cache + * for kernel linear mapping page table. 
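Before the CPU may read or write either page, the copy routine bounces both out of the device's DMA domain and maps them back afterwards; on ARM this is what invalidates cache lines that speculation may have pulled in. A trimmed kernel-style sketch of that ordering for the OS-page-to-OS-page case (swap and block variants left out):

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: copy src_page into dst_page with DMA-safe cache handling. */
static int copy_page_dma_safe(struct device *dev,
			      struct page *src_page, dma_addr_t src_dma,
			      struct page *dst_page, dma_addr_t dst_dma)
{
	void *src, *dst;

	/* Give both pages back to the CPU (invalidates stale cache lines). */
	dma_unmap_page(dev, dst_dma, PAGE_SIZE, DMA_BIDIRECTIONAL);
	dma_unmap_page(dev, src_dma, PAGE_SIZE, DMA_BIDIRECTIONAL);

	dst = kmap_atomic(dst_page);
	src = kmap_atomic(src_page);
	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src);   /* LIFO order: src was mapped last */
	kunmap_atomic(dst);

	/* Hand them back to the device; dst only needs to be device-readable. */
	src_dma = dma_map_page(dev, src_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	dst_dma = dma_map_page(dev, dst_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, src_dma) || dma_mapping_error(dev, dst_dma))
		return -EFAULT;
	return 0;
}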
DMA_BIDIRECTIONAL could + * invalidate the L1 cache so that following read get the latest data + */ + dma_unmap_page(&mali_platform_device->dev, _mali_page_node_get_dma_addr(src_node), + _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL); + + src = kmap_atomic(src_page); + memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE); + kunmap_atomic(src); + dma_addr = dma_map_page(&mali_platform_device->dev, src_page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_BIDIRECTIONAL); + + if (src_node->type == MALI_PAGE_NODE_SWAP) { + src_node->swap_it->dma_addr = dma_addr; + } + } else if (src_node->type == MALI_PAGE_NODE_BLOCK) { + /* + * use ioremap to map src for BLOCK memory + */ + src = ioremap_nocache(_mali_page_node_get_dma_addr(src_node), _MALI_OSK_MALI_PAGE_SIZE); + memcpy(dst, src , _MALI_OSK_MALI_PAGE_SIZE); + iounmap(src); + } + kunmap_atomic(dst); + dma_addr = dma_map_page(&mali_platform_device->dev, dst_page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + if (dst_node->type == MALI_PAGE_NODE_SWAP) { + dst_node->swap_it->dma_addr = dma_addr; + } +} + + +/* +* allocate page on demand when CPU access it, +* THis used in page fault handler +*/ +_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page) +{ + struct page *new_page = NULL; + struct mali_page_node *new_node = NULL; + int i = 0; + struct mali_page_node *m_page, *found_node = NULL; + struct mali_session_data *session = NULL; + mali_mem_cow *cow = &mem_bkend->cow_mem; + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + MALI_DEBUG_ASSERT(offset_page < mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE); + MALI_DEBUG_PRINT(4, ("mali_mem_cow_allocate_on_demand !, offset_page =0x%x\n", offset_page)); + + /* allocate new page here */ + new_page = mali_mem_cow_alloc_page(); + if (!new_page) + return _MALI_OSK_ERR_NOMEM; + + new_node = _mali_page_node_allocate(MALI_PAGE_NODE_OS); + if (!new_node) { + __free_page(new_page); + return _MALI_OSK_ERR_NOMEM; + } + + /* find the page in backend*/ + list_for_each_entry(m_page, &cow->pages, list) { + if (i == offset_page) { + found_node = m_page; + break; + } + i++; + } + MALI_DEBUG_ASSERT(found_node); + if (NULL == found_node) { + __free_page(new_page); + kfree(new_node); + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + _mali_page_node_add_page(new_node, new_page); + + /* Copy the src page's content to new page */ + _mali_mem_cow_copy_page(found_node, new_node); + + MALI_DEBUG_ASSERT_POINTER(mem_bkend->mali_allocation); + session = mem_bkend->mali_allocation->session; + MALI_DEBUG_ASSERT_POINTER(session); + if (1 != _mali_page_node_get_ref_count(found_node)) { + atomic_add(1, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + mem_bkend->cow_mem.change_pages_nr++; + } + + _mali_osk_mutex_wait(session->cow_lock); + if (_mali_mem_put_page_node(found_node)) { + __free_page(new_page); + kfree(new_node); + _mali_osk_mutex_signal(session->cow_lock); + return _MALI_OSK_ERR_NOMEM; + } + _mali_osk_mutex_signal(session->cow_lock); + + list_replace(&found_node->list, &new_node->list); + + kfree(found_node); + + /* map to GPU side*/ + _mali_osk_mutex_wait(session->memory_lock); + mali_mem_cow_mali_map(mem_bkend, offset_page * _MALI_OSK_MALI_PAGE_SIZE, _MALI_OSK_MALI_PAGE_SIZE); + _mali_osk_mutex_signal(session->memory_lock); + return _MALI_OSK_ERR_OK; +} diff -ENwbur 
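mali_mem_cow_allocate_on_demand is the lazy half of COW: nothing is copied when the COW backend is created, and the first CPU touch of a shared page faults, allocates a private page, copies the contents, and splices the new node into the list in place of the shared one. The splice-plus-refcount bookkeeping in a self-contained sketch (toy types, not the driver's):

#include <stdlib.h>
#include <string.h>

#define PG 64 /* toy "page" size */

struct node {
	char *data;     /* stands in for the backing page */
	int  *refcount; /* shared nodes point at one shared counter */
};

/* Sketch: the COW fault path - give 'n' a private copy of shared data. */
static int make_private(struct node *n)
{
	char *copy = malloc(PG);
	int *rc = malloc(sizeof(*rc));

	if (!copy || !rc) {
		free(copy);
		free(rc);
		return -1;
	}
	memcpy(copy, n->data, PG);   /* copy old contents first */
	if (--(*n->refcount) == 0) { /* we held the last reference */
		free(n->data);
		free(n->refcount);
	}
	n->data = copy;              /* splice the private page in */
	n->refcount = rc;
	*rc = 1;
	return 0;
}

int main(void)
{
	int shared_rc = 2; /* the data is shared with one other owner */
	struct node a = { malloc(PG), &shared_rc };
	return make_private(&a);
}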
a/drivers/gpu/arm/mali400/linux/mali_memory_cow.h b/drivers/gpu/arm/mali400/linux/mali_memory_cow.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_cow.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_cow.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEMORY_COW_H__ +#define __MALI_MEMORY_COW_H__ + +#include "mali_osk.h" +#include "mali_session.h" +#include "mali_memory_types.h" + +int mali_mem_cow_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma); +_mali_osk_errcode_t mali_mem_cow_cpu_map_pages_locked(mali_mem_backend *mem_bkend, + struct vm_area_struct *vma, + unsigned long vaddr, + int num); + +_mali_osk_errcode_t mali_memory_do_cow(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size); + +_mali_osk_errcode_t mali_memory_cow_modify_range(mali_mem_backend *backend, + u32 range_start, + u32 range_size); + +_mali_osk_errcode_t mali_memory_cow_os_memory(mali_mem_backend *target_bk, + u32 target_offset, + u32 target_size, + mali_mem_backend *backend, + u32 range_start, + u32 range_size); + +void _mali_mem_cow_copy_page(mali_page_node *src_node, mali_page_node *dst_node); + +int mali_mem_cow_mali_map(mali_mem_backend *mem_bkend, u32 range_start, u32 range_size); +u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped); +_mali_osk_errcode_t mali_mem_cow_allocate_on_demand(mali_mem_backend *mem_bkend, u32 offset_page); +#endif + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_defer_bind.c b/drivers/gpu/arm/mali400/linux/mali_memory_defer_bind.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_defer_bind.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_defer_bind.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_ARM +#include +#endif +#include + +#include "mali_memory.h" +#include "mali_kernel_common.h" +#include "mali_uk_types.h" +#include "mali_osk.h" +#include "mali_kernel_linux.h" +#include "mali_memory_defer_bind.h" +#include "mali_executor.h" +#include "mali_osk.h" +#include "mali_scheduler.h" +#include "mali_gp_job.h" + +mali_defer_bind_manager *mali_dmem_man = NULL; + +static u32 mali_dmem_get_gp_varying_size(struct mali_gp_job *gp_job) +{ + return gp_job->required_varying_memsize / _MALI_OSK_MALI_PAGE_SIZE; +} + +_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void) +{ + mali_dmem_man = _mali_osk_calloc(1, sizeof(struct mali_defer_bind_manager)); + if (!mali_dmem_man) + return _MALI_OSK_ERR_NOMEM; + + atomic_set(&mali_dmem_man->num_used_pages, 0); + atomic_set(&mali_dmem_man->num_dmem, 0); + + return _MALI_OSK_ERR_OK; +} + + +void mali_mem_defer_bind_manager_destory(void) +{ + if (mali_dmem_man) { + MALI_DEBUG_ASSERT(0 == atomic_read(&mali_dmem_man->num_dmem)); + kfree(mali_dmem_man); + } + mali_dmem_man = NULL; +} + + +/*allocate pages from OS memory*/ +_mali_osk_errcode_t mali_mem_defer_alloc_mem(u32 require, struct mali_session_data *session, mali_defer_mem_block *dblock) +{ + int retval = 0; + u32 num_pages = require; + mali_mem_os_mem os_mem; + + retval = mali_mem_os_alloc_pages(&os_mem, num_pages * _MALI_OSK_MALI_PAGE_SIZE); + + /* add to free pages list */ + if (0 == retval) { + MALI_DEBUG_PRINT(4, ("mali_mem_defer_alloc_mem ,,*** pages allocate = 0x%x \n", num_pages)); + list_splice(&os_mem.pages, &dblock->free_pages); + atomic_add(os_mem.count, &dblock->num_free_pages); + atomic_add(os_mem.count, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + return _MALI_OSK_ERR_OK; + } else + return _MALI_OSK_ERR_FAULT; +} + +_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock) +{ + u32 require_page; + + if (!next_gp_job) + return _MALI_OSK_ERR_FAULT; + + require_page = mali_dmem_get_gp_varying_size(next_gp_job); + + MALI_DEBUG_PRINT(4, ("mali_mem_defer_prepare_mem_work, require alloc page 0x%x\n", + require_page)); + /* allocate more pages from OS */ + if (_MALI_OSK_ERR_OK != mali_mem_defer_alloc_mem(require_page, next_gp_job->session, dblock)) { + MALI_DEBUG_PRINT(1, ("ERROR##mali_mem_defer_prepare_mem_work, allocate page failed!!")); + return _MALI_OSK_ERR_NOMEM; + } + + next_gp_job->bind_flag = MALI_DEFER_BIND_MEMORY_PREPARED; + + return _MALI_OSK_ERR_OK; +} + + +/* do preparetion for allocation before defer bind */ +_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize) +{ + mali_mem_backend *mem_bkend = NULL; + struct mali_backend_bind_list *bk_list = _mali_osk_calloc(1, sizeof(struct mali_backend_bind_list)); + if (NULL == bk_list) + return _MALI_OSK_ERR_FAULT; + + INIT_LIST_HEAD(&bk_list->node); + /* Get backend memory */ + mutex_lock(&mali_idr_mutex); + if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) { + MALI_DEBUG_PRINT(1, ("Can't find memory backend in defer bind!\n")); + mutex_unlock(&mali_idr_mutex); + _mali_osk_free(bk_list); + return _MALI_OSK_ERR_FAULT; + } + mutex_unlock(&mali_idr_mutex); 
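Backends are referred to by integer handles, so the prepare step above re-resolves each handle through the global IDR while holding mali_idr_mutex. The lookup idiom in isolation (kernel-style sketch; the names mirror but are not the driver's, and idr_init is assumed to have run at init time):

#include <linux/idr.h>
#include <linux/mutex.h>

static struct idr backend_idr;               /* idr_init() at module init */
static DEFINE_MUTEX(backend_idr_mutex);

/* Sketch: resolve a backend handle to its object, or NULL if it was freed. */
static void *backend_lookup(int handle)
{
	void *bk;

	mutex_lock(&backend_idr_mutex);
	bk = idr_find(&backend_idr, handle);
	mutex_unlock(&backend_idr_mutex);
	return bk; /* a borrowed pointer: only valid while the handle lives */
}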
+ + /* If the mem backend has already been bound, no need to bind again.*/ + if (mem_bkend->os_mem.count > 0) { + _mali_osk_free(bk_list); + return _MALI_OSK_ERR_OK; + } + + MALI_DEBUG_PRINT(4, ("bind_allocation_prepare:: allocation =%x vaddr=0x%x!\n", alloc, alloc->mali_vma_node.vm_node.start)); + + INIT_LIST_HEAD(&mem_bkend->os_mem.pages); + + bk_list->bkend = mem_bkend; + bk_list->vaddr = alloc->mali_vma_node.vm_node.start; + bk_list->session = alloc->session; + bk_list->page_num = mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE; + *required_varying_memsize += mem_bkend->size; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS); + + /* add to job to do list */ + list_add(&bk_list->node, list); + + return _MALI_OSK_ERR_OK; +} + + + +/* bind phyiscal memory to allocation +This function will be called in IRQ handler*/ +static _mali_osk_errcode_t mali_mem_defer_bind_allocation(struct mali_backend_bind_list *bk_node, + struct list_head *pages) +{ + struct mali_session_data *session = bk_node->session; + mali_mem_backend *mem_bkend = bk_node->bkend; + MALI_DEBUG_PRINT(4, ("mali_mem_defer_bind_allocation, bind bkend = %x page num=0x%x vaddr=%x session=%x\n", mem_bkend, bk_node->page_num, bk_node->vaddr, session)); + + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS); + list_splice(pages, &mem_bkend->os_mem.pages); + mem_bkend->os_mem.count = bk_node->page_num; + + if (mem_bkend->type == MALI_MEM_OS) { + mali_mem_os_mali_map(&mem_bkend->os_mem, session, bk_node->vaddr, 0, + mem_bkend->os_mem.count, MALI_MMU_FLAGS_DEFAULT); + } + smp_wmb(); + bk_node->flag = MALI_DEFER_BIND_MEMORY_BINDED; + mem_bkend->flags &= ~MALI_MEM_BACKEND_FLAG_NOT_BINDED; + mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_BINDED; + return _MALI_OSK_ERR_OK; +} + + +static struct list_head *mali_mem_defer_get_free_page_list(u32 count, struct list_head *pages, mali_defer_mem_block *dblock) +{ + int i = 0; + struct mali_page_node *m_page, *m_tmp; + + if (atomic_read(&dblock->num_free_pages) < count) { + return NULL; + } else { + list_for_each_entry_safe(m_page, m_tmp, &dblock->free_pages, list) { + if (i < count) { + list_move_tail(&m_page->list, pages); + } else { + break; + } + i++; + } + MALI_DEBUG_ASSERT(i == count); + atomic_sub(count, &dblock->num_free_pages); + return pages; + } +} + + +/* called in job start IOCTL to bind physical memory for each allocations +@ bk_list backend list to do defer bind +@ pages page list to do this bind +@ count number of pages +*/ +_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp, + struct mali_defer_mem_block *dmem_block) +{ + struct mali_defer_mem *dmem = NULL; + struct mali_backend_bind_list *bkn, *bkn_tmp; + LIST_HEAD(pages); + + if (gp->required_varying_memsize != (atomic_read(&dmem_block->num_free_pages) * _MALI_OSK_MALI_PAGE_SIZE)) { + MALI_DEBUG_PRINT_ERROR(("#BIND: The memsize of varying buffer not match to the pagesize of the dmem_block!!## \n")); + return _MALI_OSK_ERR_FAULT; + } + + MALI_DEBUG_PRINT(4, ("#BIND: GP job=%x## \n", gp)); + dmem = (mali_defer_mem *)_mali_osk_calloc(1, sizeof(struct mali_defer_mem)); + if (dmem) { + INIT_LIST_HEAD(&dmem->node); + gp->dmem = dmem; + } else { + return _MALI_OSK_ERR_NOMEM; + } + + atomic_add(1, &mali_dmem_man->num_dmem); + /* for each bk_list backend, do bind */ + list_for_each_entry_safe(bkn, bkn_tmp , &gp->vary_todo, node) { + INIT_LIST_HEAD(&pages); + if (likely(mali_mem_defer_get_free_page_list(bkn->page_num, &pages, dmem_block))) { + list_del(&bkn->node); + mali_mem_defer_bind_allocation(bkn, &pages); + _mali_osk_free(bkn); + } 
else { + /* not enough memory will not happen */ + MALI_DEBUG_PRINT_ERROR(("#BIND: NOT enough memory when binded !!## \n")); + _mali_osk_free(gp->dmem); + return _MALI_OSK_ERR_NOMEM; + } + } + + if (!list_empty(&gp->vary_todo)) { + MALI_DEBUG_PRINT_ERROR(("#BIND: The deferbind backend list isn't empty !!## \n")); + _mali_osk_free(gp->dmem); + return _MALI_OSK_ERR_FAULT; + } + + dmem->flag = MALI_DEFER_BIND_MEMORY_BINDED; + + return _MALI_OSK_ERR_OK; +} + +void mali_mem_defer_dmem_free(struct mali_gp_job *gp) +{ + if (gp->dmem) { + atomic_dec(&mali_dmem_man->num_dmem); + _mali_osk_free(gp->dmem); + } +} + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_defer_bind.h b/drivers/gpu/arm/mali400/linux/mali_memory_defer_bind.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_defer_bind.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_defer_bind.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#ifndef __MALI_MEMORY_DEFER_BIND_H_ +#define __MALI_MEMORY_DEFER_BIND_H_ + + +#include "mali_osk.h" +#include "mali_session.h" + +#include +#include +#include +#include +#include + + +#include "mali_memory_types.h" +#include "mali_memory_os_alloc.h" +#include "mali_uk_types.h" + +struct mali_gp_job; + +typedef struct mali_defer_mem { + struct list_head node; /*dlist node in bind manager */ + u32 flag; +} mali_defer_mem; + + +typedef struct mali_defer_mem_block { + struct list_head free_pages; /* page pool */ + atomic_t num_free_pages; +} mali_defer_mem_block; + +/* varying memory list need to bind */ +typedef struct mali_backend_bind_list { + struct list_head node; + struct mali_mem_backend *bkend; + u32 vaddr; + u32 page_num; + struct mali_session_data *session; + u32 flag; +} mali_backend_bind_lists; + + +typedef struct mali_defer_bind_manager { + atomic_t num_used_pages; + atomic_t num_dmem; +} mali_defer_bind_manager; + +_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void); +void mali_mem_defer_bind_manager_destory(void); +_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp, struct mali_defer_mem_block *dmem_block); +_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize); +_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock); +void mali_mem_defer_dmem_free(struct mali_gp_job *gp); + +#endif diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_dma_buf.c b/drivers/gpu/arm/mali400/linux/mali_memory_dma_buf.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_dma_buf.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_dma_buf.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,369 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. 
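mali_mem_defer_get_free_page_list carves exactly page_num pages out of the job's pool or refuses outright: the atomic counter is checked first and only then are nodes moved, so each backend is bound completely or not at all. A kernel-style sketch of that all-or-nothing carve-out (simplified node type; like the driver, it assumes the caller serializes access to the pool):

#include <linux/list.h>
#include <linux/atomic.h>

struct pool {
	struct list_head free_pages;
	atomic_t num_free;
};

struct pnode { struct list_head list; };

/* Sketch: move 'count' nodes from the pool onto 'out', all or nothing. */
static int pool_take(struct pool *p, unsigned count, struct list_head *out)
{
	struct pnode *n, *tmp;
	unsigned moved = 0;

	if (atomic_read(&p->num_free) < count)
		return -1; /* caller treats this as "not enough memory" */

	list_for_each_entry_safe(n, tmp, &p->free_pages, list) {
		if (moved == count)
			break;
		list_move_tail(&n->list, out);
		moved++;
	}
	atomic_sub(count, &p->num_free);
	return 0;
}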
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include /* file system operations */ +#include /* user space access */ +#include +#include +#include +#include +#include +#include +#include + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_kernel_linux.h" + +#include "mali_memory.h" +#include "mali_memory_dma_buf.h" +#include "mali_memory_virtual.h" +#include "mali_pp_job.h" + +/* + * Map DMA buf attachment \a mem into \a session at virtual address \a virt. + */ +static int mali_dma_buf_map(mali_mem_backend *mem_backend) +{ + mali_mem_allocation *alloc; + struct mali_dma_buf_attachment *mem; + struct mali_session_data *session; + struct mali_page_directory *pagedir; + _mali_osk_errcode_t err; + struct scatterlist *sg; + u32 virt, flags; + int i; + + MALI_DEBUG_ASSERT_POINTER(mem_backend); + + alloc = mem_backend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + mem = mem_backend->dma_buf.attachment; + MALI_DEBUG_ASSERT_POINTER(mem); + + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT(mem->session == session); + + virt = alloc->mali_vma_node.vm_node.start; + flags = alloc->flags; + + mali_session_memory_lock(session); + mem->map_ref++; + + MALI_DEBUG_PRINT(5, ("Mali DMA-buf: map attachment %p, new map_ref = %d\n", mem, mem->map_ref)); + + if (1 == mem->map_ref) { + + /* First reference taken, so we need to map the dma buf */ + MALI_DEBUG_ASSERT(!mem->is_mapped); + + mem->sgt = dma_buf_map_attachment(mem->attachment, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(mem->sgt)) { + MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf attachment\n")); + mem->map_ref--; + mali_session_memory_unlock(session); + return -EFAULT; + } + + err = mali_mem_mali_map_prepare(alloc); + if (_MALI_OSK_ERR_OK != err) { + MALI_DEBUG_PRINT(1, ("Mapping of DMA memory failed\n")); + mem->map_ref--; + mali_session_memory_unlock(session); + return -ENOMEM; + } + + pagedir = mali_session_get_page_directory(session); + MALI_DEBUG_ASSERT_POINTER(pagedir); + + for_each_sg(mem->sgt->sgl, sg, mem->sgt->nents, i) { + u32 size = sg_dma_len(sg); + dma_addr_t phys = sg_dma_address(sg); + + /* sg must be page aligned. 
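The map path above follows the standard dma-buf importer sequence: take a reference via the fd, attach the device, map the attachment, then walk the resulting scatterlist with one GPU page-table update per contiguous DMA segment. The importer skeleton reduced to the dma-buf calls themselves (kernel-style sketch, GPU page-table code omitted):

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

/* Sketch: import a dma-buf and walk its DMA segments. */
static int import_dmabuf(struct device *dev, int fd)
{
	struct dma_buf *buf;
	struct dma_buf_attachment *att;
	struct sg_table *sgt;
	struct scatterlist *sg;
	int i, ret = 0;

	buf = dma_buf_get(fd);               /* takes a reference */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	att = dma_buf_attach(buf, dev);
	if (IS_ERR(att)) {
		ret = PTR_ERR(att);
		goto put;
	}

	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto detach;
	}

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/* One GPU page-table update per contiguous DMA segment. */
		dma_addr_t phys = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);
		(void)phys; (void)len;
	}

	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(buf, att);
put:
	dma_buf_put(buf);
	return ret;
}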
*/ + MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE); + MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF)); + + mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT); + + virt += size; + } + + if (flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) { + u32 guard_phys; + MALI_DEBUG_PRINT(7, ("Mapping in extra guard page\n")); + + guard_phys = sg_dma_address(mem->sgt->sgl); + mali_mmu_pagedir_update(pagedir, virt, guard_phys, MALI_MMU_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT); + } + + mem->is_mapped = MALI_TRUE; + mali_session_memory_unlock(session); + /* Wake up any thread waiting for buffer to become mapped */ + wake_up_all(&mem->wait_queue); + } else { + MALI_DEBUG_ASSERT(mem->is_mapped); + mali_session_memory_unlock(session); + } + + return 0; +} + +static void mali_dma_buf_unmap(mali_mem_allocation *alloc, struct mali_dma_buf_attachment *mem) +{ + MALI_DEBUG_ASSERT_POINTER(alloc); + MALI_DEBUG_ASSERT_POINTER(mem); + MALI_DEBUG_ASSERT_POINTER(mem->attachment); + MALI_DEBUG_ASSERT_POINTER(mem->buf); + MALI_DEBUG_ASSERT_POINTER(alloc->session); + + mali_session_memory_lock(alloc->session); + mem->map_ref--; + + MALI_DEBUG_PRINT(5, ("Mali DMA-buf: unmap attachment %p, new map_ref = %d\n", mem, mem->map_ref)); + + if (0 == mem->map_ref) { + dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL); + if (MALI_TRUE == mem->is_mapped) { + mali_mem_mali_map_free(alloc->session, alloc->psize, alloc->mali_vma_node.vm_node.start, + alloc->flags); + } + mem->is_mapped = MALI_FALSE; + } + mali_session_memory_unlock(alloc->session); + /* Wake up any thread waiting for buffer to become unmapped */ + wake_up_all(&mem->wait_queue); +} + +#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +int mali_dma_buf_map_job(struct mali_pp_job *job) +{ + struct mali_dma_buf_attachment *mem; + _mali_osk_errcode_t err; + int i; + int ret = 0; + u32 num_memory_cookies; + struct mali_session_data *session; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_allocation *mali_alloc = NULL; + mali_mem_backend *mem_bkend = NULL; + + MALI_DEBUG_ASSERT_POINTER(job); + + num_memory_cookies = mali_pp_job_num_memory_cookies(job); + + session = mali_pp_job_get_session(job); + + MALI_DEBUG_ASSERT_POINTER(session); + + for (i = 0; i < num_memory_cookies; i++) { + u32 mali_addr = mali_pp_job_get_memory_cookie(job, i); + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); + MALI_DEBUG_ASSERT(NULL != mali_vma_node); + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(NULL != mali_alloc); + if (MALI_MEM_DMA_BUF != mali_alloc->type) { + continue; + } + + /* Get backend memory & Map on CPU */ + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + + mem = mem_bkend->dma_buf.attachment; + + MALI_DEBUG_ASSERT_POINTER(mem); + MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job)); + + err = mali_dma_buf_map(mem_bkend); + if (0 != err) { + MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for mali address %x\n", mali_addr)); + ret = -EFAULT; + continue; + } + } + return ret; +} + +void mali_dma_buf_unmap_job(struct mali_pp_job *job) +{ + struct mali_dma_buf_attachment *mem; + int i; + u32 num_memory_cookies; + struct mali_session_data *session; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_allocation *mali_alloc = NULL; + mali_mem_backend *mem_bkend = NULL; + + 
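mali_dma_buf_map and mali_dma_buf_unmap form a classic map_ref pair: only the 0-to-1 transition performs the expensive attachment mapping and only the 1-to-0 transition tears it down; every other caller rides the existing mapping. The counting skeleton on its own (sketch; the driver's session locking and waitqueue are left out):

#include <stdio.h>

struct mapping {
	int map_ref;
	int is_mapped;
};

static void map_get(struct mapping *m)
{
	if (++m->map_ref == 1) {  /* first user: do the real work */
		printf("dma_buf_map_attachment()\n");
		m->is_mapped = 1;
	}
}

static void map_put(struct mapping *m)
{
	if (--m->map_ref == 0) {  /* last user: tear it down */
		printf("dma_buf_unmap_attachment()\n");
		m->is_mapped = 0;
	}
}

int main(void)
{
	struct mapping m = { 0, 0 };
	map_get(&m); /* maps */
	map_get(&m); /* reuses the mapping */
	map_put(&m); /* still mapped */
	map_put(&m); /* unmaps */
	return 0;
}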
MALI_DEBUG_ASSERT_POINTER(job); + + num_memory_cookies = mali_pp_job_num_memory_cookies(job); + + session = mali_pp_job_get_session(job); + + MALI_DEBUG_ASSERT_POINTER(session); + + for (i = 0; i < num_memory_cookies; i++) { + u32 mali_addr = mali_pp_job_get_memory_cookie(job, i); + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); + MALI_DEBUG_ASSERT(NULL != mali_vma_node); + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(NULL != mali_alloc); + if (MALI_MEM_DMA_BUF != mali_alloc->type) { + continue; + } + + /* Get backend memory & Map on CPU */ + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + + mem = mem_bkend->dma_buf.attachment; + + MALI_DEBUG_ASSERT_POINTER(mem); + MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job)); + mali_dma_buf_unmap(mem_bkend->mali_allocation, mem); + } +} +#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */ + +int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *user_arg) +{ + _mali_uk_dma_buf_get_size_s args; + int fd; + struct dma_buf *buf; + + /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */ + if (0 != copy_from_user(&args, (void __user *)user_arg, sizeof(_mali_uk_dma_buf_get_size_s))) { + return -EFAULT; + } + + /* Do DMA-BUF stuff */ + fd = args.mem_fd; + + buf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(buf)) { + MALI_DEBUG_PRINT_ERROR(("Failed to get dma-buf from fd: %d\n", fd)); + return PTR_RET(buf); + } + + if (0 != put_user(buf->size, &user_arg->size)) { + dma_buf_put(buf); + return -EFAULT; + } + + dma_buf_put(buf); + + return 0; +} + +_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc, + mali_mem_backend *mem_backend, + int fd, u32 flags) +{ + struct dma_buf *buf; + struct mali_dma_buf_attachment *dma_mem; + struct mali_session_data *session = alloc->session; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_ASSERT_POINTER(alloc); + + /* get dma buffer */ + buf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(buf)) { + return _MALI_OSK_ERR_FAULT; + } + + /* Currently, mapping of the full buffer are supported. */ + if (alloc->psize != buf->size) { + goto failed_alloc_mem; + } + + dma_mem = _mali_osk_calloc(1, sizeof(struct mali_dma_buf_attachment)); + if (NULL == dma_mem) { + goto failed_alloc_mem; + } + + dma_mem->buf = buf; + dma_mem->session = session; + dma_mem->map_ref = 0; + init_waitqueue_head(&dma_mem->wait_queue); + + dma_mem->attachment = dma_buf_attach(dma_mem->buf, &mali_platform_device->dev); + if (NULL == dma_mem->attachment) { + goto failed_dma_attach; + } + + mem_backend->dma_buf.attachment = dma_mem; + + alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP; + if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) { + alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE; + } + + +#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) + /* Map memory into session's Mali virtual address space. 
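mali_dma_buf_get_size above shows the minimal safe shape of such an ioctl: copy the argument struct in, hold a dma-buf reference just long enough to read its size, write the answer back with put_user, and drop the reference on every exit path. The same shape as a reduced sketch (the argument struct here is illustrative, not the driver's uapi):

#include <linux/dma-buf.h>
#include <linux/uaccess.h>
#include <linux/types.h>

struct size_args { int fd; __u64 size; }; /* illustrative uapi struct */

static int dmabuf_size_ioctl(struct size_args __user *uarg)
{
	struct size_args args;
	struct dma_buf *buf;
	int ret = 0;

	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	buf = dma_buf_get(args.fd);          /* takes a reference */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (put_user((__u64)buf->size, &uarg->size))
		ret = -EFAULT;

	dma_buf_put(buf);                    /* drop it on every path */
	return ret;
}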
*/ + if (0 != mali_dma_buf_map(mem_backend)) { + goto Failed_dma_map; + } +#endif + + return _MALI_OSK_ERR_OK; + +#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +Failed_dma_map: + mali_dma_buf_unmap(alloc, dma_mem); +#endif + /* Wait for buffer to become unmapped */ + wait_event(dma_mem->wait_queue, !dma_mem->is_mapped); + MALI_DEBUG_ASSERT(!dma_mem->is_mapped); + dma_buf_detach(dma_mem->buf, dma_mem->attachment); +failed_dma_attach: + _mali_osk_free(dma_mem); +failed_alloc_mem: + dma_buf_put(buf); + return _MALI_OSK_ERR_FAULT; +} + +void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend) +{ + struct mali_dma_buf_attachment *mem; + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_ASSERT(MALI_MEM_DMA_BUF == mem_backend->type); + + mem = mem_backend->dma_buf.attachment; + MALI_DEBUG_ASSERT_POINTER(mem); + MALI_DEBUG_ASSERT_POINTER(mem->attachment); + MALI_DEBUG_ASSERT_POINTER(mem->buf); + MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release attachment %p\n", mem)); + +#if defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) + MALI_DEBUG_ASSERT_POINTER(mem_backend->mali_allocation); + /* We mapped implicitly on attach, so we need to unmap on release */ + mali_dma_buf_unmap(mem_backend->mali_allocation, mem); +#endif + /* Wait for buffer to become unmapped */ + wait_event(mem->wait_queue, !mem->is_mapped); + MALI_DEBUG_ASSERT(!mem->is_mapped); + + dma_buf_detach(mem->buf, mem->attachment); + dma_buf_put(mem->buf); + + _mali_osk_free(mem); +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_dma_buf.h b/drivers/gpu/arm/mali400/linux/mali_memory_dma_buf.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_dma_buf.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_dma_buf.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_MEMORY_DMA_BUF_H__ +#define __MALI_MEMORY_DMA_BUF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "mali_uk_types.h" +#include "mali_osk.h" +#include "mali_memory.h" + +struct mali_pp_job; + +struct mali_dma_buf_attachment; +struct mali_dma_buf_attachment { + struct dma_buf *buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct mali_session_data *session; + int map_ref; + struct mutex map_lock; + mali_bool is_mapped; + wait_queue_head_t wait_queue; +}; + +int mali_dma_buf_get_size(struct mali_session_data *session, _mali_uk_dma_buf_get_size_s __user *arg); + +void mali_mem_unbind_dma_buf(mali_mem_backend *mem_backend); + +_mali_osk_errcode_t mali_mem_bind_dma_buf(mali_mem_allocation *alloc, + mali_mem_backend *mem_backend, + int fd, u32 flags); + +#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) +int mali_dma_buf_map_job(struct mali_pp_job *job); +void mali_dma_buf_unmap_job(struct mali_pp_job *job); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_MEMORY_DMA_BUF_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_external.c b/drivers/gpu/arm/mali400/linux/mali_memory_external.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_external.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_external.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_ukk.h" +#include "mali_memory.h" +#include "mali_mem_validation.h" +#include "mali_uk_types.h" + +void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend) +{ + mali_mem_allocation *alloc; + struct mali_session_data *session; + MALI_DEBUG_ASSERT_POINTER(mem_backend); + alloc = mem_backend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + MALI_DEBUG_ASSERT(MALI_MEM_EXTERNAL == mem_backend->type); + + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + mali_session_memory_lock(session); + mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, + alloc->flags); + mali_session_memory_unlock(session); +} + +_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc, + mali_mem_backend *mem_backend, + u32 phys_addr, + u32 flag) +{ + struct mali_session_data *session; + _mali_osk_errcode_t err; + u32 virt, phys, size; + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_ASSERT_POINTER(alloc); + size = alloc->psize; + session = (struct mali_session_data *)(uintptr_t)alloc->session; + MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_INVALID_ARGS); + + /* check arguments */ + /* NULL might be a valid Mali address */ + if (!size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + + /* size must be a multiple of the system page size */ + if (size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); + + /* Validate the mali physical range */ + if (_MALI_OSK_ERR_OK != mali_mem_validation_check(phys_addr, size)) { + return _MALI_OSK_ERR_FAULT; + } + + if (flag & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) { + alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE; + } + + mali_session_memory_lock(session); + + virt = alloc->mali_vma_node.vm_node.start; + phys = phys_addr; + + err = mali_mem_mali_map_prepare(alloc); + if (_MALI_OSK_ERR_OK != err) { + mali_session_memory_unlock(session); + return _MALI_OSK_ERR_NOMEM; + } + + mali_mmu_pagedir_update(session->page_directory, virt, phys, size, MALI_MMU_FLAGS_DEFAULT); + + if (alloc->flags & MALI_MEM_FLAG_MALI_GUARD_PAGE) { + mali_mmu_pagedir_update(session->page_directory, virt + size, phys, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT); + } + MALI_DEBUG_PRINT(3, + ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n", + phys_addr, (phys_addr + size - 1), + virt)); + mali_session_memory_unlock(session); + + MALI_SUCCESS; +} + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_external.h b/drivers/gpu/arm/mali400/linux/mali_memory_external.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_external.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_external.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,29 @@ + +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
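The guard page used above is cheap insurance: with _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE set, one extra GPU page is mapped directly behind the buffer, aliased to the buffer's first physical page, so a small GPU overread hits valid (if wrong) memory instead of raising a page fault. Schematically, with a printf stub standing in for mali_mmu_pagedir_update:

#include <stdio.h>

#define PAGE_SZ 4096u

/* Hypothetical stand-in for mali_mmu_pagedir_update(). */
static void mmu_map(unsigned virt, unsigned phys, unsigned size)
{
	printf("map GPU 0x%08x..0x%08x -> phys 0x%08x\n",
	       virt, virt + size - 1, phys);
}

/* Sketch: map a buffer, then alias its first physical page right after the
 * end as a guard page, as the external-memory bind above does. */
static void map_with_guard(unsigned virt, unsigned phys, unsigned size)
{
	mmu_map(virt, phys, size);
	mmu_map(virt + size, phys, PAGE_SZ); /* GPU overreads land here */
}

int main(void)
{
	map_with_guard(0x10000000u, 0x80000000u, 16 * PAGE_SZ);
	return 0;
}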
+ */ + +#ifndef __MALI_MEMORY_EXTERNAL_H__ +#define __MALI_MEMORY_EXTERNAL_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +_mali_osk_errcode_t mali_mem_bind_ext_buf(mali_mem_allocation *alloc, + mali_mem_backend *mem_backend, + u32 phys_addr, + u32 flag); +void mali_mem_unbind_ext_buf(mali_mem_backend *mem_backend); + +#ifdef __cplusplus +} +#endif + +#endif diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory.h b/drivers/gpu/arm/mali400/linux/mali_memory.h --- a/drivers/gpu/arm/mali400/linux/mali_memory.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEMORY_H__ +#define __MALI_MEMORY_H__ + +#include "mali_osk.h" +#include "mali_session.h" + +#include +#include + +#include "mali_memory_types.h" +#include "mali_memory_os_alloc.h" + +_mali_osk_errcode_t mali_memory_initialize(void); +void mali_memory_terminate(void); + +/** @brief Allocate a page table page + * + * Allocate a page for use as a page directory or page table. The page is + * mapped into kernel space. + * + * @return _MALI_OSK_ERR_OK on success, otherwise an error code + * @param table_page GPU pointer to the allocated page + * @param mapping CPU pointer to the mapping of the allocated page + */ +MALI_STATIC_INLINE _mali_osk_errcode_t +mali_mmu_get_table_page(mali_dma_addr *table_page, mali_io_address *mapping) +{ + return mali_mem_os_get_table_page(table_page, mapping); +} + +/** @brief Release a page table page + * + * Release a page table page allocated through \a mali_mmu_get_table_page + * + * @param pa the GPU address of the page to release + */ +MALI_STATIC_INLINE void +mali_mmu_release_table_page(mali_dma_addr phys, void *virt) +{ + mali_mem_os_release_table_page(phys, virt); +} + +/** @brief mmap function + * + * mmap syscalls on the Mali device node will end up here. + * + * This function allocates Mali memory and maps it on CPU and Mali. + */ +int mali_mmap(struct file *filp, struct vm_area_struct *vma); + +/** @brief Start a new memory session + * + * Called when a process opens the Mali device node. + * + * @param session Pointer to session to initialize + */ +_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *session); + +/** @brief Close a memory session + * + * Called when a process closes the Mali device node. + * + * Memory allocated by the session will be freed + * + * @param session Pointer to the session to terminate + */ +void mali_memory_session_end(struct mali_session_data *session); + +/** @brief Prepare Mali page tables for mapping + * + * This function will prepare the Mali page tables for mapping the memory + * described by \a descriptor. + * + * Page tables will be reference counted and allocated, if not yet present. 
+ * + * @param descriptor Pointer to the memory descriptor to the mapping + */ +_mali_osk_errcode_t mali_mem_mali_map_prepare(mali_mem_allocation *descriptor); + +/** @brief Resize Mali page tables for mapping + * + * This function will Resize the Mali page tables for mapping the memory + * described by \a descriptor. + * + * Page tables will be reference counted and allocated, if not yet present. + * + * @param descriptor Pointer to the memory descriptor to the mapping + * @param new_size The new size of descriptor + */ +_mali_osk_errcode_t mali_mem_mali_map_resize(mali_mem_allocation *descriptor, u32 new_size); + +/** @brief Free Mali page tables for mapping + * + * This function will unmap pages from Mali memory and free the page tables + * that are now unused. + * + * The updated pages in the Mali L2 cache will be invalidated, and the MMU TLBs will be zapped if necessary. + * + * @param descriptor Pointer to the memory descriptor to unmap + */ +void mali_mem_mali_map_free(struct mali_session_data *session, u32 size, mali_address_t vaddr, u32 flags); + +/** @brief Parse resource and prepare the OS memory allocator + * + * @param size Maximum size to allocate for Mali GPU. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size); + +/** @brief Parse resource and prepare the dedicated memory allocator + * + * @param start Physical start address of dedicated Mali GPU memory. + * @param size Size of dedicated Mali GPU memory. + * @return _MALI_OSK_ERR_OK on success, otherwise failure. + */ +_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(u32 start, u32 size); + + +struct mali_page_node *_mali_page_node_allocate(mali_page_node_type type); + +void _mali_page_node_ref(struct mali_page_node *node); +void _mali_page_node_unref(struct mali_page_node *node); +void _mali_page_node_add_page(struct mali_page_node *node, struct page *page); + +void _mali_page_node_add_block_item(struct mali_page_node *node, mali_block_item *item); + +void _mali_page_node_add_swap_item(struct mali_page_node *node, struct mali_swap_item *item); + +int _mali_page_node_get_ref_count(struct mali_page_node *node); +dma_addr_t _mali_page_node_get_dma_addr(struct mali_page_node *node); +unsigned long _mali_page_node_get_pfn(struct mali_page_node *node); + +#endif /* __MALI_MEMORY_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_manager.c b/drivers/gpu/arm/mali400/linux/mali_memory_manager.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_manager.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_manager.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,993 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include +#endif +#include + +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_kernel_linux.h" +#include "mali_scheduler.h" +#include "mali_memory.h" +#include "mali_memory_os_alloc.h" +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include "mali_memory_dma_buf.h" +#include "mali_memory_secure.h" +#endif +#if defined(CONFIG_MALI400_UMP) +#include "mali_memory_ump.h" +#endif +#include "mali_memory_manager.h" +#include "mali_memory_virtual.h" +#include "mali_memory_util.h" +#include "mali_memory_external.h" +#include "mali_memory_cow.h" +#include "mali_memory_block_alloc.h" +#include "mali_ukk.h" +#include "mali_memory_swap_alloc.h" + +/* +* New memory system interface +*/ + +/*inti idr for backend memory */ +struct idr mali_backend_idr; +struct mutex mali_idr_mutex; + +/* init allocation manager */ +int mali_memory_manager_init(struct mali_allocation_manager *mgr) +{ + /* init Locks */ + rwlock_init(&mgr->vm_lock); + mutex_init(&mgr->list_mutex); + + /* init link */ + INIT_LIST_HEAD(&mgr->head); + + /* init RB tree */ + mgr->allocation_mgr_rb = RB_ROOT; + mgr->mali_allocation_num = 0; + return 0; +} + +/* Deinit allocation manager +* Do some check for debug +*/ +void mali_memory_manager_uninit(struct mali_allocation_manager *mgr) +{ + /* check RB tree is empty */ + MALI_DEBUG_ASSERT(((void *)(mgr->allocation_mgr_rb.rb_node) == (void *)rb_last(&mgr->allocation_mgr_rb))); + /* check allocation List */ + MALI_DEBUG_ASSERT(list_empty(&mgr->head)); +} + +/* Prepare memory descriptor */ +static mali_mem_allocation *mali_mem_allocation_struct_create(struct mali_session_data *session) +{ + mali_mem_allocation *mali_allocation; + + /* Allocate memory */ + mali_allocation = (mali_mem_allocation *)kzalloc(sizeof(mali_mem_allocation), GFP_KERNEL); + if (NULL == mali_allocation) { + MALI_DEBUG_PRINT(1, ("mali_mem_allocation_struct_create: descriptor was NULL\n")); + return NULL; + } + + MALI_DEBUG_CODE(mali_allocation->magic = MALI_MEM_ALLOCATION_VALID_MAGIC); + + /* do init */ + mali_allocation->flags = 0; + mali_allocation->session = session; + + INIT_LIST_HEAD(&mali_allocation->list); + _mali_osk_atomic_init(&mali_allocation->mem_alloc_refcount, 1); + + /** + *add to session list + */ + mutex_lock(&session->allocation_mgr.list_mutex); + list_add_tail(&mali_allocation->list, &session->allocation_mgr.head); + session->allocation_mgr.mali_allocation_num++; + mutex_unlock(&session->allocation_mgr.list_mutex); + + return mali_allocation; +} + +void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc) +{ + MALI_DEBUG_ASSERT_POINTER(alloc); + MALI_DEBUG_ASSERT_POINTER(alloc->session); + mutex_lock(&alloc->session->allocation_mgr.list_mutex); + list_del(&alloc->list); + alloc->session->allocation_mgr.mali_allocation_num--; + mutex_unlock(&alloc->session->allocation_mgr.list_mutex); + + kfree(alloc); +} + +int mali_mem_backend_struct_create(mali_mem_backend **backend, u32 psize) +{ + mali_mem_backend *mem_backend = NULL; + s32 ret = -ENOSPC; + s32 index = -1; + *backend = (mali_mem_backend *)kzalloc(sizeof(mali_mem_backend), GFP_KERNEL); + if (NULL == *backend) { + MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: backend descriptor was NULL\n")); + return -1; + } + mem_backend = *backend; + mem_backend->size = psize; + mutex_init(&mem_backend->mutex); + INIT_LIST_HEAD(&mem_backend->list); + mem_backend->using_count = 0; + + + /* link backend with 
id */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0) +again: + if (!idr_pre_get(&mali_backend_idr, GFP_KERNEL)) { + kfree(mem_backend); + return -ENOMEM; + } + mutex_lock(&mali_idr_mutex); + ret = idr_get_new_above(&mali_backend_idr, mem_backend, 1, &index); + mutex_unlock(&mali_idr_mutex); + + if (-ENOSPC == ret) { + kfree(mem_backend); + return -ENOSPC; + } + if (-EAGAIN == ret) + goto again; +#else + mutex_lock(&mali_idr_mutex); + ret = idr_alloc(&mali_backend_idr, mem_backend, 1, MALI_S32_MAX, GFP_KERNEL); + mutex_unlock(&mali_idr_mutex); + index = ret; + if (ret < 0) { + MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_create: Can't allocate idr for backend! \n")); + kfree(mem_backend); + return -ENOSPC; + } +#endif + return index; +} + + +static void mali_mem_backend_struct_destory(mali_mem_backend **backend, s32 backend_handle) +{ + mali_mem_backend *mem_backend = *backend; + + mutex_lock(&mali_idr_mutex); + idr_remove(&mali_backend_idr, backend_handle); + mutex_unlock(&mali_idr_mutex); + kfree(mem_backend); + *backend = NULL; +} + +mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address) +{ + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_backend *mem_bkend = NULL; + mali_mem_allocation *mali_alloc = NULL; + MALI_DEBUG_ASSERT_POINTER(session); + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_address, 0); + if (NULL == mali_vma_node) { + MALI_DEBUG_PRINT(1, ("mali_mem_backend_struct_search:vma node was NULL\n")); + return NULL; + } + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + /* Get backend memory & Map on CPU */ + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + return mem_bkend; +} + +static _mali_osk_errcode_t mali_mem_resize(struct mali_session_data *session, mali_mem_backend *mem_backend, u32 physical_size) +{ + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + int retval = 0; + mali_mem_allocation *mali_allocation = NULL; + mali_mem_os_mem tmp_os_mem; + s32 change_page_count; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! 
\n")); + MALI_DEBUG_ASSERT(0 == physical_size % MALI_MMU_PAGE_SIZE); + + mali_allocation = mem_backend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(mali_allocation); + + MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE & mali_allocation->flags); + MALI_DEBUG_ASSERT(MALI_MEM_OS == mali_allocation->type); + + mutex_lock(&mem_backend->mutex); + + /* Do resize*/ + if (physical_size > mem_backend->size) { + u32 add_size = physical_size - mem_backend->size; + + MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE); + + /* Allocate new pages from os mem */ + retval = mali_mem_os_alloc_pages(&tmp_os_mem, add_size); + + if (retval) { + if (-ENOMEM == retval) { + ret = _MALI_OSK_ERR_NOMEM; + } else { + ret = _MALI_OSK_ERR_FAULT; + } + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory allocation failed !\n")); + goto failed_alloc_memory; + } + + MALI_DEBUG_ASSERT(tmp_os_mem.count == add_size / MALI_MMU_PAGE_SIZE); + + /* Resize the memory of the backend */ + ret = mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count); + + if (ret) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory resizing failed !\n")); + goto failed_resize_pages; + } + + /*Resize cpu mapping */ + if (NULL != mali_allocation->cpu_mapping.vma) { + ret = mali_mem_os_resize_cpu_map_locked(mem_backend, mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + mem_backend->size, add_size); + if (unlikely(ret != _MALI_OSK_ERR_OK)) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: cpu mapping failed !\n")); + goto failed_cpu_map; + } + } + + /* Resize mali mapping */ + _mali_osk_mutex_wait(session->memory_lock); + ret = mali_mem_mali_map_resize(mali_allocation, physical_size); + + if (ret) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_resize: mali map resize fail !\n")); + goto failed_gpu_map; + } + + ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, mali_allocation->mali_vma_node.vm_node.start, + mali_allocation->psize / MALI_MMU_PAGE_SIZE, add_size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties); + if (ret) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: mali mapping failed !\n")); + goto failed_gpu_map; + } + + _mali_osk_mutex_signal(session->memory_lock); + } else { + u32 dec_size, page_count; + u32 vaddr = 0; + INIT_LIST_HEAD(&tmp_os_mem.pages); + tmp_os_mem.count = 0; + + dec_size = mem_backend->size - physical_size; + MALI_DEBUG_ASSERT(0 == dec_size % MALI_MMU_PAGE_SIZE); + + page_count = dec_size / MALI_MMU_PAGE_SIZE; + vaddr = mali_allocation->mali_vma_node.vm_node.start + physical_size; + + /* Resize the memory of the backend */ + ret = mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, physical_size / MALI_MMU_PAGE_SIZE, page_count); + + if (ret) { + MALI_DEBUG_PRINT(4, ("_mali_ukk_mem_resize: mali map resize failed!\n")); + goto failed_resize_pages; + } + + /* Resize mali map */ + _mali_osk_mutex_wait(session->memory_lock); + mali_mem_mali_map_free(session, dec_size, vaddr, mali_allocation->flags); + _mali_osk_mutex_signal(session->memory_lock); + + /* Zap cpu mapping */ + if (0 != mali_allocation->cpu_mapping.addr) { + MALI_DEBUG_ASSERT(NULL != mali_allocation->cpu_mapping.vma); + zap_vma_ptes(mali_allocation->cpu_mapping.vma, mali_allocation->cpu_mapping.vma->vm_start + physical_size, dec_size); + } + + /* Free those extra pages */ + mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE); + } + + /* Resize memory allocation and memory backend */ + change_page_count = (s32)(physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE; + 
mali_allocation->psize = physical_size; + mem_backend->size = physical_size; + mutex_unlock(&mem_backend->mutex); + + if (change_page_count > 0) { + atomic_add(change_page_count, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + + } else { + atomic_sub((s32)(-change_page_count), &session->mali_mem_allocated_pages); + } + + return _MALI_OSK_ERR_OK; + +failed_gpu_map: + _mali_osk_mutex_signal(session->memory_lock); +failed_cpu_map: + if (physical_size > mem_backend->size) { + mali_mem_os_resize_pages(&mem_backend->os_mem, &tmp_os_mem, mem_backend->size / MALI_MMU_PAGE_SIZE, + (physical_size - mem_backend->size) / MALI_MMU_PAGE_SIZE); + } else { + mali_mem_os_resize_pages(&tmp_os_mem, &mem_backend->os_mem, 0, tmp_os_mem.count); + } +failed_resize_pages: + if (0 != tmp_os_mem.count) + mali_mem_os_free(&tmp_os_mem.pages, tmp_os_mem.count, MALI_FALSE); +failed_alloc_memory: + + mutex_unlock(&mem_backend->mutex); + return ret; +} + + +/* Set GPU MMU properties */ +static void _mali_memory_gpu_map_property_set(u32 *properties, u32 flags) +{ + if (_MALI_MEMORY_GPU_READ_ALLOCATE & flags) { + *properties = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE; + } else { + *properties = MALI_MMU_FLAGS_DEFAULT; + } +} + +_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size) +{ + mali_mem_backend *mem_backend = NULL; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + mali_mem_allocation *mali_allocation = NULL; + u32 new_physical_size; + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT(0 == add_size % MALI_MMU_PAGE_SIZE); + + /* Get the memory backend that need to be resize. */ + mem_backend = mali_mem_backend_struct_search(session, mali_addr); + + if (NULL == mem_backend) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n")); + return ret; + } + + mali_allocation = mem_backend->mali_allocation; + + MALI_DEBUG_ASSERT_POINTER(mali_allocation); + + new_physical_size = add_size + mem_backend->size; + + if (new_physical_size > (mali_allocation->mali_vma_node.vm_node.size)) + return ret; + + MALI_DEBUG_ASSERT(new_physical_size != mem_backend->size); + + ret = mali_mem_resize(session, mem_backend, new_physical_size); + + return ret; +} + +/** +* function@_mali_ukk_mem_allocate - allocate mali memory +*/ +_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args) +{ + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + mali_mem_backend *mem_backend = NULL; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + int retval = 0; + mali_mem_allocation *mali_allocation = NULL; + struct mali_vma_node *mali_vma_node = NULL; + + MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_allocate, vaddr=0x%x, size =0x%x! \n", args->gpu_vaddr, args->psize)); + + /* Check if the address is allocated + */ + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0); + + if (unlikely(mali_vma_node)) { + MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n")); + return _MALI_OSK_ERR_FAULT; + } + /** + *create mali memory allocation + */ + + mali_allocation = mali_mem_allocation_struct_create(session); + + if (mali_allocation == NULL) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_allocate: Failed to create allocation struct! 
\n")); + return _MALI_OSK_ERR_NOMEM; + } + mali_allocation->psize = args->psize; + mali_allocation->vsize = args->vsize; + + /* MALI_MEM_OS if need to support mem resize, + * or MALI_MEM_BLOCK if have dedicated memory, + * or MALI_MEM_OS, + * or MALI_MEM_SWAP. + */ + if (args->flags & _MALI_MEMORY_ALLOCATE_SWAPPABLE) { + mali_allocation->type = MALI_MEM_SWAP; + } else if (args->flags & _MALI_MEMORY_ALLOCATE_RESIZEABLE) { + mali_allocation->type = MALI_MEM_OS; + mali_allocation->flags |= MALI_MEM_FLAG_CAN_RESIZE; + } else if (args->flags & _MALI_MEMORY_ALLOCATE_SECURE) { + mali_allocation->type = MALI_MEM_SECURE; + } else if (MALI_TRUE == mali_memory_have_dedicated_memory()) { + mali_allocation->type = MALI_MEM_BLOCK; + } else { + mali_allocation->type = MALI_MEM_OS; + } + + /** + *add allocation node to RB tree for index + */ + mali_allocation->mali_vma_node.vm_node.start = args->gpu_vaddr; + mali_allocation->mali_vma_node.vm_node.size = args->vsize; + + mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node); + + mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, args->psize); + if (mali_allocation->backend_handle < 0) { + ret = _MALI_OSK_ERR_NOMEM; + MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n")); + goto failed_alloc_backend; + } + + + mem_backend->mali_allocation = mali_allocation; + mem_backend->type = mali_allocation->type; + + mali_allocation->mali_mapping.addr = args->gpu_vaddr; + + /* set gpu mmu propery */ + _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags); + /* do prepare for MALI mapping */ + if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) { + _mali_osk_mutex_wait(session->memory_lock); + + ret = mali_mem_mali_map_prepare(mali_allocation); + if (0 != ret) { + _mali_osk_mutex_signal(session->memory_lock); + goto failed_prepare_map; + } + _mali_osk_mutex_signal(session->memory_lock); + } + + if (mali_allocation->psize == 0) { + mem_backend->os_mem.count = 0; + INIT_LIST_HEAD(&mem_backend->os_mem.pages); + goto done; + } + + if (args->flags & _MALI_MEMORY_ALLOCATE_DEFER_BIND) { + mali_allocation->flags |= _MALI_MEMORY_ALLOCATE_DEFER_BIND; + mem_backend->flags |= MALI_MEM_BACKEND_FLAG_NOT_BINDED; + /* init for defer bind backend*/ + mem_backend->os_mem.count = 0; + INIT_LIST_HEAD(&mem_backend->os_mem.pages); + + goto done; + } + + if (likely(mali_allocation->psize > 0)) { + + if (MALI_MEM_SECURE == mem_backend->type) { +#if defined(CONFIG_DMA_SHARED_BUFFER) + ret = mali_mem_secure_attach_dma_buf(&mem_backend->secure_mem, mem_backend->size, args->secure_shared_fd); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, ("Failed to attach dma buf for secure memory! \n")); + goto failed_alloc_pages; + } +#else + ret = _MALI_OSK_ERR_UNSUPPORTED; + MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory! 
\n")); + goto failed_alloc_pages; +#endif + } else { + + /** + *allocate physical memory + */ + if (mem_backend->type == MALI_MEM_OS) { + retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size); + } else if (mem_backend->type == MALI_MEM_BLOCK) { + /* try to allocated from BLOCK memory first, then try OS memory if failed.*/ + if (mali_mem_block_alloc(&mem_backend->block_mem, mem_backend->size)) { + retval = mali_mem_os_alloc_pages(&mem_backend->os_mem, mem_backend->size); + mem_backend->type = MALI_MEM_OS; + mali_allocation->type = MALI_MEM_OS; + } + } else if (MALI_MEM_SWAP == mem_backend->type) { + retval = mali_mem_swap_alloc_pages(&mem_backend->swap_mem, mali_allocation->mali_vma_node.vm_node.size, &mem_backend->start_idx); + } else { + /* ONLY support mem_os type */ + MALI_DEBUG_ASSERT(0); + } + + if (retval) { + ret = _MALI_OSK_ERR_NOMEM; + MALI_DEBUG_PRINT(1, (" can't allocate enough pages! \n")); + goto failed_alloc_pages; + } + } + } + + /** + *map to GPU side + */ + if (!(args->flags & _MALI_MEMORY_ALLOCATE_NO_BIND_GPU) && mali_allocation->psize > 0) { + _mali_osk_mutex_wait(session->memory_lock); + /* Map on Mali */ + + if (mem_backend->type == MALI_MEM_OS) { + ret = mali_mem_os_mali_map(&mem_backend->os_mem, session, args->gpu_vaddr, 0, + mem_backend->size / MALI_MMU_PAGE_SIZE, mali_allocation->mali_mapping.properties); + + } else if (mem_backend->type == MALI_MEM_BLOCK) { + mali_mem_block_mali_map(&mem_backend->block_mem, session, args->gpu_vaddr, + mali_allocation->mali_mapping.properties); + } else if (mem_backend->type == MALI_MEM_SWAP) { + ret = mali_mem_swap_mali_map(&mem_backend->swap_mem, session, args->gpu_vaddr, + mali_allocation->mali_mapping.properties); + } else if (mem_backend->type == MALI_MEM_SECURE) { +#if defined(CONFIG_DMA_SHARED_BUFFER) + ret = mali_mem_secure_mali_map(&mem_backend->secure_mem, session, args->gpu_vaddr, mali_allocation->mali_mapping.properties); +#endif + } else { /* unsupport type */ + MALI_DEBUG_ASSERT(0); + } + + _mali_osk_mutex_signal(session->memory_lock); + } +done: + if (MALI_MEM_OS == mem_backend->type) { + atomic_add(mem_backend->os_mem.count, &session->mali_mem_allocated_pages); + } else if (MALI_MEM_BLOCK == mem_backend->type) { + atomic_add(mem_backend->block_mem.count, &session->mali_mem_allocated_pages); + } else if (MALI_MEM_SECURE == mem_backend->type) { + atomic_add(mem_backend->secure_mem.count, &session->mali_mem_allocated_pages); + } else { + MALI_DEBUG_ASSERT(MALI_MEM_SWAP == mem_backend->type); + atomic_add(mem_backend->swap_mem.count, &session->mali_mem_allocated_pages); + atomic_add(mem_backend->swap_mem.count, &session->mali_mem_array[mem_backend->type]); + } + + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + return _MALI_OSK_ERR_OK; + +failed_alloc_pages: + mali_mem_mali_map_free(session, mali_allocation->psize, mali_allocation->mali_vma_node.vm_node.start, mali_allocation->flags); +failed_prepare_map: + mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle); +failed_alloc_backend: + + mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node); + mali_mem_allocation_struct_destory(mali_allocation); + + return ret; +} + + +_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args) +{ + struct mali_session_data *session = (struct mali_session_data 
*)(uintptr_t)args->ctx; + u32 vaddr = args->gpu_vaddr; + mali_mem_allocation *mali_alloc = NULL; + struct mali_vma_node *mali_vma_node = NULL; + + /* find mali allocation structure by vaddress*/ + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, vaddr, 0); + if (NULL == mali_vma_node) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_free: invalid addr: 0x%x\n", vaddr)); + return _MALI_OSK_ERR_INVALID_ARGS; + } + MALI_DEBUG_ASSERT(NULL != mali_vma_node); + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + + if (mali_alloc) + /* check ref_count */ + args->free_pages_nr = mali_allocation_unref(&mali_alloc); + + return _MALI_OSK_ERR_OK; +} + + +/** +* Function _mali_ukk_mem_bind -- bind a external memory to a new GPU address +* It will allocate a new mem allocation and bind external memory to it. +* Supported backend type are: +* _MALI_MEMORY_BIND_BACKEND_UMP +* _MALI_MEMORY_BIND_BACKEND_DMA_BUF +* _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY +* CPU access is not supported yet +*/ +_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args) +{ + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + mali_mem_backend *mem_backend = NULL; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + mali_mem_allocation *mali_allocation = NULL; + MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_bind, vaddr=0x%x, size =0x%x! \n", args->vaddr, args->size)); + + /** + * allocate mali allocation. + */ + mali_allocation = mali_mem_allocation_struct_create(session); + + if (mali_allocation == NULL) { + return _MALI_OSK_ERR_NOMEM; + } + mali_allocation->psize = args->size; + mali_allocation->vsize = args->size; + mali_allocation->mali_mapping.addr = args->vaddr; + + /* add allocation node to RB tree for index */ + mali_allocation->mali_vma_node.vm_node.start = args->vaddr; + mali_allocation->mali_vma_node.vm_node.size = args->size; + mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node); + + /* allocate backend*/ + if (mali_allocation->psize > 0) { + mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize); + if (mali_allocation->backend_handle < 0) { + goto Failed_alloc_backend; + } + + } else { + goto Failed_alloc_backend; + } + + mem_backend->size = mali_allocation->psize; + mem_backend->mali_allocation = mali_allocation; + + switch (args->flags & _MALI_MEMORY_BIND_BACKEND_MASK) { + case _MALI_MEMORY_BIND_BACKEND_UMP: +#if defined(CONFIG_MALI400_UMP) + mali_allocation->type = MALI_MEM_UMP; + mem_backend->type = MALI_MEM_UMP; + ret = mali_mem_bind_ump_buf(mali_allocation, mem_backend, + args->mem_union.bind_ump.secure_id, args->mem_union.bind_ump.flags); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, ("Bind ump buf failed\n")); + goto Failed_bind_backend; + } +#else + MALI_DEBUG_PRINT(1, ("UMP not supported\n")); + goto Failed_bind_backend; +#endif + break; + case _MALI_MEMORY_BIND_BACKEND_DMA_BUF: +#if defined(CONFIG_DMA_SHARED_BUFFER) + mali_allocation->type = MALI_MEM_DMA_BUF; + mem_backend->type = MALI_MEM_DMA_BUF; + ret = mali_mem_bind_dma_buf(mali_allocation, mem_backend, + args->mem_union.bind_dma_buf.mem_fd, args->mem_union.bind_dma_buf.flags); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, ("Bind dma buf failed\n")); + goto Failed_bind_backend; + } +#else + MALI_DEBUG_PRINT(1, ("DMA not supported\n")); + goto Failed_bind_backend; +#endif + break; + case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY: + /* not allowed */ + MALI_DEBUG_PRINT_ERROR(("Mali internal 
memory type not supported !\n")); + goto Failed_bind_backend; + break; + + case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY: + mali_allocation->type = MALI_MEM_EXTERNAL; + mem_backend->type = MALI_MEM_EXTERNAL; + ret = mali_mem_bind_ext_buf(mali_allocation, mem_backend, args->mem_union.bind_ext_memory.phys_addr, + args->mem_union.bind_ext_memory.flags); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, ("Bind external buf failed\n")); + goto Failed_bind_backend; + } + break; + + case _MALI_MEMORY_BIND_BACKEND_EXT_COW: + /* not allowed */ + MALI_DEBUG_PRINT_ERROR(("External cow memory type not supported !\n")); + goto Failed_bind_backend; + break; + + default: + MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported !\n")); + goto Failed_bind_backend; + break; + } + MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE); + atomic_add(mem_backend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_backend->type]); + return _MALI_OSK_ERR_OK; + +Failed_bind_backend: + mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle); + +Failed_alloc_backend: + mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node); + mali_mem_allocation_struct_destory(mali_allocation); + + MALI_DEBUG_PRINT(1, (" _mali_ukk_mem_bind, return ERROR! \n")); + return ret; +} + + +/* +* Function _mali_ukk_mem_unbind -- unbind a external memory to a new GPU address +* This function unbind the backend memory and free the allocation +* no ref_count for this type of memory +*/ +_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args) +{ + /**/ + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + mali_mem_allocation *mali_allocation = NULL; + struct mali_vma_node *mali_vma_node = NULL; + u32 mali_addr = args->vaddr; + MALI_DEBUG_PRINT(5, (" _mali_ukk_mem_unbind, vaddr=0x%x! 
\n", args->vaddr)); + + /* find the allocation by vaddr */ + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); + if (likely(mali_vma_node)) { + MALI_DEBUG_ASSERT(mali_addr == mali_vma_node->vm_node.start); + mali_allocation = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + } else { + MALI_DEBUG_ASSERT(NULL != mali_vma_node); + return _MALI_OSK_ERR_INVALID_ARGS; + } + + if (NULL != mali_allocation) + /* check ref_count */ + mali_allocation_unref(&mali_allocation); + return _MALI_OSK_ERR_OK; +} + +/* +* Function _mali_ukk_mem_cow -- COW for an allocation +* This function allocate new pages for a range (range, range+size) of allocation +* And Map it(keep use the not in range pages from target allocation ) to an GPU vaddr +*/ +_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args) +{ + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + mali_mem_backend *target_backend = NULL; + mali_mem_backend *mem_backend = NULL; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_allocation *mali_allocation = NULL; + + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + /* Get the target backend for cow */ + target_backend = mali_mem_backend_struct_search(session, args->target_handle); + + if (NULL == target_backend || 0 == target_backend->size) { + MALI_DEBUG_ASSERT_POINTER(target_backend); + MALI_DEBUG_ASSERT(0 != target_backend->size); + return ret; + } + + /*Cow not support resized mem */ + MALI_DEBUG_ASSERT(MALI_MEM_FLAG_CAN_RESIZE != (MALI_MEM_FLAG_CAN_RESIZE & target_backend->mali_allocation->flags)); + + /* Check if the new mali address is allocated */ + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0); + + if (unlikely(mali_vma_node)) { + MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n")); + return ret; + } + + /* create new alloction for COW*/ + mali_allocation = mali_mem_allocation_struct_create(session); + if (mali_allocation == NULL) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to create allocation struct!\n")); + return _MALI_OSK_ERR_NOMEM; + } + mali_allocation->psize = args->target_size; + mali_allocation->vsize = args->target_size; + mali_allocation->type = MALI_MEM_COW; + + /*add allocation node to RB tree for index*/ + mali_allocation->mali_vma_node.vm_node.start = args->vaddr; + mali_allocation->mali_vma_node.vm_node.size = mali_allocation->vsize; + mali_vma_offset_add(&session->allocation_mgr, &mali_allocation->mali_vma_node); + + /* create new backend for COW memory */ + mali_allocation->backend_handle = mali_mem_backend_struct_create(&mem_backend, mali_allocation->psize); + if (mali_allocation->backend_handle < 0) { + ret = _MALI_OSK_ERR_NOMEM; + MALI_DEBUG_PRINT(1, ("mali_allocation->backend_handle < 0! \n")); + goto failed_alloc_backend; + } + mem_backend->mali_allocation = mali_allocation; + mem_backend->type = mali_allocation->type; + + if (target_backend->type == MALI_MEM_SWAP || + (MALI_MEM_COW == target_backend->type && (MALI_MEM_BACKEND_FLAG_SWAP_COWED & target_backend->flags))) { + mem_backend->flags |= MALI_MEM_BACKEND_FLAG_SWAP_COWED; + /** + * CoWed swap backends couldn't be mapped as non-linear vma, because if one + * vma is set with flag VM_NONLINEAR, the vma->vm_private_data will be used by kernel, + * while in mali driver, we use this variable to store the pointer of mali_allocation, so there + * is a conflict. 
+ * To resolve this problem, we have to do some fake things, we reserved about 64MB + * space from index 0, there isn't really page's index will be set from 0 to (64MB>>PAGE_SHIFT_NUM), + * and all of CoWed swap memory backends' start_idx will be assigned with 0, and these + * backends will be mapped as linear and will add to priority tree of global swap file, while + * these vmas will never be found by using normal page->index, these pages in those vma + * also couldn't be swapped out. + */ + mem_backend->start_idx = 0; + } + + /* Add the target backend's cow count, also allocate new pages for COW backend from os mem + *for a modified range and keep the page which not in the modified range and Add ref to it + */ + MALI_DEBUG_PRINT(3, ("Cow mapping: target_addr: 0x%x; cow_addr: 0x%x, size: %u\n", target_backend->mali_allocation->mali_vma_node.vm_node.start, + mali_allocation->mali_vma_node.vm_node.start, mali_allocation->mali_vma_node.vm_node.size)); + + ret = mali_memory_do_cow(target_backend, args->target_offset, args->target_size, mem_backend, args->range_start, args->range_size); + if (_MALI_OSK_ERR_OK != ret) { + MALI_DEBUG_PRINT(1, ("_mali_ukk_mem_cow: Failed to cow!\n")); + goto failed_do_cow; + } + + /** + *map to GPU side + */ + mali_allocation->mali_mapping.addr = args->vaddr; + /* set gpu mmu propery */ + _mali_memory_gpu_map_property_set(&mali_allocation->mali_mapping.properties, args->flags); + + _mali_osk_mutex_wait(session->memory_lock); + /* Map on Mali */ + ret = mali_mem_mali_map_prepare(mali_allocation); + if (0 != ret) { + MALI_DEBUG_PRINT(1, (" prepare map fail! \n")); + goto failed_gpu_map; + } + + if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) { + mali_mem_cow_mali_map(mem_backend, 0, mem_backend->size); + } + + _mali_osk_mutex_signal(session->memory_lock); + + mutex_lock(&target_backend->mutex); + target_backend->flags |= MALI_MEM_BACKEND_FLAG_COWED; + mutex_unlock(&target_backend->mutex); + + atomic_add(args->range_size / MALI_MMU_PAGE_SIZE, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + return _MALI_OSK_ERR_OK; + +failed_gpu_map: + _mali_osk_mutex_signal(session->memory_lock); + mali_mem_cow_release(mem_backend, MALI_FALSE); + mem_backend->cow_mem.count = 0; +failed_do_cow: + mali_mem_backend_struct_destory(&mem_backend, mali_allocation->backend_handle); +failed_alloc_backend: + mali_vma_offset_remove(&session->allocation_mgr, &mali_allocation->mali_vma_node); + mali_mem_allocation_struct_destory(mali_allocation); + + return ret; +} + +_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args) +{ + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + mali_mem_backend *mem_backend = NULL; + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + + MALI_DEBUG_PRINT(4, (" _mali_ukk_mem_cow_modify_range called! \n")); + /* Get the backend that need to be modified. 
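+	 * Only MALI_MEM_COW backends are valid here; the assert below enforces
+	 * this before the range is re-mapped.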
*/ + mem_backend = mali_mem_backend_struct_search(session, args->vaddr); + + if (NULL == mem_backend || 0 == mem_backend->size) { + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_ASSERT(0 != mem_backend->size); + return ret; + } + + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_backend->type); + + ret = mali_memory_cow_modify_range(mem_backend, args->range_start, args->size); + args->change_pages_nr = mem_backend->cow_mem.change_pages_nr; + if (_MALI_OSK_ERR_OK != ret) + return ret; + _mali_osk_mutex_wait(session->memory_lock); + if (!(mem_backend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)) { + mali_mem_cow_mali_map(mem_backend, args->range_start, args->size); + } + _mali_osk_mutex_signal(session->memory_lock); + + atomic_add(args->change_pages_nr, &session->mali_mem_allocated_pages); + if (atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > session->max_mali_mem_allocated_size) { + session->max_mali_mem_allocated_size = atomic_read(&session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + + return _MALI_OSK_ERR_OK; +} + + +_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args) +{ + mali_mem_backend *mem_backend = NULL; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT; + + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_PRINT(4, (" mali_mem_resize_memory called! \n")); + MALI_DEBUG_ASSERT(0 == args->psize % MALI_MMU_PAGE_SIZE); + + /* Get the memory backend that need to be resize. */ + mem_backend = mali_mem_backend_struct_search(session, args->vaddr); + + if (NULL == mem_backend) { + MALI_DEBUG_PRINT(2, ("_mali_ukk_mem_resize: memory backend = NULL!\n")); + return ret; + } + + MALI_DEBUG_ASSERT(args->psize != mem_backend->size); + + ret = mali_mem_resize(session, mem_backend, args->psize); + + return ret; +} + +_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args) +{ + args->memory_usage = _mali_ukk_report_memory_usage(); + if (0 != args->vaddr) { + mali_mem_backend *mem_backend = NULL; + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + /* Get the backend that need to be modified. */ + mem_backend = mali_mem_backend_struct_search(session, args->vaddr); + if (NULL == mem_backend) { + MALI_DEBUG_ASSERT_POINTER(mem_backend); + return _MALI_OSK_ERR_FAULT; + } + + if (MALI_MEM_COW == mem_backend->type) + args->change_pages_nr = mem_backend->cow_mem.change_pages_nr; + } + return _MALI_OSK_ERR_OK; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_manager.h b/drivers/gpu/arm/mali400/linux/mali_memory_manager.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_manager.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_manager.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+#ifndef __MALI_MEMORY_MANAGER_H__
+#define __MALI_MEMORY_MANAGER_H__
+
+#include "mali_osk.h"
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "mali_memory_types.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_uk_types.h"
+
+struct mali_allocation_manager {
+	rwlock_t vm_lock;
+	struct rb_root allocation_mgr_rb;
+	struct list_head head;
+	struct mutex list_mutex;
+	u32 mali_allocation_num;
+};
+
+extern struct idr mali_backend_idr;
+extern struct mutex mali_idr_mutex;
+
+int mali_memory_manager_init(struct mali_allocation_manager *mgr);
+void mali_memory_manager_uninit(struct mali_allocation_manager *mgr);
+
+void mali_mem_allocation_struct_destory(mali_mem_allocation *alloc);
+_mali_osk_errcode_t mali_mem_add_mem_size(struct mali_session_data *session, u32 mali_addr, u32 add_size);
+mali_mem_backend *mali_mem_backend_struct_search(struct mali_session_data *session, u32 mali_address);
+_mali_osk_errcode_t _mali_ukk_mem_allocate(_mali_uk_alloc_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_free(_mali_uk_free_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_bind(_mali_uk_bind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_unbind(_mali_uk_unbind_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow(_mali_uk_cow_mem_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_cow_modify_range(_mali_uk_cow_modify_range_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_usage_get(_mali_uk_profiling_memory_usage_get_s *args);
+_mali_osk_errcode_t _mali_ukk_mem_resize(_mali_uk_mem_resize_s *args);
+
+#endif
+
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_os_alloc.c b/drivers/gpu/arm/mali400/linux/mali_memory_os_alloc.c
--- a/drivers/gpu/arm/mali400/linux/mali_memory_os_alloc.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_memory_os_alloc.c	2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,812 @@
+/*
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/fs.h>
+#include <linux/dma-mapping.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_memory.h"
+#include "mali_memory_os_alloc.h"
+#include "mali_kernel_linux.h"
+
+/* Minimum size of allocator page pool */
+#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
+#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+/* Write combine dma_attrs */
+static unsigned long dma_attrs_wc;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
+static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask);
+#else
+static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask);
+#endif
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+#else
+static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc);
+static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc);
+#endif
+#endif
+static void mali_mem_os_trim_pool(struct work_struct *work);
+
+struct mali_mem_os_allocator mali_mem_os_allocator = {
+	.pool_lock = __SPIN_LOCK_UNLOCKED(pool_lock),
+	.pool_pages = LIST_HEAD_INIT(mali_mem_os_allocator.pool_pages),
+	.pool_count = 0,
+
+	.allocated_pages = ATOMIC_INIT(0),
+	.allocation_limit = 0,
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+	.shrinker.shrink = mali_mem_os_shrink,
+#else
+	.shrinker.count_objects = mali_mem_os_shrink_count,
+	.shrinker.scan_objects = mali_mem_os_shrink,
+#endif
+	.shrinker.seeks = DEFAULT_SEEKS,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool, TIMER_DEFERRABLE),
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+	.timed_shrinker = __DEFERRED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#else
+	.timed_shrinker = __DELAYED_WORK_INITIALIZER(mali_mem_os_allocator.timed_shrinker, mali_mem_os_trim_pool),
+#endif
+};
+
+u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag)
+{
+	LIST_HEAD(pages);
+	struct mali_page_node *m_page, *m_tmp;
+	u32 free_pages_nr = 0;
+
+	if (MALI_TRUE == cow_flag) {
+		list_for_each_entry_safe(m_page, m_tmp, os_pages, list) {
+			/* only handle OS nodes here */
+			if (m_page->type == MALI_PAGE_NODE_OS) {
+				if (1 == _mali_page_node_get_ref_count(m_page)) {
+					list_move(&m_page->list, &pages);
+					atomic_sub(1, &mali_mem_os_allocator.allocated_pages);
+					free_pages_nr++;
+				} else {
+					_mali_page_node_unref(m_page);
+					m_page->page = NULL;
+					list_del(&m_page->list);
+					kfree(m_page);
+				}
+			}
+		}
+	} else {
+		list_cut_position(&pages, os_pages, os_pages->prev);
+		atomic_sub(pages_count, &mali_mem_os_allocator.allocated_pages);
+		free_pages_nr = pages_count;
+	}
+
+	/* Put pages on pool.
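+	 * Pages returned to the pool keep their DMA mapping (stored in
+	 * page_private), so a later allocation can reuse them without a new
+	 * dma_map_page() call.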
*/ + spin_lock(&mali_mem_os_allocator.pool_lock); + list_splice(&pages, &mali_mem_os_allocator.pool_pages); + mali_mem_os_allocator.pool_count += free_pages_nr; + spin_unlock(&mali_mem_os_allocator.pool_lock); + + if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) { + MALI_DEBUG_PRINT(5, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count)); + queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES); + } + return free_pages_nr; +} + +/** +* put page without put it into page pool +*/ +_mali_osk_errcode_t mali_mem_os_put_page(struct page *page) +{ + MALI_DEBUG_ASSERT_POINTER(page); + if (1 == page_count(page)) { + atomic_sub(1, &mali_mem_os_allocator.allocated_pages); + dma_unmap_page(&mali_platform_device->dev, page_private(page), + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + ClearPagePrivate(page); + } + put_page(page); + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count) +{ + struct mali_page_node *m_page, *m_tmp; + u32 i = 0; + + MALI_DEBUG_ASSERT_POINTER(mem_from); + MALI_DEBUG_ASSERT_POINTER(mem_to); + + if (mem_from->count < start_page + page_count) { + return _MALI_OSK_ERR_INVALID_ARGS; + } + + list_for_each_entry_safe(m_page, m_tmp, &mem_from->pages, list) { + if (i >= start_page && i < start_page + page_count) { + list_move_tail(&m_page->list, &mem_to->pages); + mem_from->count--; + mem_to->count++; + } + i++; + } + + return _MALI_OSK_ERR_OK; +} + + +int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size) +{ + struct page *new_page; + LIST_HEAD(pages_list); + size_t page_count = PAGE_ALIGN(size) / _MALI_OSK_MALI_PAGE_SIZE; + size_t remaining = page_count; + struct mali_page_node *m_page, *m_tmp; + u32 i; + + MALI_DEBUG_ASSERT_POINTER(os_mem); + + if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) { + MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n", + size, + atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE, + mali_mem_os_allocator.allocation_limit)); + return -ENOMEM; + } + + INIT_LIST_HEAD(&os_mem->pages); + os_mem->count = page_count; + + /* Grab pages from pool. */ + { + size_t pool_pages; + spin_lock(&mali_mem_os_allocator.pool_lock); + pool_pages = min(remaining, mali_mem_os_allocator.pool_count); + for (i = pool_pages; i > 0; i--) { + BUG_ON(list_empty(&mali_mem_os_allocator.pool_pages)); + list_move(mali_mem_os_allocator.pool_pages.next, &pages_list); + } + mali_mem_os_allocator.pool_count -= pool_pages; + remaining -= pool_pages; + spin_unlock(&mali_mem_os_allocator.pool_lock); + } + + /* Process pages from pool. */ + i = 0; + list_for_each_entry_safe(m_page, m_tmp, &pages_list, list) { + BUG_ON(NULL == m_page); + + list_move_tail(&m_page->list, &os_mem->pages); + } + + /* Allocate new pages, if needed. 
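+	 * Whatever the pool could not supply ("remaining" pages) is allocated
+	 * fresh below, DMA-mapped for the GPU and wrapped in a mali_page_node.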
*/ + for (i = 0; i < remaining; i++) { + dma_addr_t dma_addr; + gfp_t flags = __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD; + int err; + +#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE) + flags |= GFP_HIGHUSER; +#else +#ifdef CONFIG_ZONE_DMA32 + flags |= GFP_DMA32; +#else +#ifdef CONFIG_ZONE_DMA + flags |= GFP_DMA; +#else + /* arm64 utgard only work on < 4G, but the kernel + * didn't provide method to allocte memory < 4G + */ + MALI_DEBUG_ASSERT(0); +#endif +#endif +#endif + + new_page = alloc_page(flags); + + if (unlikely(NULL == new_page)) { + /* Calculate the number of pages actually allocated, and free them. */ + os_mem->count = (page_count - remaining) + i; + atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages); + mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE); + return -ENOMEM; + } + + /* Ensure page is flushed from CPU caches. */ + dma_addr = dma_map_page(&mali_platform_device->dev, new_page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + err = dma_mapping_error(&mali_platform_device->dev, dma_addr); + if (unlikely(err)) { + MALI_DEBUG_PRINT_ERROR(("OS Mem: Failed to DMA map page %p: %u", + new_page, err)); + __free_page(new_page); + os_mem->count = (page_count - remaining) + i; + atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages); + mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE); + return -EFAULT; + } + + /* Store page phys addr */ + SetPagePrivate(new_page); + set_page_private(new_page, dma_addr); + + m_page = _mali_page_node_allocate(MALI_PAGE_NODE_OS); + if (unlikely(NULL == m_page)) { + MALI_PRINT_ERROR(("OS Mem: Can't allocate mali_page node! \n")); + dma_unmap_page(&mali_platform_device->dev, page_private(new_page), + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + ClearPagePrivate(new_page); + __free_page(new_page); + os_mem->count = (page_count - remaining) + i; + atomic_add(os_mem->count, &mali_mem_os_allocator.allocated_pages); + mali_mem_os_free(&os_mem->pages, os_mem->count, MALI_FALSE); + return -EFAULT; + } + m_page->page = new_page; + + list_add_tail(&m_page->list, &os_mem->pages); + } + + atomic_add(page_count, &mali_mem_os_allocator.allocated_pages); + + if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) { + MALI_DEBUG_PRINT(4, ("OS Mem: Stopping pool trim timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count)); + cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker); + } + + return 0; +} + + +_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_pgae_num, u32 props) +{ + struct mali_page_directory *pagedir = session->page_directory; + struct mali_page_node *m_page; + u32 virt; + u32 prop = props; + + MALI_DEBUG_ASSERT_POINTER(session); + MALI_DEBUG_ASSERT_POINTER(os_mem); + + MALI_DEBUG_ASSERT(start_page <= os_mem->count); + MALI_DEBUG_ASSERT((start_page + mapping_pgae_num) <= os_mem->count); + + if ((start_page + mapping_pgae_num) == os_mem->count) { + + virt = vaddr + MALI_MMU_PAGE_SIZE * (start_page + mapping_pgae_num); + + list_for_each_entry_reverse(m_page, &os_mem->pages, list) { + + virt -= MALI_MMU_PAGE_SIZE; + if (mapping_pgae_num > 0) { + dma_addr_t phys = page_private(m_page->page); +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) + /* Verify that the "physical" address is 32-bit and + * usable for Mali, when on a system with bus addresses + * wider than 32-bit. 
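+			 * The Mali MMU page table entries can only hold 32-bit
+			 * physical addresses.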
*/ + MALI_DEBUG_ASSERT(0 == (phys >> 32)); +#endif + mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop); + } else { + break; + } + mapping_pgae_num--; + } + + } else { + u32 i = 0; + virt = vaddr; + list_for_each_entry(m_page, &os_mem->pages, list) { + + if (i >= start_page) { + dma_addr_t phys = page_private(m_page->page); + +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) + /* Verify that the "physical" address is 32-bit and + * usable for Mali, when on a system with bus addresses + * wider than 32-bit. */ + MALI_DEBUG_ASSERT(0 == (phys >> 32)); +#endif + mali_mmu_pagedir_update(pagedir, virt, (mali_dma_addr)phys, MALI_MMU_PAGE_SIZE, prop); + } + i++; + virt += MALI_MMU_PAGE_SIZE; + } + } + return _MALI_OSK_ERR_OK; +} + + +void mali_mem_os_mali_unmap(mali_mem_allocation *alloc) +{ + struct mali_session_data *session; + MALI_DEBUG_ASSERT_POINTER(alloc); + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + mali_session_memory_lock(session); + mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, + alloc->flags); + mali_session_memory_unlock(session); +} + +int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma) +{ + mali_mem_os_mem *os_mem = &mem_bkend->os_mem; + struct mali_page_node *m_page; + struct page *page; + int ret; + unsigned long addr = vma->vm_start; + MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type); + + list_for_each_entry(m_page, &os_mem->pages, list) { + /* We should use vm_insert_page, but it does a dcache + * flush which makes it way slower than remap_pfn_range or vm_insert_pfn. + ret = vm_insert_page(vma, addr, page); + */ + page = m_page->page; + ret = vm_insert_pfn(vma, addr, page_to_pfn(page)); + + if (unlikely(0 != ret)) { + return -EFAULT; + } + addr += _MALI_OSK_MALI_PAGE_SIZE; + } + + return 0; +} + +_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mappig_size) +{ + mali_mem_os_mem *os_mem = &mem_bkend->os_mem; + struct mali_page_node *m_page; + int ret; + int offset; + int mapping_page_num; + int count ; + + unsigned long vstart = vma->vm_start; + count = 0; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS); + MALI_DEBUG_ASSERT(0 == start_vaddr % _MALI_OSK_MALI_PAGE_SIZE); + MALI_DEBUG_ASSERT(0 == vstart % _MALI_OSK_MALI_PAGE_SIZE); + offset = (start_vaddr - vstart) / _MALI_OSK_MALI_PAGE_SIZE; + MALI_DEBUG_ASSERT(offset <= os_mem->count); + mapping_page_num = mappig_size / _MALI_OSK_MALI_PAGE_SIZE; + MALI_DEBUG_ASSERT((offset + mapping_page_num) <= os_mem->count); + + if ((offset + mapping_page_num) == os_mem->count) { + + unsigned long vm_end = start_vaddr + mappig_size; + + list_for_each_entry_reverse(m_page, &os_mem->pages, list) { + + vm_end -= _MALI_OSK_MALI_PAGE_SIZE; + if (mapping_page_num > 0) { + ret = vm_insert_pfn(vma, vm_end, page_to_pfn(m_page->page)); + + if (unlikely(0 != ret)) { + /*will return -EBUSY If the page has already been mapped into table, but it's OK*/ + if (-EBUSY == ret) { + break; + } else { + MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, offset is %d,page_count is %d\n", + ret, offset + mapping_page_num, os_mem->count)); + } + return _MALI_OSK_ERR_FAULT; + } + } else { + break; + } + mapping_page_num--; + + } + } else { + + list_for_each_entry(m_page, &os_mem->pages, list) { + if (count >= offset) { + + ret = vm_insert_pfn(vma, vstart, page_to_pfn(m_page->page)); + + if (unlikely(0 != ret)) { + /*will 
return -EBUSY If the page has already been mapped into table, but it's OK*/ + if (-EBUSY == ret) { + break; + } else { + MALI_DEBUG_PRINT(1, ("OS Mem: mali_mem_os_resize_cpu_map_locked failed, ret = %d, count is %d, offset is %d,page_count is %d\n", + ret, count, offset, os_mem->count)); + } + return _MALI_OSK_ERR_FAULT; + } + } + count++; + vstart += _MALI_OSK_MALI_PAGE_SIZE; + } + } + return _MALI_OSK_ERR_OK; +} + +u32 mali_mem_os_release(mali_mem_backend *mem_bkend) +{ + + mali_mem_allocation *alloc; + struct mali_session_data *session; + u32 free_pages_nr = 0; + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type); + + alloc = mem_bkend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + /* Unmap the memory from the mali virtual address space. */ + mali_mem_os_mali_unmap(alloc); + mutex_lock(&mem_bkend->mutex); + /* Free pages */ + if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->flags) { + /* Lock to avoid the free race condition for the cow shared memory page node. */ + _mali_osk_mutex_wait(session->cow_lock); + free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE); + _mali_osk_mutex_signal(session->cow_lock); + } else { + free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE); + } + mutex_unlock(&mem_bkend->mutex); + + MALI_DEBUG_PRINT(4, ("OS Mem free : allocated size = 0x%x, free size = 0x%x\n", mem_bkend->os_mem.count * _MALI_OSK_MALI_PAGE_SIZE, + free_pages_nr * _MALI_OSK_MALI_PAGE_SIZE)); + + mem_bkend->os_mem.count = 0; + return free_pages_nr; +} + + +#define MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE 128 +static struct { + struct { + mali_dma_addr phys; + mali_io_address mapping; + } page[MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE]; + size_t count; + spinlock_t lock; +} mali_mem_page_table_page_pool = { + .count = 0, + .lock = __SPIN_LOCK_UNLOCKED(pool_lock), +}; + +_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping) +{ + _mali_osk_errcode_t ret = _MALI_OSK_ERR_NOMEM; + dma_addr_t tmp_phys; + + spin_lock(&mali_mem_page_table_page_pool.lock); + if (0 < mali_mem_page_table_page_pool.count) { + u32 i = --mali_mem_page_table_page_pool.count; + *phys = mali_mem_page_table_page_pool.page[i].phys; + *mapping = mali_mem_page_table_page_pool.page[i].mapping; + + ret = _MALI_OSK_ERR_OK; + } + spin_unlock(&mali_mem_page_table_page_pool.lock); + + if (_MALI_OSK_ERR_OK != ret) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + *mapping = dma_alloc_attrs(&mali_platform_device->dev, + _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, + GFP_KERNEL, dma_attrs_wc); +#else + *mapping = dma_alloc_writecombine(&mali_platform_device->dev, + _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys, GFP_KERNEL); +#endif + if (NULL != *mapping) { + ret = _MALI_OSK_ERR_OK; + +#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) + /* Verify that the "physical" address is 32-bit and + * usable for Mali, when on a system with bus addresses + * wider than 32-bit. 
*/ + MALI_DEBUG_ASSERT(0 == (tmp_phys >> 32)); +#endif + + *phys = (mali_dma_addr)tmp_phys; + } + } + + return ret; +} + +void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt) +{ + spin_lock(&mali_mem_page_table_page_pool.lock); + if (MALI_MEM_OS_PAGE_TABLE_PAGE_POOL_SIZE > mali_mem_page_table_page_pool.count) { + u32 i = mali_mem_page_table_page_pool.count; + mali_mem_page_table_page_pool.page[i].phys = phys; + mali_mem_page_table_page_pool.page[i].mapping = virt; + + ++mali_mem_page_table_page_pool.count; + + spin_unlock(&mali_mem_page_table_page_pool.lock); + } else { + spin_unlock(&mali_mem_page_table_page_pool.lock); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + dma_free_attrs(&mali_platform_device->dev, + _MALI_OSK_MALI_PAGE_SIZE, virt, phys, + dma_attrs_wc); +#else + dma_free_writecombine(&mali_platform_device->dev, + _MALI_OSK_MALI_PAGE_SIZE, virt, phys); +#endif + } +} + +void mali_mem_os_free_page_node(struct mali_page_node *m_page) +{ + struct page *page = m_page->page; + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_OS); + + if (1 == page_count(page)) { + dma_unmap_page(&mali_platform_device->dev, page_private(page), + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + ClearPagePrivate(page); + } + __free_page(page); + m_page->page = NULL; + list_del(&m_page->list); + kfree(m_page); +} + +/* The maximum number of page table pool pages to free in one go. */ +#define MALI_MEM_OS_CHUNK_TO_FREE 64UL + +/* Free a certain number of pages from the page table page pool. + * The pool lock must be held when calling the function, and the lock will be + * released before returning. + */ +static void mali_mem_os_page_table_pool_free(size_t nr_to_free) +{ + mali_dma_addr phys_arr[MALI_MEM_OS_CHUNK_TO_FREE]; + void *virt_arr[MALI_MEM_OS_CHUNK_TO_FREE]; + u32 i; + + MALI_DEBUG_ASSERT(nr_to_free <= MALI_MEM_OS_CHUNK_TO_FREE); + + /* Remove nr_to_free pages from the pool and store them locally on stack. */ + for (i = 0; i < nr_to_free; i++) { + u32 pool_index = mali_mem_page_table_page_pool.count - i - 1; + + phys_arr[i] = mali_mem_page_table_page_pool.page[pool_index].phys; + virt_arr[i] = mali_mem_page_table_page_pool.page[pool_index].mapping; + } + + mali_mem_page_table_page_pool.count -= nr_to_free; + + spin_unlock(&mali_mem_page_table_page_pool.lock); + + /* After releasing the spinlock: free the pages we removed from the pool. */ + for (i = 0; i < nr_to_free; i++) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE, + virt_arr[i], (dma_addr_t)phys_arr[i], dma_attrs_wc); +#else + dma_free_writecombine(&mali_platform_device->dev, + _MALI_OSK_MALI_PAGE_SIZE, + virt_arr[i], (dma_addr_t)phys_arr[i]); +#endif + } +} + +static void mali_mem_os_trim_page_table_page_pool(void) +{ + size_t nr_to_free = 0; + size_t nr_to_keep; + + /* Keep 2 page table pages for each 1024 pages in the page cache. */ + nr_to_keep = mali_mem_os_allocator.pool_count / 512; + /* And a minimum of eight pages, to accomodate new sessions. */ + nr_to_keep += 8; + + if (0 == spin_trylock(&mali_mem_page_table_page_pool.lock)) return; + + if (nr_to_keep < mali_mem_page_table_page_pool.count) { + nr_to_free = mali_mem_page_table_page_pool.count - nr_to_keep; + nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, nr_to_free); + } + + /* Pool lock will be released by the callee. 
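+	 * mali_mem_os_page_table_pool_free() drops the spinlock before it
+	 * issues the (potentially slow) dma_free calls.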
*/ + mali_mem_os_page_table_pool_free(nr_to_free); +} + +static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc) +{ + return mali_mem_os_allocator.pool_count; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) +static int mali_mem_os_shrink(int nr_to_scan, gfp_t gfp_mask) +#else +static int mali_mem_os_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask) +#endif /* Linux < 2.6.35 */ +#else +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) +static int mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc) +#else +static unsigned long mali_mem_os_shrink(struct shrinker *shrinker, struct shrink_control *sc) +#endif /* Linux < 3.12.0 */ +#endif /* Linux < 3.0.0 */ +{ + struct mali_page_node *m_page, *m_tmp; + unsigned long flags; + struct list_head *le, pages; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0) + int nr = nr_to_scan; +#else + int nr = sc->nr_to_scan; +#endif + + if (0 == nr) { + return mali_mem_os_shrink_count(shrinker, sc); + } + + if (0 == spin_trylock_irqsave(&mali_mem_os_allocator.pool_lock, flags)) { + /* Not able to lock. */ + return -1; + } + + if (0 == mali_mem_os_allocator.pool_count) { + /* No pages availble */ + spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags); + return 0; + } + + /* Release from general page pool */ + nr = min((size_t)nr, mali_mem_os_allocator.pool_count); + mali_mem_os_allocator.pool_count -= nr; + list_for_each(le, &mali_mem_os_allocator.pool_pages) { + --nr; + if (0 == nr) break; + } + list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le); + spin_unlock_irqrestore(&mali_mem_os_allocator.pool_lock, flags); + + list_for_each_entry_safe(m_page, m_tmp, &pages, list) { + mali_mem_os_free_page_node(m_page); + } + + if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) { + /* Pools are empty, stop timer */ + MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count)); + cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) + return mali_mem_os_shrink_count(shrinker, sc); +#else + return nr; +#endif +} + +static void mali_mem_os_trim_pool(struct work_struct *data) +{ + struct mali_page_node *m_page, *m_tmp; + struct list_head *le; + LIST_HEAD(pages); + size_t nr_to_free; + + MALI_IGNORE(data); + + MALI_DEBUG_PRINT(3, ("OS Mem: Trimming pool %u\n", mali_mem_os_allocator.pool_count)); + + /* Release from general page pool */ + spin_lock(&mali_mem_os_allocator.pool_lock); + if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) { + size_t count = mali_mem_os_allocator.pool_count - MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES; + const size_t min_to_free = min(64, MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES); + + /* Free half the pages on the pool above the static limit. Or 64 pages, 256KB. 
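+		 * (whichever is larger), so every trim pass makes useful progress.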
*/ + nr_to_free = max(count / 2, min_to_free); + + mali_mem_os_allocator.pool_count -= nr_to_free; + list_for_each(le, &mali_mem_os_allocator.pool_pages) { + --nr_to_free; + if (0 == nr_to_free) break; + } + list_cut_position(&pages, &mali_mem_os_allocator.pool_pages, le); + } + spin_unlock(&mali_mem_os_allocator.pool_lock); + + list_for_each_entry_safe(m_page, m_tmp, &pages, list) { + mali_mem_os_free_page_node(m_page); + } + + /* Release some pages from page table page pool */ + mali_mem_os_trim_page_table_page_pool(); + + if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES < mali_mem_os_allocator.pool_count) { + MALI_DEBUG_PRINT(4, ("OS Mem: Starting pool trim timer %u\n", mali_mem_os_allocator.pool_count)); + queue_delayed_work(mali_mem_os_allocator.wq, &mali_mem_os_allocator.timed_shrinker, MALI_OS_MEMORY_POOL_TRIM_JIFFIES); + } +} + +_mali_osk_errcode_t mali_mem_os_init(void) +{ + mali_mem_os_allocator.wq = alloc_workqueue("mali-mem", WQ_UNBOUND, 1); + if (NULL == mali_mem_os_allocator.wq) { + return _MALI_OSK_ERR_NOMEM; + } + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) + dma_attrs_wc |= DMA_ATTR_WRITE_COMBINE; +#endif + + register_shrinker(&mali_mem_os_allocator.shrinker); + + return _MALI_OSK_ERR_OK; +} + +void mali_mem_os_term(void) +{ + struct mali_page_node *m_page, *m_tmp; + unregister_shrinker(&mali_mem_os_allocator.shrinker); + cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker); + + if (NULL != mali_mem_os_allocator.wq) { + destroy_workqueue(mali_mem_os_allocator.wq); + mali_mem_os_allocator.wq = NULL; + } + + spin_lock(&mali_mem_os_allocator.pool_lock); + list_for_each_entry_safe(m_page, m_tmp, &mali_mem_os_allocator.pool_pages, list) { + mali_mem_os_free_page_node(m_page); + + --mali_mem_os_allocator.pool_count; + } + BUG_ON(mali_mem_os_allocator.pool_count); + spin_unlock(&mali_mem_os_allocator.pool_lock); + + /* Release from page table page pool */ + do { + u32 nr_to_free; + + spin_lock(&mali_mem_page_table_page_pool.lock); + + nr_to_free = min((size_t)MALI_MEM_OS_CHUNK_TO_FREE, mali_mem_page_table_page_pool.count); + + /* Pool lock will be released by the callee. */ + mali_mem_os_page_table_pool_free(nr_to_free); + } while (0 != mali_mem_page_table_page_pool.count); +} + +_mali_osk_errcode_t mali_memory_core_resource_os_memory(u32 size) +{ + mali_mem_os_allocator.allocation_limit = size; + + MALI_SUCCESS; +} + +u32 mali_mem_os_stat(void) +{ + return atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_os_alloc.h b/drivers/gpu/arm/mali400/linux/mali_memory_os_alloc.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_os_alloc.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_os_alloc.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_MEMORY_OS_ALLOC_H__ +#define __MALI_MEMORY_OS_ALLOC_H__ + +#include "mali_osk.h" +#include "mali_memory_types.h" + + +/** @brief Release Mali OS memory + * + * The session memory_lock must be held when calling this function. + * + * @param mem_bkend Pointer to the mali_mem_backend to release + */ +u32 mali_mem_os_release(mali_mem_backend *mem_bkend); + +_mali_osk_errcode_t mali_mem_os_get_table_page(mali_dma_addr *phys, mali_io_address *mapping); + +void mali_mem_os_release_table_page(mali_dma_addr phys, void *virt); + +_mali_osk_errcode_t mali_mem_os_init(void); + +void mali_mem_os_term(void); + +u32 mali_mem_os_stat(void); + +void mali_mem_os_free_page_node(struct mali_page_node *m_page); + +int mali_mem_os_alloc_pages(mali_mem_os_mem *os_mem, u32 size); + +u32 mali_mem_os_free(struct list_head *os_pages, u32 pages_count, mali_bool cow_flag); + +_mali_osk_errcode_t mali_mem_os_put_page(struct page *page); + +_mali_osk_errcode_t mali_mem_os_resize_pages(mali_mem_os_mem *mem_from, mali_mem_os_mem *mem_to, u32 start_page, u32 page_count); + +_mali_osk_errcode_t mali_mem_os_mali_map(mali_mem_os_mem *os_mem, struct mali_session_data *session, u32 vaddr, u32 start_page, u32 mapping_page_num, u32 props); + +void mali_mem_os_mali_unmap(mali_mem_allocation *alloc); + +int mali_mem_os_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma); + +_mali_osk_errcode_t mali_mem_os_resize_cpu_map_locked(mali_mem_backend *mem_bkend, struct vm_area_struct *vma, unsigned long start_vaddr, u32 mapping_size); + +#endif /* __MALI_MEMORY_OS_ALLOC_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_secure.c b/drivers/gpu/arm/mali400/linux/mali_memory_secure.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_secure.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_secure.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,169 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_kernel_common.h" +#include "mali_memory.h" +#include "mali_memory_secure.h" +#include "mali_osk.h" +#include +#include +#include + +_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd) +{ + struct dma_buf *buf; + MALI_DEBUG_ASSERT_POINTER(secure_mem); + + /* get dma buffer */ + buf = dma_buf_get(mem_fd); + if (IS_ERR_OR_NULL(buf)) { + MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf!\n")); + return _MALI_OSK_ERR_FAULT; + } + + if (size != buf->size) { + MALI_DEBUG_PRINT_ERROR(("The secure mem size does not match the dma buf size!\n")); + goto failed_alloc_mem; + } + + secure_mem->buf = buf; + secure_mem->attachment = dma_buf_attach(secure_mem->buf, &mali_platform_device->dev); + if (NULL == secure_mem->attachment) { + MALI_DEBUG_PRINT_ERROR(("Failed to get dma buf attachment!\n")); + goto failed_dma_attach; + } + + secure_mem->sgt = dma_buf_map_attachment(secure_mem->attachment, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(secure_mem->sgt)) { + MALI_DEBUG_PRINT_ERROR(("Failed to map dma buf attachment\n")); + goto failed_dma_map; + } + + secure_mem->count = size / MALI_MMU_PAGE_SIZE; + + return _MALI_OSK_ERR_OK; + +failed_dma_map: + dma_buf_detach(secure_mem->buf, secure_mem->attachment); +failed_dma_attach: +failed_alloc_mem: + dma_buf_put(buf); + return _MALI_OSK_ERR_FAULT; +} + +_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props) +{ + struct mali_page_directory *pagedir; + struct scatterlist *sg; + u32 virt = vaddr; + u32 prop = props; + int i; + + MALI_DEBUG_ASSERT_POINTER(secure_mem); + MALI_DEBUG_ASSERT_POINTER(secure_mem->sgt); + MALI_DEBUG_ASSERT_POINTER(session); + + pagedir = session->page_directory; + + for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) { + u32 size = sg_dma_len(sg); + dma_addr_t phys = sg_dma_address(sg); + + /* sg must be page aligned. */ + MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE); + MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF)); + + mali_mmu_pagedir_update(pagedir, virt, phys, size, prop); + + MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x gpu virtual address: 0x%x! 
\n", phys, virt)); + virt += size; + } + + return _MALI_OSK_ERR_OK; +} + +void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc) +{ + struct mali_session_data *session; + MALI_DEBUG_ASSERT_POINTER(alloc); + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + mali_session_memory_lock(session); + mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, + alloc->flags); + mali_session_memory_unlock(session); +} + + +int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma) +{ + + int ret = 0; + struct scatterlist *sg; + mali_mem_secure *secure_mem = &mem_bkend->secure_mem; + unsigned long addr = vma->vm_start; + int i; + + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE); + + for_each_sg(secure_mem->sgt->sgl, sg, secure_mem->sgt->nents, i) { + phys_addr_t phys; + dma_addr_t dev_addr; + u32 size, j; + dev_addr = sg_dma_address(sg); +#if defined(CONFIG_ARM64) ||LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) + phys = dma_to_phys(&mali_platform_device->dev, dev_addr); +#else + phys = page_to_phys(pfn_to_page(dma_to_pfn(&mali_platform_device->dev, dev_addr))); +#endif + size = sg_dma_len(sg); + MALI_DEBUG_ASSERT(0 == size % _MALI_OSK_MALI_PAGE_SIZE); + + for (j = 0; j < size / _MALI_OSK_MALI_PAGE_SIZE; j++) { + ret = vm_insert_pfn(vma, addr, PFN_DOWN(phys)); + + if (unlikely(0 != ret)) { + return -EFAULT; + } + addr += _MALI_OSK_MALI_PAGE_SIZE; + phys += _MALI_OSK_MALI_PAGE_SIZE; + + MALI_DEBUG_PRINT(3, ("The secure mem physical address: 0x%x , cpu virtual address: 0x%x! \n", phys, addr)); + } + } + return ret; +} + +u32 mali_mem_secure_release(mali_mem_backend *mem_bkend) +{ + struct mali_mem_secure *mem; + mali_mem_allocation *alloc = mem_bkend->mali_allocation; + u32 free_pages_nr = 0; + MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_SECURE); + + mem = &mem_bkend->secure_mem; + MALI_DEBUG_ASSERT_POINTER(mem->attachment); + MALI_DEBUG_ASSERT_POINTER(mem->buf); + MALI_DEBUG_ASSERT_POINTER(mem->sgt); + /* Unmap the memory from the mali virtual address space. */ + mali_mem_secure_mali_unmap(alloc); + mutex_lock(&mem_bkend->mutex); + dma_buf_unmap_attachment(mem->attachment, mem->sgt, DMA_BIDIRECTIONAL); + dma_buf_detach(mem->buf, mem->attachment); + dma_buf_put(mem->buf); + mutex_unlock(&mem_bkend->mutex); + + free_pages_nr = mem->count; + + return free_pages_nr; +} + + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_secure.h b/drivers/gpu/arm/mali400/linux/mali_memory_secure.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_secure.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_secure.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2010, 2013, 2015-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_MEMORY_SECURE_H__ +#define __MALI_MEMORY_SECURE_H__ + +#include "mali_session.h" +#include "mali_memory.h" +#include + +#include "mali_memory_types.h" + +_mali_osk_errcode_t mali_mem_secure_attach_dma_buf(mali_mem_secure *secure_mem, u32 size, int mem_fd); + +_mali_osk_errcode_t mali_mem_secure_mali_map(mali_mem_secure *secure_mem, struct mali_session_data *session, u32 vaddr, u32 props); + +void mali_mem_secure_mali_unmap(mali_mem_allocation *alloc); + +int mali_mem_secure_cpu_map(mali_mem_backend *mem_bkend, struct vm_area_struct *vma); + +u32 mali_mem_secure_release(mali_mem_backend *mem_bkend); + +#endif /* __MALI_MEMORY_SECURE_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_swap_alloc.c b/drivers/gpu/arm/mali400/linux/mali_memory_swap_alloc.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_swap_alloc.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_swap_alloc.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,942 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_memory.h" +#include "mali_memory_manager.h" +#include "mali_memory_virtual.h" +#include "mali_memory_cow.h" +#include "mali_ukk.h" +#include "mali_kernel_utilization.h" +#include "mali_memory_swap_alloc.h" + + +static struct _mali_osk_bitmap idx_mgr; +static struct file *global_swap_file; +static struct address_space *global_swap_space; +static _mali_osk_wq_work_t *mali_mem_swap_out_workq = NULL; +static u32 mem_backend_swapped_pool_size; +#ifdef MALI_MEM_SWAP_TRACKING +static u32 mem_backend_swapped_unlock_size; +#endif +/* Lock order: mem_backend_swapped_pool_lock > each memory backend's mutex lock. + * This lock is used to protect mem_backend_swapped_pool_size and mem_backend_swapped_pool. */ +static struct mutex mem_backend_swapped_pool_lock; +static struct list_head mem_backend_swapped_pool; + +extern struct mali_mem_os_allocator mali_mem_os_allocator; + +#define MALI_SWAP_LOW_MEM_DEFAULT_VALUE (60*1024*1024) +#define MALI_SWAP_INVALIDATE_MALI_ADDRESS (0) /* Used to mark the given memory cookie as invalid. */ +#define MALI_SWAP_GLOBAL_SWAP_FILE_SIZE (0xFFFFFFFF) +#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX ((MALI_SWAP_GLOBAL_SWAP_FILE_SIZE) >> PAGE_SHIFT) +#define MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE (1 << 15) /* Reserved for CoW nonlinear swap backend memory, the space size is 128MB. */ + +unsigned int mali_mem_swap_out_threshold_value = MALI_SWAP_LOW_MEM_DEFAULT_VALUE; + +/** + * There are two situations in which the swapped pool is shrunk: low GPU utilization, which indicates that the GPU + * will not touch many swappable backends in the near future, and the addition of new swappable backends, which can + * push the total pool size above the swapped-pool threshold value. 
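+ * Note: the enum values below are reused directly as GPU-utilization thresholds in + * mali_mem_swap_swapped_bkend_pool_shrink(); 100 gates the low-utilization path, while 257 sits above the + * utilization values the driver reports, so that check effectively always passes when new backends are added.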
+ */ +typedef enum { + MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION = 100, + MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS = 257, +} _mali_mem_swap_pool_shrink_type_t; + +static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg); + +_mali_osk_errcode_t mali_mem_swap_init(void) +{ + gfp_t flags = __GFP_NORETRY | __GFP_NOWARN; + + if (_MALI_OSK_ERR_OK != _mali_osk_bitmap_init(&idx_mgr, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX, MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE)) { + return _MALI_OSK_ERR_NOMEM; + } + + global_swap_file = shmem_file_setup("mali_swap", MALI_SWAP_GLOBAL_SWAP_FILE_SIZE, VM_NORESERVE); + if (IS_ERR(global_swap_file)) { + _mali_osk_bitmap_term(&idx_mgr); + return _MALI_OSK_ERR_NOMEM; + } + + global_swap_space = global_swap_file->f_path.dentry->d_inode->i_mapping; + + mali_mem_swap_out_workq = _mali_osk_wq_create_work(mali_mem_swap_swapped_bkend_pool_check_for_low_utilization, NULL); + if (NULL == mali_mem_swap_out_workq) { + _mali_osk_bitmap_term(&idx_mgr); + fput(global_swap_file); + return _MALI_OSK_ERR_NOMEM; + } + +#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE) + flags |= GFP_HIGHUSER; +#else +#ifdef CONFIG_ZONE_DMA32 + flags |= GFP_DMA32; +#else +#ifdef CONFIG_ZONE_DMA + flags |= GFP_DMA; +#else + /* arm64 Utgard only works with memory below 4G, but the kernel + * does not provide a method to allocate memory below 4G. + */ + MALI_DEBUG_ASSERT(0); +#endif +#endif +#endif + + /* When we use shmem_read_mapping_page to allocate/swap-in, it will + * use these flags to allocate a new page if needed. */ + mapping_set_gfp_mask(global_swap_space, flags); + + mem_backend_swapped_pool_size = 0; +#ifdef MALI_MEM_SWAP_TRACKING + mem_backend_swapped_unlock_size = 0; +#endif + mutex_init(&mem_backend_swapped_pool_lock); + INIT_LIST_HEAD(&mem_backend_swapped_pool); + + MALI_DEBUG_PRINT(2, ("Mali SWAP: Swap out threshold value is %uM\n", mali_mem_swap_out_threshold_value >> 20)); + + return _MALI_OSK_ERR_OK; +} + +void mali_mem_swap_term(void) +{ + _mali_osk_bitmap_term(&idx_mgr); + + fput(global_swap_file); + + _mali_osk_wq_delete_work(mali_mem_swap_out_workq); + + MALI_DEBUG_ASSERT(list_empty(&mem_backend_swapped_pool)); + MALI_DEBUG_ASSERT(0 == mem_backend_swapped_pool_size); + + return; +} + +struct file *mali_mem_swap_get_global_swap_file(void) +{ + return global_swap_file; +} + +/* Check whether the given swappable backend is currently in the swapped pool. 
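An empty list head (reset by list_del_init()) means the backend has already been removed, so membership can be tested without walking the pool list. 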
*/ +static mali_bool mali_memory_swap_backend_in_swapped_pool(mali_mem_backend *mem_bkend) +{ + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + + return !list_empty(&mem_bkend->list); +} + +void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend) +{ + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + + mutex_lock(&mem_backend_swapped_pool_lock); + mutex_lock(&mem_bkend->mutex); + + if (MALI_FALSE == mali_memory_swap_backend_in_swapped_pool(mem_bkend)) { + mutex_unlock(&mem_bkend->mutex); + mutex_unlock(&mem_backend_swapped_pool_lock); + return; + } + + MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list)); + + list_del_init(&mem_bkend->list); + + mutex_unlock(&mem_bkend->mutex); + + mem_backend_swapped_pool_size -= mem_bkend->size; + + mutex_unlock(&mem_backend_swapped_pool_lock); +} + +static void mali_mem_swap_out_page_node(mali_page_node *page_node) +{ + MALI_DEBUG_ASSERT(page_node); + + dma_unmap_page(&mali_platform_device->dev, page_node->swap_it->dma_addr, + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + set_page_dirty(page_node->swap_it->page); + put_page(page_node->swap_it->page); +} + +void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend) +{ + mali_page_node *m_page; + + MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex)); + + if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN)) { + return; + } + + mem_bkend->flags |= MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN; + + list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) { + mali_mem_swap_out_page_node(m_page); + } + + return; +} + +static void mali_mem_swap_unlock_partial_locked_mem_backend(mali_mem_backend *mem_bkend, mali_page_node *page_node) +{ + mali_page_node *m_page; + + MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_bkend->mutex)); + + list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) { + if (m_page == page_node) { + break; + } + mali_mem_swap_out_page_node(m_page); + } +} + +static void mali_mem_swap_swapped_bkend_pool_shrink(_mali_mem_swap_pool_shrink_type_t shrink_type) +{ + mali_mem_backend *bkend, *tmp_bkend; + long system_free_size; + u32 last_gpu_utilization, gpu_utilization_threshold_value, temp_swap_out_threshold_value; + + MALI_DEBUG_ASSERT(1 == mutex_is_locked(&mem_backend_swapped_pool_lock)); + + if (MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION == shrink_type) { + /** + * When system memory is very low, the swappable memory size locked by Mali is below the + * threshold value, and at the same time GPU load is low enough that high performance is + * not needed, we can unlock more memory backends from the swapped backends pool. + */ + gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION; + temp_swap_out_threshold_value = (mali_mem_swap_out_threshold_value >> 2); + } else { + /* When we add swappable memory backends to the swapped pool, we must not hold too many + * swappable backends in the Mali driver, but we also need to consider performance. + * Swapping backends out is therefore balanced against the following conditions: + * 1. The total memory size in the global swapped backend pool exceeds the defined threshold value. + * 2. The system-level free memory size is below the defined threshold value. + * 3. GPU utilization is not considered for this shrink type. 
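+ * All three checks are evaluated below before any backend is unlocked; backends that are still + * in use (using_count > 0) are skipped.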
+ */ + gpu_utilization_threshold_value = MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS; + temp_swap_out_threshold_value = mali_mem_swap_out_threshold_value; + } + + /* Get the amount of free system memory in bytes. */ + system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE; + last_gpu_utilization = _mali_ukk_utilization_gp_pp(); + + if ((last_gpu_utilization < gpu_utilization_threshold_value) + && (system_free_size < mali_mem_swap_out_threshold_value) + && (mem_backend_swapped_pool_size > temp_swap_out_threshold_value)) { + list_for_each_entry_safe(bkend, tmp_bkend, &mem_backend_swapped_pool, list) { + if (mem_backend_swapped_pool_size <= temp_swap_out_threshold_value) { + break; + } + + mutex_lock(&bkend->mutex); + + /* check if backend is in use. */ + if (0 < bkend->using_count) { + mutex_unlock(&bkend->mutex); + continue; + } + + mali_mem_swap_unlock_single_mem_backend(bkend); + list_del_init(&bkend->list); + mem_backend_swapped_pool_size -= bkend->size; +#ifdef MALI_MEM_SWAP_TRACKING + mem_backend_swapped_unlock_size += bkend->size; +#endif + mutex_unlock(&bkend->mutex); + } + } + + return; +} + +static void mali_mem_swap_swapped_bkend_pool_check_for_low_utilization(void *arg) +{ + MALI_IGNORE(arg); + + mutex_lock(&mem_backend_swapped_pool_lock); + + mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_WITH_LOW_UTILIZATION); + + mutex_unlock(&mem_backend_swapped_pool_lock); +} + +/** + * After a PP job finishes, we add all swappable memory backends used by the job to the tail of the global + * swapped pool; if the total size of swappable memory then exceeds the threshold value, we also shrink the + * swapped pool, starting from the head of the list. + */ +void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend) +{ + mutex_lock(&mem_backend_swapped_pool_lock); + mutex_lock(&mem_bkend->mutex); + + if (mali_memory_swap_backend_in_swapped_pool(mem_bkend)) { + MALI_DEBUG_ASSERT(!list_empty(&mem_bkend->list)); + + list_del_init(&mem_bkend->list); + list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool); + mutex_unlock(&mem_bkend->mutex); + mutex_unlock(&mem_backend_swapped_pool_lock); + return; + } + + list_add_tail(&mem_bkend->list, &mem_backend_swapped_pool); + + mutex_unlock(&mem_bkend->mutex); + mem_backend_swapped_pool_size += mem_bkend->size; + + mali_mem_swap_swapped_bkend_pool_shrink(MALI_MEM_SWAP_SHRINK_FOR_ADDING_NEW_BACKENDS); + + mutex_unlock(&mem_backend_swapped_pool_lock); + return; +} + + +u32 mali_mem_swap_idx_alloc(void) +{ + return _mali_osk_bitmap_alloc(&idx_mgr); +} + +void mali_mem_swap_idx_free(u32 idx) +{ + _mali_osk_bitmap_free(&idx_mgr, idx); +} + +static u32 mali_mem_swap_idx_range_alloc(u32 count) +{ + u32 index; + + index = _mali_osk_bitmap_alloc_range(&idx_mgr, count); + + return index; +} + +static void mali_mem_swap_idx_range_free(u32 idx, int num) +{ + _mali_osk_bitmap_free_range(&idx_mgr, idx, num); +} + +struct mali_swap_item *mali_mem_swap_alloc_swap_item(void) +{ + mali_swap_item *swap_item; + + swap_item = kzalloc(sizeof(mali_swap_item), GFP_KERNEL); + + if (NULL == swap_item) { + return NULL; + } + + atomic_set(&swap_item->ref_count, 1); + swap_item->page = NULL; + atomic_add(1, &mali_mem_os_allocator.allocated_pages); + + return swap_item; +} + +void mali_mem_swap_free_swap_item(mali_swap_item *swap_item) +{ + struct inode *file_node; + long long start, end; + + /* If this swap item is shared, we just drop the reference counter. 
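Otherwise the backing page is truncated out of the global swap file: idx << 12 converts the page index into a byte offset (4KB pages), shmem_truncate_range() releases exactly the range [start, start + PAGE_SIZE - 1], and the index is returned to the bitmap. 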
*/ + if (0 == atomic_dec_return(&swap_item->ref_count)) { + file_node = global_swap_file->f_path.dentry->d_inode; + start = swap_item->idx; + start = start << 12; + end = start + PAGE_SIZE; + + shmem_truncate_range(file_node, start, (end - 1)); + + mali_mem_swap_idx_free(swap_item->idx); + + atomic_sub(1, &mali_mem_os_allocator.allocated_pages); + + kfree(swap_item); + } +} + +/* Used to allocate a new swap item for a new memory allocation or for a CoW page about to be written. */ +struct mali_page_node *_mali_mem_swap_page_node_allocate(void) +{ + struct mali_page_node *m_page; + + m_page = _mali_page_node_allocate(MALI_PAGE_NODE_SWAP); + + if (NULL == m_page) { + return NULL; + } + + m_page->swap_it = mali_mem_swap_alloc_swap_item(); + + if (NULL == m_page->swap_it) { + kfree(m_page); + return NULL; + } + + return m_page; +} + +_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page) +{ + + mali_mem_swap_free_swap_item(m_page->swap_it); + + return _MALI_OSK_ERR_OK; +} + +void _mali_mem_swap_page_node_free(struct mali_page_node *m_page) +{ + _mali_mem_swap_put_page_node(m_page); + + kfree(m_page); + + return; +} + +u32 mali_mem_swap_free(mali_mem_swap *swap_mem) +{ + struct mali_page_node *m_page, *m_tmp; + u32 free_pages_nr = 0; + + MALI_DEBUG_ASSERT_POINTER(swap_mem); + + list_for_each_entry_safe(m_page, m_tmp, &swap_mem->pages, list) { + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP); + + /* Free the page node and drop its reference on the swap item; if the ref + * count was 1, the swap item itself is freed as well. */ + list_del(&m_page->list); + if (1 == _mali_page_node_get_ref_count(m_page)) { + free_pages_nr++; + } + + _mali_mem_swap_page_node_free(m_page); + } + + return free_pages_nr; +} + +static u32 mali_mem_swap_cow_free(mali_mem_cow *cow_mem) +{ + struct mali_page_node *m_page, *m_tmp; + u32 free_pages_nr = 0; + + MALI_DEBUG_ASSERT_POINTER(cow_mem); + + list_for_each_entry_safe(m_page, m_tmp, &cow_mem->pages, list) { + MALI_DEBUG_ASSERT(m_page->type == MALI_PAGE_NODE_SWAP); + + /* Free the page node and drop its reference on the swap item; if the ref + * count was 1, the swap item itself is freed as well. */ + list_del(&m_page->list); + if (1 == _mali_page_node_get_ref_count(m_page)) { + free_pages_nr++; + } + + _mali_mem_swap_page_node_free(m_page); + } + + return free_pages_nr; +} + +u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped) +{ + mali_mem_allocation *alloc; + u32 free_pages_nr = 0; + + MALI_DEBUG_ASSERT_POINTER(mem_bkend); + alloc = mem_bkend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + if (is_mali_mapped) { + mali_mem_swap_mali_unmap(alloc); + } + + mali_memory_swap_list_backend_delete(mem_bkend); + + mutex_lock(&mem_bkend->mutex); + /* Make sure the given memory backend has been unlocked on the Mali side + * before this memory block is freed. 
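+ * Unlocking dma-unmaps and releases every page still pinned by this backend; pages whose swap + * items are shared with other backends are only unreferenced, which is why free_pages_nr below + * can be smaller than the backend's page count.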
+ */ + mali_mem_swap_unlock_single_mem_backend(mem_bkend); + mutex_unlock(&mem_bkend->mutex); + + if (MALI_MEM_SWAP == mem_bkend->type) { + free_pages_nr = mali_mem_swap_free(&mem_bkend->swap_mem); + } else { + free_pages_nr = mali_mem_swap_cow_free(&mem_bkend->cow_mem); + } + + return free_pages_nr; +} + +mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node) +{ + MALI_DEBUG_ASSERT(NULL != page_node); + + page_node->swap_it->page = shmem_read_mapping_page(global_swap_space, page_node->swap_it->idx); + + if (IS_ERR(page_node->swap_it->page)) { + MALI_DEBUG_PRINT_ERROR(("SWAP Mem: failed to swap in page with index: %d.\n", page_node->swap_it->idx)); + return MALI_FALSE; + } + + /* Ensure page is flushed from CPU caches. */ + page_node->swap_it->dma_addr = dma_map_page(&mali_platform_device->dev, page_node->swap_it->page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + return MALI_TRUE; +} + +int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx) +{ + size_t page_count = PAGE_ALIGN(size) / PAGE_SIZE; + struct mali_page_node *m_page; + long system_free_size; + u32 i, index; + mali_bool ret; + + MALI_DEBUG_ASSERT(NULL != swap_mem); + MALI_DEBUG_ASSERT(NULL != bkend_idx); + MALI_DEBUG_ASSERT(page_count <= MALI_SWAP_GLOBAL_SWAP_FILE_INDEX_RESERVE); + + if (atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE + size > mali_mem_os_allocator.allocation_limit) { + MALI_DEBUG_PRINT(2, ("Mali Mem: Unable to allocate %u bytes. Currently allocated: %lu, max limit %lu\n", + size, + atomic_read(&mali_mem_os_allocator.allocated_pages) * _MALI_OSK_MALI_PAGE_SIZE, + mali_mem_os_allocator.allocation_limit)); + return _MALI_OSK_ERR_NOMEM; + } + + INIT_LIST_HEAD(&swap_mem->pages); + swap_mem->count = page_count; + index = mali_mem_swap_idx_range_alloc(page_count); + + if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == index) { + MALI_PRINT_ERROR(("Mali Swap: Failed to allocate a contiguous index range for swappable Mali memory.")); + return _MALI_OSK_ERR_FAULT; + } + + for (i = 0; i < page_count; i++) { + m_page = _mali_mem_swap_page_node_allocate(); + + if (NULL == m_page) { + MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Failed to allocate mali page node.")); + swap_mem->count = i; + + mali_mem_swap_free(swap_mem); + mali_mem_swap_idx_range_free(index + i, page_count - i); + return _MALI_OSK_ERR_FAULT; + } + + m_page->swap_it->idx = index + i; + + ret = mali_mem_swap_in_page_node(m_page); + + if (MALI_FALSE == ret) { + MALI_DEBUG_PRINT_ERROR(("SWAP Mem: Failed to allocate a new page from the SHMEM file.")); + _mali_mem_swap_page_node_free(m_page); + mali_mem_swap_idx_range_free(index + i + 1, page_count - i - 1); + + swap_mem->count = i; + mali_mem_swap_free(swap_mem); + return _MALI_OSK_ERR_NOMEM; + } + + list_add_tail(&m_page->list, &swap_mem->pages); + } + + system_free_size = global_page_state(NR_FREE_PAGES) * PAGE_SIZE; + + if ((system_free_size < mali_mem_swap_out_threshold_value) + && (mem_backend_swapped_pool_size > (mali_mem_swap_out_threshold_value >> 2)) + && mali_utilization_enabled()) { + _mali_osk_wq_schedule_work(mali_mem_swap_out_workq); + } + + *bkend_idx = index; + return 0; +} + +void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc) +{ + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_POINTER(alloc); + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + mali_session_memory_lock(session); + mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, + alloc->flags); + mali_session_memory_unlock(session); +} + + 
+/* Insert these pages from shmem into the Mali page table. */ +_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props) +{ + struct mali_page_directory *pagedir = session->page_directory; + struct mali_page_node *m_page; + dma_addr_t phys; + u32 virt = vaddr; + u32 prop = props; + + list_for_each_entry(m_page, &swap_mem->pages, list) { + MALI_DEBUG_ASSERT(NULL != m_page->swap_it->page); + phys = m_page->swap_it->dma_addr; + + mali_mmu_pagedir_update(pagedir, virt, phys, MALI_MMU_PAGE_SIZE, prop); + virt += MALI_MMU_PAGE_SIZE; + } + + return _MALI_OSK_ERR_OK; +} + +int mali_mem_swap_in_pages(struct mali_pp_job *job) +{ + u32 num_memory_cookies; + struct mali_session_data *session; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_allocation *mali_alloc = NULL; + mali_mem_backend *mem_bkend = NULL; + struct mali_page_node *m_page; + mali_bool swap_in_success = MALI_TRUE; + int i; + + MALI_DEBUG_ASSERT_POINTER(job); + + num_memory_cookies = mali_pp_job_num_memory_cookies(job); + session = mali_pp_job_get_session(job); + + MALI_DEBUG_ASSERT_POINTER(session); + + for (i = 0; i < num_memory_cookies; i++) { + + u32 mali_addr = mali_pp_job_get_memory_cookie(job, i); + + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); + if (NULL == mali_vma_node) { + job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS; + swap_in_success = MALI_FALSE; + MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr)); + continue; + } + + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(NULL != mali_alloc); + + if (MALI_MEM_SWAP != mali_alloc->type && + MALI_MEM_COW != mali_alloc->type) { + continue; + } + + /* Get backend memory & Map on GPU */ + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + + /* We need not hold the backend's lock here; this is race safe. */ + if ((MALI_MEM_COW == mem_bkend->type) && + (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) { + continue; + } + + mutex_lock(&mem_bkend->mutex); + + /* When swap_in_success is MALI_FALSE, the job has a memory backend that could not be swapped in + * and will be aborted by the Mali scheduler; here we just mark as invalid those memory cookies + * that must not be swapped out when the job is deleted. */ + if (MALI_FALSE == swap_in_success) { + job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS; + mutex_unlock(&mem_bkend->mutex); + continue; + } + + /* Before swapping in, check whether this memory backend was already swapped in by recently flushed jobs. */ + ++mem_bkend->using_count; + + if (1 < mem_bkend->using_count) { + MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)); + mutex_unlock(&mem_bkend->mutex); + continue; + } + + if (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN != (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)) { + mutex_unlock(&mem_bkend->mutex); + continue; + } + + + list_for_each_entry(m_page, &mem_bkend->swap_mem.pages, list) { + if (MALI_FALSE == mali_mem_swap_in_page_node(m_page)) { + /* Not enough memory to swap in this page, so release the pages that were already + * swapped in and mark this PP job as failed. 
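+ * mali_mem_swap_unlock_partial_locked_mem_backend() walks the page list again and swaps out + * only the nodes that precede the failing one, so no page is left dma-mapped.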
+ */ + mali_mem_swap_unlock_partial_locked_mem_backend(mem_bkend, m_page); + swap_in_success = MALI_FALSE; + break; + } + } + + if (swap_in_success) { +#ifdef MALI_MEM_SWAP_TRACKING + mem_backend_swapped_unlock_size -= mem_bkend->size; +#endif + _mali_osk_mutex_wait(session->memory_lock); + mali_mem_swap_mali_map(&mem_bkend->swap_mem, session, mali_alloc->mali_mapping.addr, mali_alloc->mali_mapping.properties); + _mali_osk_mutex_signal(session->memory_lock); + + /* Remove the unlock flag from the mem backend flags to mark this backend as swapped in. */ + mem_bkend->flags &= ~(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN); + mutex_unlock(&mem_bkend->mutex); + } else { + --mem_bkend->using_count; + /* Mark that this backend is not swapped in; it does not need to be processed any further. */ + job->memory_cookies[i] = MALI_SWAP_INVALIDATE_MALI_ADDRESS; + mutex_unlock(&mem_bkend->mutex); + } + } + + job->swap_status = swap_in_success ? MALI_SWAP_IN_SUCC : MALI_SWAP_IN_FAIL; + + return _MALI_OSK_ERR_OK; +} + +int mali_mem_swap_out_pages(struct mali_pp_job *job) +{ + u32 num_memory_cookies; + struct mali_session_data *session; + struct mali_vma_node *mali_vma_node = NULL; + mali_mem_allocation *mali_alloc = NULL; + mali_mem_backend *mem_bkend = NULL; + int i; + + MALI_DEBUG_ASSERT_POINTER(job); + + num_memory_cookies = mali_pp_job_num_memory_cookies(job); + session = mali_pp_job_get_session(job); + + MALI_DEBUG_ASSERT_POINTER(session); + + + for (i = 0; i < num_memory_cookies; i++) { + u32 mali_addr = mali_pp_job_get_memory_cookie(job, i); + + if (MALI_SWAP_INVALIDATE_MALI_ADDRESS == mali_addr) { + continue; + } + + mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, mali_addr, 0); + + if (NULL == mali_vma_node) { + MALI_PRINT_ERROR(("SWAP Mem: failed to find mali_vma_node through Mali address: 0x%08x.\n", mali_addr)); + continue; + } + + mali_alloc = container_of(mali_vma_node, struct mali_mem_allocation, mali_vma_node); + MALI_DEBUG_ASSERT(NULL != mali_alloc); + + if (MALI_MEM_SWAP != mali_alloc->type && + MALI_MEM_COW != mali_alloc->type) { + continue; + } + + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + + /* We need not hold the backend's lock here; this is race safe. */ + if ((MALI_MEM_COW == mem_bkend->type) && + (!(mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED))) { + continue; + } + + mutex_lock(&mem_bkend->mutex); + + MALI_DEBUG_ASSERT(0 < mem_bkend->using_count); + + /* Decrementing the using_count of a memory backend means fewer PP jobs are using it; once the + * count reaches zero, no PP job is using it and it can be put on the swap-out list. 
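+ * Backends are appended to the tail of the global pool, so the list stays ordered from least + * to most recently used and the shrinker frees the coldest backends first.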
*/ + --mem_bkend->using_count; + + if (0 < mem_bkend->using_count) { + mutex_unlock(&mem_bkend->mutex); + continue; + } + mutex_unlock(&mem_bkend->mutex); + + mali_memory_swap_list_backend_add(mem_bkend); + } + + return _MALI_OSK_ERR_OK; +} + +int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep) +{ + struct mali_page_node *m_page, *found_node = NULL; + struct page *found_page; + mali_mem_swap *swap = NULL; + mali_mem_cow *cow = NULL; + dma_addr_t dma_addr; + u32 i = 0; + + if (MALI_MEM_SWAP == mem_bkend->type) { + swap = &mem_bkend->swap_mem; + list_for_each_entry(m_page, &swap->pages, list) { + if (i == offset) { + found_node = m_page; + break; + } + i++; + } + } else { + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)); + + cow = &mem_bkend->cow_mem; + list_for_each_entry(m_page, &cow->pages, list) { + if (i == offset) { + found_node = m_page; + break; + } + i++; + } + } + + if (NULL == found_node) { + return _MALI_OSK_ERR_FAULT; + } + + found_page = shmem_read_mapping_page(global_swap_space, found_node->swap_it->idx); + + if (!IS_ERR(found_page)) { + lock_page(found_page); + dma_addr = dma_map_page(&mali_platform_device->dev, found_page, + 0, _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + dma_unmap_page(&mali_platform_device->dev, dma_addr, + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + *pagep = found_page; + } else { + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep) +{ + struct mali_page_node *m_page, *found_node = NULL, *new_node = NULL; + mali_mem_cow *cow = NULL; + u32 i = 0; + + MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type); + MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_SWAP_COWED == (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED)); + MALI_DEBUG_ASSERT(MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN == (MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN & mem_bkend->flags)); + MALI_DEBUG_ASSERT(!mali_memory_swap_backend_in_swapped_pool(mem_bkend)); + + cow = &mem_bkend->cow_mem; + list_for_each_entry(m_page, &cow->pages, list) { + if (i == offset) { + found_node = m_page; + break; + } + i++; + } + + if (NULL == found_node) { + return _MALI_OSK_ERR_FAULT; + } + + new_node = _mali_mem_swap_page_node_allocate(); + + if (NULL == new_node) { + return _MALI_OSK_ERR_FAULT; + } + + new_node->swap_it->idx = mali_mem_swap_idx_alloc(); + + if (_MALI_OSK_BITMAP_INVALIDATE_INDEX == new_node->swap_it->idx) { + MALI_DEBUG_PRINT(1, ("Failed to allocate swap index in swap CoW on demand.\n")); + kfree(new_node->swap_it); + kfree(new_node); + return _MALI_OSK_ERR_FAULT; + } + + if (MALI_FALSE == mali_mem_swap_in_page_node(new_node)) { + _mali_mem_swap_page_node_free(new_node); + return _MALI_OSK_ERR_FAULT; + } + + /* swap in found node for copy in kernel. 
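Both the original and the new page must be resident before _mali_mem_cow_copy_page() can copy the contents; afterwards the original node is swapped back out and unreferenced. 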
*/ + if (MALI_FALSE == mali_mem_swap_in_page_node(found_node)) { + mali_mem_swap_out_page_node(new_node); + _mali_mem_swap_page_node_free(new_node); + return _MALI_OSK_ERR_FAULT; + } + + _mali_mem_cow_copy_page(found_node, new_node); + + list_replace(&found_node->list, &new_node->list); + + if (1 != _mali_page_node_get_ref_count(found_node)) { + atomic_add(1, &mem_bkend->mali_allocation->session->mali_mem_allocated_pages); + if (atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE > mem_bkend->mali_allocation->session->max_mali_mem_allocated_size) { + mem_bkend->mali_allocation->session->max_mali_mem_allocated_size = atomic_read(&mem_bkend->mali_allocation->session->mali_mem_allocated_pages) * MALI_MMU_PAGE_SIZE; + } + mem_bkend->cow_mem.change_pages_nr++; + } + + mali_mem_swap_out_page_node(found_node); + _mali_mem_swap_page_node_free(found_node); + + /* Swapping in the new page node called dma_map_page for this page; unmap it again before CPU use. */ + dma_unmap_page(&mali_platform_device->dev, new_node->swap_it->dma_addr, + _MALI_OSK_MALI_PAGE_SIZE, DMA_TO_DEVICE); + + lock_page(new_node->swap_it->page); + + *pagep = new_node->swap_it->page; + + return _MALI_OSK_ERR_OK; +} + +#ifdef MALI_MEM_SWAP_TRACKING +void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size) +{ + *swap_pool_size = mem_backend_swapped_pool_size; + *unlock_size = mem_backend_swapped_unlock_size; +} +#endif diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_swap_alloc.h b/drivers/gpu/arm/mali400/linux/mali_memory_swap_alloc.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_swap_alloc.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_swap_alloc.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEMORY_SWAP_ALLOC_H__ +#define __MALI_MEMORY_SWAP_ALLOC_H__ + +#include "mali_osk.h" +#include "mali_session.h" + +#include "mali_memory_types.h" +#include "mali_pp_job.h" + +/** + * Initialize memory swapping module. + */ +_mali_osk_errcode_t mali_mem_swap_init(void); + +void mali_mem_swap_term(void); + +/** + * Return the global shared memory file to other modules. + */ +struct file *mali_mem_swap_get_global_swap_file(void); + +/** + * Unlock the given memory backend so the pages in it can be swapped out by the kernel. + */ +void mali_mem_swap_unlock_single_mem_backend(mali_mem_backend *mem_bkend); + +/** + * Remove the given memory backend from the global swap list. + */ +void mali_memory_swap_list_backend_delete(mali_mem_backend *mem_bkend); + +/** + * Add the given memory backend to the global swap list. + */ +void mali_memory_swap_list_backend_add(mali_mem_backend *mem_bkend); + +/** + * Allocate one index from the bitmap, used as a page index into the global swap file. + */ +u32 mali_mem_swap_idx_alloc(void); + +void mali_mem_swap_idx_free(u32 idx); + +/** + * Allocate a new swap item without page index. 
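+ * The item starts with a reference count of 1 and no resident page; the caller must assign + * swap_item->idx (for example via mali_mem_swap_idx_alloc()) before the item can be swapped in.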
+ */ +struct mali_swap_item *mali_mem_swap_alloc_swap_item(void); + +/** + * Free a swap item: truncate the corresponding space in the page cache and free the page index. + */ +void mali_mem_swap_free_swap_item(mali_swap_item *swap_item); + +/** + * Allocate a page node with swap item. + */ +struct mali_page_node *_mali_mem_swap_page_node_allocate(void); + +/** + * Drop the reference count of the given page node's swap item; when it reaches zero the swap item is freed. + */ +_mali_osk_errcode_t _mali_mem_swap_put_page_node(struct mali_page_node *m_page); + +void _mali_mem_swap_page_node_free(struct mali_page_node *m_page); + +/** + * Free a swappable memory backend. + */ +u32 mali_mem_swap_free(mali_mem_swap *swap_mem); + +/** + * Unmap and free. + */ +u32 mali_mem_swap_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped); + +/** + * Read a page in from the global swap file at the pre-allocated page index. + */ +mali_bool mali_mem_swap_in_page_node(struct mali_page_node *page_node); + +int mali_mem_swap_alloc_pages(mali_mem_swap *swap_mem, u32 size, u32 *bkend_idx); + +_mali_osk_errcode_t mali_mem_swap_mali_map(mali_mem_swap *swap_mem, struct mali_session_data *session, u32 vaddr, u32 props); + +void mali_mem_swap_mali_unmap(mali_mem_allocation *alloc); + +/** + * When a PP job is created, we need to swap in all memory backends used by the job. + */ +int mali_mem_swap_in_pages(struct mali_pp_job *job); + +/** + * Put all memory backends used by this PP job onto the global swap list. + */ +int mali_mem_swap_out_pages(struct mali_pp_job *job); + +/** + * Called from the page-fault handler to service CPU reads and writes. + */ +int mali_mem_swap_allocate_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep); + +/** + * Used to process CoW on demand for a swappable memory backend. + */ +int mali_mem_swap_cow_page_on_demand(mali_mem_backend *mem_bkend, u32 offset, struct page **pagep); + +#ifdef MALI_MEM_SWAP_TRACKING +void mali_mem_swap_tracking(u32 *swap_pool_size, u32 *unlock_size); +#endif +#endif /* __MALI_MEMORY_SWAP_ALLOC_H__ */ + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_types.h b/drivers/gpu/arm/mali400/linux/mali_memory_types.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_types.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_types.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,219 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEMORY_TYPES_H__ +#define __MALI_MEMORY_TYPES_H__ + +#include + +#if defined(CONFIG_MALI400_UMP) +#include "ump_kernel_interface.h" +#endif + +typedef u32 mali_address_t; + +typedef enum mali_mem_type { + MALI_MEM_OS, + MALI_MEM_EXTERNAL, + MALI_MEM_SWAP, + MALI_MEM_DMA_BUF, + MALI_MEM_UMP, + MALI_MEM_BLOCK, + MALI_MEM_COW, + MALI_MEM_SECURE, + MALI_MEM_TYPE_MAX, +} mali_mem_type; + +typedef struct mali_block_item { + /* For the block type, block_phy is always page-size aligned, + * so the low 12 bits are used for the ref count. 
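+ * That is, phy_addr & ~0xFFF yields the physical address and phy_addr & 0xFFF the current + * reference count.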
+ */ + unsigned long phy_addr; +} mali_block_item; + +/** + * idx is used to locate the given page in the address space of the swap file. + * ref_count is used to mark how many memory backends are using this item. + */ +typedef struct mali_swap_item { + u32 idx; + atomic_t ref_count; + struct page *page; + dma_addr_t dma_addr; +} mali_swap_item; + +typedef enum mali_page_node_type { + MALI_PAGE_NODE_OS, + MALI_PAGE_NODE_BLOCK, + MALI_PAGE_NODE_SWAP, +} mali_page_node_type; + +typedef struct mali_page_node { + struct list_head list; + union { + struct page *page; + mali_block_item *blk_it; /* pointer to block item */ + mali_swap_item *swap_it; + }; + + u32 type; +} mali_page_node; + +typedef struct mali_mem_os_mem { + struct list_head pages; + u32 count; +} mali_mem_os_mem; + +typedef struct mali_mem_dma_buf { +#if defined(CONFIG_DMA_SHARED_BUFFER) + struct mali_dma_buf_attachment *attachment; +#endif +} mali_mem_dma_buf; + +typedef struct mali_mem_external { + dma_addr_t phys; + u32 size; +} mali_mem_external; + +typedef struct mali_mem_ump { +#if defined(CONFIG_MALI400_UMP) + ump_dd_handle handle; +#endif +} mali_mem_ump; + +typedef struct block_allocator_allocation { + /* The list will be released in reverse order */ + struct block_info *last_allocated; + u32 mapping_length; + struct block_allocator *info; +} block_allocator_allocation; + +typedef struct mali_mem_block_mem { + struct list_head pfns; + u32 count; +} mali_mem_block_mem; + +typedef struct mali_mem_virt_mali_mapping { + mali_address_t addr; /* Virtual Mali address */ + u32 properties; /* MMU Permissions + cache, must match MMU HW */ +} mali_mem_virt_mali_mapping; + +typedef struct mali_mem_virt_cpu_mapping { + void __user *addr; + struct vm_area_struct *vma; +} mali_mem_virt_cpu_mapping; + +#define MALI_MEM_ALLOCATION_VALID_MAGIC 0xdeda110c +#define MALI_MEM_ALLOCATION_FREED_MAGIC 0x10101010 + +typedef struct mali_mm_node { + /* Mali GPU vaddr start; u32 because the MMU only supports 32-bit addresses */ + uint32_t start; /* GPU vaddr */ + uint32_t size; /* GPU allocation virtual size */ + unsigned allocated : 1; +} mali_mm_node; + +typedef struct mali_vma_node { + struct mali_mm_node vm_node; + struct rb_node vm_rb; +} mali_vma_node; + + +typedef struct mali_mem_allocation { + MALI_DEBUG_CODE(u32 magic); + mali_mem_type type; /**< Type of memory */ + u32 flags; /**< Flags for this allocation */ + + struct mali_session_data *session; /**< Pointer to session that owns the allocation */ + + mali_mem_virt_cpu_mapping cpu_mapping; /**< CPU mapping */ + mali_mem_virt_mali_mapping mali_mapping; /**< Mali mapping */ + + /* add for new memory system */ + struct mali_vma_node mali_vma_node; + u32 vsize; /* virtual size */ + u32 psize; /* physical backend memory size */ + struct list_head list; + s32 backend_handle; /* idr for mem_backend */ + _mali_osk_atomic_t mem_alloc_refcount; +} mali_mem_allocation; + +struct mali_mem_os_allocator { + spinlock_t pool_lock; + struct list_head pool_pages; + size_t pool_count; + + atomic_t allocated_pages; + size_t allocation_limit; + + struct shrinker shrinker; + struct delayed_work timed_shrinker; + struct workqueue_struct *wq; +}; + +/* COW backend memory type */ +typedef struct mali_mem_cow { + struct list_head pages; /**< all pages for this cow backend allocation, + including newly allocated pages for the modified range */ + u32 count; /**< number of pages */ + s32 change_pages_nr; +} mali_mem_cow; + +typedef struct mali_mem_swap { + struct list_head pages; + u32 count; +} mali_mem_swap; + +typedef struct 
mali_mem_secure { +#if defined(CONFIG_DMA_SHARED_BUFFER) + struct dma_buf *buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; +#endif + u32 count; +} mali_mem_secure; + +#define MALI_MEM_BACKEND_FLAG_COWED (0x1) /* COW has happened on this backend */ +#define MALI_MEM_BACKEND_FLAG_COW_CPU_NO_WRITE (0x2) /* This is a COW backend, mapped so the CPU is not allowed to write */ +#define MALI_MEM_BACKEND_FLAG_SWAP_COWED (0x4) /* Marks that the given backend was cowed from swappable memory. */ +/* Marks that this backend is not swapped in by the Mali driver; before use it must + * be swapped in and the corresponding page table set up. */ +#define MALI_MEM_BACKEND_FLAG_UNSWAPPED_IN (0x8) +#define MALI_MEM_BACKEND_FLAG_NOT_BINDED (0x1 << 5) /* this backend is not yet backed by physical memory; used for deferred bind */ +#define MALI_MEM_BACKEND_FLAG_BINDED (0x1 << 6) /* this backend is backed by physical memory; used for deferred bind */ + +typedef struct mali_mem_backend { + mali_mem_type type; /**< Type of backend memory */ + u32 flags; /**< Flags for this allocation */ + u32 size; + /* Union selected by type. */ + union { + mali_mem_os_mem os_mem; /**< MALI_MEM_OS */ + mali_mem_external ext_mem; /**< MALI_MEM_EXTERNAL */ + mali_mem_dma_buf dma_buf; /**< MALI_MEM_DMA_BUF */ + mali_mem_ump ump_mem; /**< MALI_MEM_UMP */ + mali_mem_block_mem block_mem; /**< MALI_MEM_BLOCK */ + mali_mem_cow cow_mem; + mali_mem_swap swap_mem; + mali_mem_secure secure_mem; + }; + mali_mem_allocation *mali_allocation; + struct mutex mutex; + mali_mem_type cow_type; + + struct list_head list; /**< Used to link swappable memory backend to the global swappable list */ + int using_count; /**< Mark how many PP jobs are using this memory backend */ + u32 start_idx; /**< If the corresponding vma of this backend is linear, this value will be used to set vma->vm_pgoff */ +} mali_mem_backend; + +#define MALI_MEM_FLAG_MALI_GUARD_PAGE (_MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) +#define MALI_MEM_FLAG_DONT_CPU_MAP (1 << 1) +#define MALI_MEM_FLAG_CAN_RESIZE (_MALI_MEMORY_ALLOCATE_RESIZEABLE) +#endif /* __MALI_MEMORY_TYPES_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_ump.c b/drivers/gpu/arm/mali400/linux/mali_memory_ump.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_ump.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_ump.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_kernel_linux.h" +#include "mali_memory.h" +#include "ump_kernel_interface.h" + +static int mali_mem_ump_map(mali_mem_backend *mem_backend) +{ + ump_dd_handle ump_mem; + mali_mem_allocation *alloc; + struct mali_session_data *session; + u32 nr_blocks; + u32 i; + ump_dd_physical_block *ump_blocks; + struct mali_page_directory *pagedir; + u32 offset = 0; + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type); + + alloc = mem_backend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + + ump_mem = mem_backend->ump_mem.handle; + MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem); + + nr_blocks = ump_dd_phys_block_count_get(ump_mem); + if (nr_blocks == 0) { + MALI_DEBUG_PRINT(1, ("No block count\n")); + return -EINVAL; + } + + ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks) * nr_blocks); + if (NULL == ump_blocks) { + return -ENOMEM; + } + + if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks)) { + _mali_osk_free(ump_blocks); + return -EFAULT; + } + + pagedir = session->page_directory; + + mali_session_memory_lock(session); + + err = mali_mem_mali_map_prepare(alloc); + if (_MALI_OSK_ERR_OK != err) { + MALI_DEBUG_PRINT(1, ("Mapping of UMP memory failed\n")); + + _mali_osk_free(ump_blocks); + mali_session_memory_unlock(session); + return -ENOMEM; + } + + for (i = 0; i < nr_blocks; ++i) { + u32 virt = alloc->mali_vma_node.vm_node.start + offset; + + MALI_DEBUG_PRINT(7, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size)); + + mali_mmu_pagedir_update(pagedir, virt, ump_blocks[i].addr, + ump_blocks[i].size, MALI_MMU_FLAGS_DEFAULT); + + offset += ump_blocks[i].size; + } + + if (alloc->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) { + u32 virt = alloc->mali_vma_node.vm_node.start + offset; + + /* Map in an extra virtual guard page at the end of the VMA */ + MALI_DEBUG_PRINT(6, ("Mapping in extra guard page\n")); + + mali_mmu_pagedir_update(pagedir, virt, ump_blocks[0].addr, _MALI_OSK_MALI_PAGE_SIZE, MALI_MMU_FLAGS_DEFAULT); + + offset += _MALI_OSK_MALI_PAGE_SIZE; + } + mali_session_memory_unlock(session); + _mali_osk_free(ump_blocks); + return 0; +} + +static void mali_mem_ump_unmap(mali_mem_allocation *alloc) +{ + struct mali_session_data *session; + MALI_DEBUG_ASSERT_POINTER(alloc); + session = alloc->session; + MALI_DEBUG_ASSERT_POINTER(session); + mali_session_memory_lock(session); + mali_mem_mali_map_free(session, alloc->psize, alloc->mali_vma_node.vm_node.start, + alloc->flags); + mali_session_memory_unlock(session); +} + +int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags) +{ + ump_dd_handle ump_mem; + int ret; + MALI_DEBUG_ASSERT_POINTER(alloc); + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type); + + MALI_DEBUG_PRINT(3, + ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n", + secure_id, alloc->mali_vma_node.vm_node.start, alloc->mali_vma_node.vm_node.size)); + + ump_mem = ump_dd_handle_create_from_secure_id(secure_id); + if (UMP_DD_HANDLE_INVALID == ump_mem) MALI_ERROR(_MALI_OSK_ERR_FAULT); + alloc->flags |= MALI_MEM_FLAG_DONT_CPU_MAP; + if (flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE) { + alloc->flags |= MALI_MEM_FLAG_MALI_GUARD_PAGE; + } + + 
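/* Keep the UMP handle; the reference taken by ump_dd_handle_create_from_secure_id() is released again in mali_mem_unbind_ump_buf(). */ +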
mem_backend->ump_mem.handle = ump_mem; + + ret = mali_mem_ump_map(mem_backend); + if (0 != ret) { + ump_dd_reference_release(ump_mem); + return _MALI_OSK_ERR_FAULT; + } + MALI_DEBUG_PRINT(3, ("Returning from UMP bind\n")); + return _MALI_OSK_ERR_OK; +} + +void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend) +{ + ump_dd_handle ump_mem; + mali_mem_allocation *alloc; + MALI_DEBUG_ASSERT_POINTER(mem_backend); + MALI_DEBUG_ASSERT(MALI_MEM_UMP == mem_backend->type); + ump_mem = mem_backend->ump_mem.handle; + MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID != ump_mem); + + alloc = mem_backend->mali_allocation; + MALI_DEBUG_ASSERT_POINTER(alloc); + mali_mem_ump_unmap(alloc); + ump_dd_reference_release(ump_mem); +} + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_ump.h b/drivers/gpu/arm/mali400/linux/mali_memory_ump.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_ump.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_ump.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2011-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEMORY_UMP_BUF_H__ +#define __MALI_MEMORY_UMP_BUF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "mali_uk_types.h" +#include "mali_osk.h" +#include "mali_memory.h" + +int mali_mem_bind_ump_buf(mali_mem_allocation *alloc, mali_mem_backend *mem_backend, u32 secure_id, u32 flags); +void mali_mem_unbind_ump_buf(mali_mem_backend *mem_backend); + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_MEMORY_UMP_BUF_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_util.c b/drivers/gpu/arm/mali400/linux/mali_memory_util.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_util.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_util.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,158 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_kernel_linux.h" +#include "mali_scheduler.h" + +#include "mali_memory.h" +#include "mali_memory_os_alloc.h" +#if defined(CONFIG_DMA_SHARED_BUFFER) +#include "mali_memory_dma_buf.h" +#include "mali_memory_secure.h" +#endif +#if defined(CONFIG_MALI400_UMP) +#include "mali_memory_ump.h" +#endif +#include "mali_memory_external.h" +#include "mali_memory_manager.h" +#include "mali_memory_virtual.h" +#include "mali_memory_cow.h" +#include "mali_memory_block_alloc.h" +#include "mali_memory_swap_alloc.h" + + + +/** + * _mali_free_allocation_mem() - free a memory allocation + */ +static u32 _mali_free_allocation_mem(mali_mem_allocation *mali_alloc) +{ + mali_mem_backend *mem_bkend = NULL; + u32 free_pages_nr = 0; + + struct mali_session_data *session = mali_alloc->session; + MALI_DEBUG_PRINT(4, (" _mali_free_allocation_mem, psize =0x%x! \n", mali_alloc->psize)); + if (0 == mali_alloc->psize) + goto out; + + /* Look up the backend memory for this allocation. */ + mutex_lock(&mali_idr_mutex); + mem_bkend = idr_find(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + MALI_DEBUG_ASSERT(NULL != mem_bkend); + + switch (mem_bkend->type) { + case MALI_MEM_OS: + free_pages_nr = mali_mem_os_release(mem_bkend); + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); + break; + case MALI_MEM_UMP: +#if defined(CONFIG_MALI400_UMP) + mali_mem_unbind_ump_buf(mem_bkend); + atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]); +#else + MALI_DEBUG_PRINT(1, ("UMP not supported\n")); +#endif + break; + case MALI_MEM_DMA_BUF: +#if defined(CONFIG_DMA_SHARED_BUFFER) + mali_mem_unbind_dma_buf(mem_bkend); + atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]); +#else + MALI_DEBUG_PRINT(1, ("DMA not supported\n")); +#endif + break; + case MALI_MEM_EXTERNAL: + mali_mem_unbind_ext_buf(mem_bkend); + atomic_sub(mem_bkend->size / MALI_MMU_PAGE_SIZE, &session->mali_mem_array[mem_bkend->type]); + break; + + case MALI_MEM_BLOCK: + free_pages_nr = mali_mem_block_release(mem_bkend); + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); + break; + + case MALI_MEM_COW: + if (mem_bkend->flags & MALI_MEM_BACKEND_FLAG_SWAP_COWED) { + free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE); + } else { + free_pages_nr = mali_mem_cow_release(mem_bkend, MALI_TRUE); + } + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); + break; + case MALI_MEM_SWAP: + free_pages_nr = mali_mem_swap_release(mem_bkend, MALI_TRUE); + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); + atomic_sub(free_pages_nr, &session->mali_mem_array[mem_bkend->type]); + break; + case MALI_MEM_SECURE: +#if defined(CONFIG_DMA_SHARED_BUFFER) + free_pages_nr = mali_mem_secure_release(mem_bkend); + atomic_sub(free_pages_nr, &session->mali_mem_allocated_pages); +#else + MALI_DEBUG_PRINT(1, ("DMA not supported for mali secure memory\n")); +#endif + break; + default: + MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", mem_bkend->type)); + break; + } + + /* Remove the backend memory index. */ + mutex_lock(&mali_idr_mutex); + idr_remove(&mali_backend_idr, mali_alloc->backend_handle); + mutex_unlock(&mali_idr_mutex); + kfree(mem_bkend); +out: + /* remove memory allocation */ + mali_vma_offset_remove(&session->allocation_mgr, &mali_alloc->mali_vma_node); + 
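/* Finally release the allocation descriptor itself. */ +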
mali_mem_allocation_struct_destory(mali_alloc); + return free_pages_nr; +} + +/** +* ref_count for allocation +*/ +u32 mali_allocation_unref(struct mali_mem_allocation **alloc) +{ + u32 free_pages_nr = 0; + mali_mem_allocation *mali_alloc = *alloc; + *alloc = NULL; + if (0 == _mali_osk_atomic_dec_return(&mali_alloc->mem_alloc_refcount)) { + free_pages_nr = _mali_free_allocation_mem(mali_alloc); + } + return free_pages_nr; +} + +void mali_allocation_ref(struct mali_mem_allocation *alloc) +{ + _mali_osk_atomic_inc(&alloc->mem_alloc_refcount); +} + +void mali_free_session_allocations(struct mali_session_data *session) +{ + struct mali_mem_allocation *entry, *next; + + MALI_DEBUG_PRINT(4, (" mali_free_session_allocations! \n")); + + list_for_each_entry_safe(entry, next, &session->allocation_mgr.head, list) { + mali_allocation_unref(&entry); + } +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_util.h b/drivers/gpu/arm/mali400/linux/mali_memory_util.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_util.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_util.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_MEMORY_UTIL_H__ +#define __MALI_MEMORY_UTIL_H__ + +u32 mali_allocation_unref(struct mali_mem_allocation **alloc); + +void mali_allocation_ref(struct mali_mem_allocation *alloc); + +void mali_free_session_allocations(struct mali_session_data *session); + +#endif diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_virtual.c b/drivers/gpu/arm/mali400/linux/mali_memory_virtual.c --- a/drivers/gpu/arm/mali400/linux/mali_memory_virtual.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_virtual.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_kernel_linux.h" +#include "mali_scheduler.h" +#include "mali_memory_os_alloc.h" +#include "mali_memory_manager.h" +#include "mali_memory_virtual.h" + + +/** +*internal helper to link node into the rb-tree +*/ +static inline void _mali_vma_offset_add_rb(struct mali_allocation_manager *mgr, + struct mali_vma_node *node) +{ + struct rb_node **iter = &mgr->allocation_mgr_rb.rb_node; + struct rb_node *parent = NULL; + struct mali_vma_node *iter_node; + + while (likely(*iter)) { + parent = *iter; + iter_node = rb_entry(*iter, struct mali_vma_node, vm_rb); + + if (node->vm_node.start < iter_node->vm_node.start) + iter = &(*iter)->rb_left; + else if (node->vm_node.start > iter_node->vm_node.start) + iter = &(*iter)->rb_right; + else + MALI_DEBUG_ASSERT(0); + } + + rb_link_node(&node->vm_rb, parent, iter); + rb_insert_color(&node->vm_rb, &mgr->allocation_mgr_rb); +} + +/** + * mali_vma_offset_add() - Add offset node to RB Tree + */ +int mali_vma_offset_add(struct mali_allocation_manager *mgr, + struct mali_vma_node *node) +{ + int ret = 0; + write_lock(&mgr->vm_lock); + + if (node->vm_node.allocated) { + goto out; + } + + _mali_vma_offset_add_rb(mgr, node); + /* set to allocated */ + node->vm_node.allocated = 1; + +out: + write_unlock(&mgr->vm_lock); + return ret; +} + +/** + * mali_vma_offset_remove() - Remove offset node from RB tree + */ +void mali_vma_offset_remove(struct mali_allocation_manager *mgr, + struct mali_vma_node *node) +{ + write_lock(&mgr->vm_lock); + + if (node->vm_node.allocated) { + rb_erase(&node->vm_rb, &mgr->allocation_mgr_rb); + memset(&node->vm_node, 0, sizeof(node->vm_node)); + } + write_unlock(&mgr->vm_lock); +} + +/** +* mali_vma_offset_search - Search the node in RB tree +*/ +struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr, + unsigned long start, unsigned long pages) +{ + struct mali_vma_node *node, *best; + struct rb_node *iter; + unsigned long offset; + read_lock(&mgr->vm_lock); + + iter = mgr->allocation_mgr_rb.rb_node; + best = NULL; + + while (likely(iter)) { + node = rb_entry(iter, struct mali_vma_node, vm_rb); + offset = node->vm_node.start; + if (start >= offset) { + iter = iter->rb_right; + best = node; + if (start == offset) + break; + } else { + iter = iter->rb_left; + } + } + + if (best) { + offset = best->vm_node.start + best->vm_node.size; + if (offset <= start + pages) + best = NULL; + } + read_unlock(&mgr->vm_lock); + + return best; +} + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_memory_virtual.h b/drivers/gpu/arm/mali400/linux/mali_memory_virtual.h --- a/drivers/gpu/arm/mali400/linux/mali_memory_virtual.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_memory_virtual.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +#ifndef __MALI_GPU_VMEM_H__ +#define __MALI_GPU_VMEM_H__ + +#include "mali_osk.h" +#include "mali_session.h" +#include +#include +#include +#include +#include +#include "mali_memory_types.h" +#include "mali_memory_os_alloc.h" +#include "mali_memory_manager.h" + + + +int mali_vma_offset_add(struct mali_allocation_manager *mgr, + struct mali_vma_node *node); + +void mali_vma_offset_remove(struct mali_allocation_manager *mgr, + struct mali_vma_node *node); + +struct mali_vma_node *mali_vma_offset_search(struct mali_allocation_manager *mgr, + unsigned long start, unsigned long pages); + +#endif diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_atomics.c b/drivers/gpu/arm/mali400/linux/mali_osk_atomics.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_atomics.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_atomics.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_atomics.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include "mali_osk.h" +#include +#include "mali_kernel_common.h" + +void _mali_osk_atomic_dec(_mali_osk_atomic_t *atom) +{ + atomic_dec((atomic_t *)&atom->u.val); +} + +u32 _mali_osk_atomic_dec_return(_mali_osk_atomic_t *atom) +{ + return atomic_dec_return((atomic_t *)&atom->u.val); +} + +void _mali_osk_atomic_inc(_mali_osk_atomic_t *atom) +{ + atomic_inc((atomic_t *)&atom->u.val); +} + +u32 _mali_osk_atomic_inc_return(_mali_osk_atomic_t *atom) +{ + return atomic_inc_return((atomic_t *)&atom->u.val); +} + +void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val) +{ + MALI_DEBUG_ASSERT_POINTER(atom); + atomic_set((atomic_t *)&atom->u.val, val); +} + +u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom) +{ + return atomic_read((atomic_t *)&atom->u.val); +} + +void _mali_osk_atomic_term(_mali_osk_atomic_t *atom) +{ + MALI_IGNORE(atom); +} + +u32 _mali_osk_atomic_xchg(_mali_osk_atomic_t *atom, u32 val) +{ + return atomic_xchg((atomic_t *)&atom->u.val, val); +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_bitmap.c b/drivers/gpu/arm/mali400/linux/mali_osk_bitmap.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_bitmap.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_bitmap.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2010, 2013-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +/** + * @file mali_osk_bitmap.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include +#include +#include +#include +#include +#include "common/mali_kernel_common.h" +#include "mali_osk_types.h" +#include "mali_osk.h" + +u32 _mali_osk_bitmap_alloc(struct _mali_osk_bitmap *bitmap) +{ + u32 obj; + + MALI_DEBUG_ASSERT_POINTER(bitmap); + + _mali_osk_spinlock_lock(bitmap->lock); + + obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->reserve); + + if (obj < bitmap->max) { + set_bit(obj, bitmap->table); + } else { + obj = -1; + } + + if (obj != -1) + --bitmap->avail; + _mali_osk_spinlock_unlock(bitmap->lock); + + return obj; +} + +void _mali_osk_bitmap_free(struct _mali_osk_bitmap *bitmap, u32 obj) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + + _mali_osk_bitmap_free_range(bitmap, obj, 1); +} + +u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt) +{ + u32 obj; + + MALI_DEBUG_ASSERT_POINTER(bitmap); + + if (0 >= cnt) { + return -1; + } + + if (1 == cnt) { + return _mali_osk_bitmap_alloc(bitmap); + } + + _mali_osk_spinlock_lock(bitmap->lock); + obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, + bitmap->last, cnt, 0); + + if (obj >= bitmap->max) { + obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, + bitmap->reserve, cnt, 0); + } + + if (obj < bitmap->max) { + bitmap_set(bitmap->table, obj, cnt); + + bitmap->last = (obj + cnt); + if (bitmap->last >= bitmap->max) { + bitmap->last = bitmap->reserve; + } + } else { + obj = -1; + } + + if (obj != -1) { + bitmap->avail -= cnt; + } + + _mali_osk_spinlock_unlock(bitmap->lock); + + return obj; +} + +u32 _mali_osk_bitmap_avail(struct _mali_osk_bitmap *bitmap) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + + return bitmap->avail; +} + +void _mali_osk_bitmap_free_range(struct _mali_osk_bitmap *bitmap, u32 obj, int cnt) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + + _mali_osk_spinlock_lock(bitmap->lock); + bitmap_clear(bitmap->table, obj, cnt); + bitmap->last = min(bitmap->last, obj); + + bitmap->avail += cnt; + _mali_osk_spinlock_unlock(bitmap->lock); +} + +int _mali_osk_bitmap_init(struct _mali_osk_bitmap *bitmap, u32 num, u32 reserve) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + MALI_DEBUG_ASSERT(reserve <= num); + + bitmap->reserve = reserve; + bitmap->last = reserve; + bitmap->max = num; + bitmap->avail = num - reserve; + bitmap->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_UNORDERED, _MALI_OSK_LOCK_ORDER_FIRST); + if (!bitmap->lock) { + return _MALI_OSK_ERR_NOMEM; + } + bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * + sizeof(long), GFP_KERNEL); + if (!bitmap->table) { + _mali_osk_spinlock_term(bitmap->lock); + return _MALI_OSK_ERR_NOMEM; + } + + return _MALI_OSK_ERR_OK; +} + +void _mali_osk_bitmap_term(struct _mali_osk_bitmap *bitmap) +{ + MALI_DEBUG_ASSERT_POINTER(bitmap); + + if (NULL != bitmap->lock) { + _mali_osk_spinlock_term(bitmap->lock); + } + + if (NULL != bitmap->table) { + kfree(bitmap->table); + } +} + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_irq.c b/drivers/gpu/arm/mali400/linux/mali_osk_irq.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_irq.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_irq.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. 
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_irq.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include /* For memory allocation */ +#include +#include +#include + +#include "mali_osk.h" +#include "mali_kernel_common.h" + +typedef struct _mali_osk_irq_t_struct { + u32 irqnum; + void *data; + _mali_osk_irq_uhandler_t uhandler; +} mali_osk_irq_object_t; + +typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *); +static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id); /* , struct pt_regs *regs*/ + +#if defined(DEBUG) + +struct test_interrupt_data { + _mali_osk_irq_ack_t ack_func; + void *probe_data; + mali_bool interrupt_received; + wait_queue_head_t wq; +}; + +static irqreturn_t test_interrupt_upper_half(int port_name, void *dev_id) +{ + irqreturn_t ret = IRQ_NONE; + struct test_interrupt_data *data = (struct test_interrupt_data *)dev_id; + + if (_MALI_OSK_ERR_OK == data->ack_func(data->probe_data)) { + data->interrupt_received = MALI_TRUE; + wake_up(&data->wq); + ret = IRQ_HANDLED; + } + + return ret; +} + +static _mali_osk_errcode_t test_interrupt(u32 irqnum, + _mali_osk_irq_trigger_t trigger_func, + _mali_osk_irq_ack_t ack_func, + void *probe_data, + const char *description) +{ + unsigned long irq_flags = 0; + struct test_interrupt_data data = { + .ack_func = ack_func, + .probe_data = probe_data, + .interrupt_received = MALI_FALSE, + }; + +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + irq_flags |= IRQF_SHARED; +#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */ + + if (0 != request_irq(irqnum, test_interrupt_upper_half, irq_flags, description, &data)) { + MALI_DEBUG_PRINT(2, ("Unable to install test IRQ handler for core '%s'\n", description)); + return _MALI_OSK_ERR_FAULT; + } + + init_waitqueue_head(&data.wq); + + trigger_func(probe_data); + wait_event_timeout(data.wq, data.interrupt_received, 100); + + free_irq(irqnum, &data); + + if (data.interrupt_received) { + MALI_DEBUG_PRINT(3, ("%s: Interrupt test OK\n", description)); + return _MALI_OSK_ERR_OK; + } else { + MALI_PRINT_ERROR(("%s: Failed interrupt test on %u\n", description, irqnum)); + return _MALI_OSK_ERR_FAULT; + } +} + +#endif /* defined(DEBUG) */ + +_mali_osk_irq_t *_mali_osk_irq_init(u32 irqnum, _mali_osk_irq_uhandler_t uhandler, void *int_data, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *probe_data, const char *description) +{ + mali_osk_irq_object_t *irq_object; + unsigned long irq_flags = 0; + +#if defined(CONFIG_MALI_SHARED_INTERRUPTS) + irq_flags |= IRQF_SHARED; +#endif /* defined(CONFIG_MALI_SHARED_INTERRUPTS) */ + + irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL); + if (NULL == irq_object) { + return NULL; + } + + if (-1 == irqnum) { + /* Probe for IRQ */ + if ((NULL != trigger_func) && (NULL != ack_func)) { + unsigned long probe_count = 3; + _mali_osk_errcode_t err; + int irq; + + MALI_DEBUG_PRINT(2, ("Probing for irq\n")); + + do { + unsigned long mask; + + mask = probe_irq_on(); + trigger_func(probe_data); + + _mali_osk_time_ubusydelay(5); + + irq = probe_irq_off(mask); + err = 
ack_func(probe_data);
+			} while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+			if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+			else irqnum = irq;
+		} else irqnum = -1; /* no probe functions, fault */
+
+		if (-1 != irqnum) {
+			/* found an irq */
+			MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+		} else {
+			MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+		}
+	}
+
+	irq_object->irqnum = irqnum;
+	irq_object->uhandler = uhandler;
+	irq_object->data = int_data;
+
+	if (-1 == irqnum) {
+		MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+		kfree(irq_object);
+		return NULL;
+	}
+
+#if defined(DEBUG)
+	/* Verify that the configured interrupt settings are working */
+	if (_MALI_OSK_ERR_OK != test_interrupt(irqnum, trigger_func, ack_func, probe_data, description)) {
+		MALI_DEBUG_PRINT(2, ("Test of IRQ(%d) handler for core '%s' failed\n", irqnum, description));
+		kfree(irq_object);
+		return NULL;
+	}
+#endif
+
+	if (0 != request_irq(irqnum, irq_handler_upper_half, irq_flags, description, irq_object)) {
+		MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+		kfree(irq_object);
+		return NULL;
+	}
+
+	return irq_object;
+}
+
+void _mali_osk_irq_term(_mali_osk_irq_t *irq)
+{
+	mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+	free_irq(irq_object->irqnum, irq_object);
+	kfree(irq_object);
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU gets the hw-irq from mali, or from another device on the same IRQ channel.
+ * One of these handlers is registered for each mali core. When an interrupt
+ * arrives, this function is therefore called as many times as there are
+ * registered mali cores.
+ * That means that we only check one mali core per call, and the core we check
+ * on each turn is given by the \a dev_id variable.
+ * If we detect a pending interrupt on the given core, we mask the interrupt
+ * out by setting the core's IRQ_MASK register to zero.
+ * Then we schedule the mali_core_irq_handler_bottom_half to run as a high
+ * priority work queue job.
+ */
+static irqreturn_t irq_handler_upper_half(int port_name, void *dev_id) /* , struct pt_regs *regs*/
+{
+	irqreturn_t ret = IRQ_NONE;
+	mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+	if (_MALI_OSK_ERR_OK == irq_object->uhandler(irq_object->data)) {
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_locks.c b/drivers/gpu/arm/mali400/linux/mali_osk_locks.c
--- a/drivers/gpu/arm/mali400/linux/mali_osk_locks.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_osk_locks.c	2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk_locks.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+
+#ifdef DEBUG
+#ifdef LOCK_ORDER_CHECKING
+static DEFINE_SPINLOCK(lock_tracking_lock);
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid);
+static const char *const lock_order_to_string(_mali_osk_lock_order_t order);
+#endif /* LOCK_ORDER_CHECKING */
+
+void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+{
+	checker->orig_flags = flags;
+	checker->owner = 0;
+
+#ifdef LOCK_ORDER_CHECKING
+	checker->order = order;
+	checker->next = NULL;
+#endif
+}
+
+void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker)
+{
+	checker->owner = _mali_osk_get_tid();
+
+#ifdef LOCK_ORDER_CHECKING
+	if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+		if (!add_lock_to_log_and_check(checker, _mali_osk_get_tid())) {
+			printk(KERN_ERR "%d: ERROR lock %p taken while holding a lock of a higher order.\n",
+			       _mali_osk_get_tid(), checker);
+			dump_stack();
+		}
+	}
+#endif
+}
+
+void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker)
+{
+
+#ifdef LOCK_ORDER_CHECKING
+	if (!(checker->orig_flags & _MALI_OSK_LOCKFLAG_UNORDERED)) {
+		remove_lock_from_log(checker, _mali_osk_get_tid());
+	}
+#endif
+	checker->owner = 0;
+}
+
+
+#ifdef LOCK_ORDER_CHECKING
+/* Lock order checking
+ * -------------------
+ *
+ * To ensure that the lock ordering scheme defined by _mali_osk_lock_order_t is strictly adhered to, the
+ * following functions will, together with a linked list and some extra members in _mali_osk_lock_debug_s,
+ * make sure that a lock that is taken has a higher order than the current highest-order lock a
+ * thread holds.
+ *
+ * This is done in the following manner:
+ * - A linked list keeps track of locks held by a thread.
+ * - A `next' pointer is added to each lock. This is used to chain the locks together.
+ * - When taking a lock, `add_lock_to_log_and_check' makes sure that taking
+ *   the given lock is legal. It will follow the linked list to find the last
+ *   lock taken by this thread. If the last lock's order was lower than the
+ *   lock that is to be taken, it appends the new lock to the list and returns
+ *   true; if not, it returns false. This return value is assert()'ed on in
+ *   _mali_osk_lock_wait().
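+ *
+ * Illustrative example (assuming _mali_osk_lock_order_t declares its values in
+ * the order listed by lock_order_to_string() below): a thread already holding a
+ * lock of order _MALI_OSK_LOCK_ORDER_SCHEDULER may still take a lock of the
+ * higher order _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED, but taking another lock
+ * of order _MALI_OSK_LOCK_ORDER_SCHEDULER or lower fails the check and triggers
+ * the error printout in _mali_osk_locks_debug_add().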
+ */
+
+static struct _mali_osk_lock_debug_s *lock_lookup_list;
+
+static void dump_lock_tracking_list(void)
+{
+	struct _mali_osk_lock_debug_s *l;
+	u32 n = 1;
+
+	/* print list for debugging purposes */
+	l = lock_lookup_list;
+
+	while (NULL != l) {
+		printk(" [lock: %p, tid_owner: %d, order: %d] ->", l, l->owner, l->order);
+		l = l->next;
+		MALI_DEBUG_ASSERT(n++ < 100);
+	}
+	printk(" NULL\n");
+}
+
+static int tracking_list_length(void)
+{
+	struct _mali_osk_lock_debug_s *l;
+	u32 n = 0;
+	l = lock_lookup_list;
+
+	while (NULL != l) {
+		l = l->next;
+		n++;
+		MALI_DEBUG_ASSERT(n < 100);
+	}
+	return n;
+}
+
+static mali_bool add_lock_to_log_and_check(struct _mali_osk_lock_debug_s *lock, uint32_t tid)
+{
+	mali_bool ret = MALI_FALSE;
+	_mali_osk_lock_order_t highest_order_for_tid = _MALI_OSK_LOCK_ORDER_FIRST;
+	struct _mali_osk_lock_debug_s *highest_order_lock = (struct _mali_osk_lock_debug_s *)0xbeefbabe;
+	struct _mali_osk_lock_debug_s *l;
+	unsigned long local_lock_flag;
+	u32 len;
+
+	spin_lock_irqsave(&lock_tracking_lock, local_lock_flag);
+	len = tracking_list_length();
+
+	l = lock_lookup_list;
+	if (NULL == l) { /* This is the first lock taken by this thread -- record and return true */
+		lock_lookup_list = lock;
+		spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag);
+		return MALI_TRUE;
+	} else {
+		/* Traverse the locks taken and find the lock of the highest order.
+		 * Since several threads may hold locks, each lock's owner must be
+		 * checked so that locks not owned by this thread can be ignored. */
+		for (;;) {
+			MALI_DEBUG_ASSERT_POINTER(l);
+			if (tid == l->owner && l->order >= highest_order_for_tid) {
+				highest_order_for_tid = l->order;
+				highest_order_lock = l;
+			}
+
+			if (NULL != l->next) {
+				l = l->next;
+			} else {
+				break;
+			}
+		}
+
+		/* Append the new lock at the tail of the tracking list. */
+		l->next = lock;
+		lock->next = NULL;
+	}
+
+	/* We have now found the highest order lock currently held by this thread and can see if it is
+	 * legal to take the requested lock.
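+	 * Note that the comparison below is strict ('<'), so taking a second lock of
+	 * the same order as one already held is also reported as a violation.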
*/ + ret = highest_order_for_tid < lock->order; + + if (!ret) { + printk(KERN_ERR "Took lock of order %d (%s) while holding lock of order %d (%s)\n", + lock->order, lock_order_to_string(lock->order), + highest_order_for_tid, lock_order_to_string(highest_order_for_tid)); + dump_lock_tracking_list(); + } + + if (len + 1 != tracking_list_length()) { + printk(KERN_ERR "************ lock: %p\n", lock); + printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length()); + dump_lock_tracking_list(); + MALI_DEBUG_ASSERT_POINTER(NULL); + } + + spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag); + return ret; +} + +static void remove_lock_from_log(struct _mali_osk_lock_debug_s *lock, uint32_t tid) +{ + struct _mali_osk_lock_debug_s *curr; + struct _mali_osk_lock_debug_s *prev = NULL; + unsigned long local_lock_flag; + u32 len; + u32 n = 0; + + spin_lock_irqsave(&lock_tracking_lock, local_lock_flag); + len = tracking_list_length(); + curr = lock_lookup_list; + + if (NULL == curr) { + printk(KERN_ERR "Error: Lock tracking list was empty on call to remove_lock_from_log\n"); + dump_lock_tracking_list(); + } + + MALI_DEBUG_ASSERT_POINTER(curr); + + + while (lock != curr) { + prev = curr; + + MALI_DEBUG_ASSERT_POINTER(curr); + curr = curr->next; + MALI_DEBUG_ASSERT(n++ < 100); + } + + if (NULL == prev) { + lock_lookup_list = curr->next; + } else { + MALI_DEBUG_ASSERT_POINTER(curr); + MALI_DEBUG_ASSERT_POINTER(prev); + prev->next = curr->next; + } + + lock->next = NULL; + + if (len - 1 != tracking_list_length()) { + printk(KERN_ERR "************ lock: %p\n", lock); + printk(KERN_ERR "************ before: %d *** after: %d ****\n", len, tracking_list_length()); + dump_lock_tracking_list(); + MALI_DEBUG_ASSERT_POINTER(NULL); + } + + spin_unlock_irqrestore(&lock_tracking_lock, local_lock_flag); +} + +static const char *const lock_order_to_string(_mali_osk_lock_order_t order) +{ + switch (order) { + case _MALI_OSK_LOCK_ORDER_SESSIONS: + return "_MALI_OSK_LOCK_ORDER_SESSIONS"; + break; + case _MALI_OSK_LOCK_ORDER_MEM_SESSION: + return "_MALI_OSK_LOCK_ORDER_MEM_SESSION"; + break; + case _MALI_OSK_LOCK_ORDER_MEM_INFO: + return "_MALI_OSK_LOCK_ORDER_MEM_INFO"; + break; + case _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE: + return "_MALI_OSK_LOCK_ORDER_MEM_PT_CACHE"; + break; + case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP: + return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP"; + break; + case _MALI_OSK_LOCK_ORDER_PM_EXECUTION: + return "_MALI_OSK_LOCK_ORDER_PM_EXECUTION"; + break; + case _MALI_OSK_LOCK_ORDER_EXECUTOR: + return "_MALI_OSK_LOCK_ORDER_EXECUTOR"; + break; + case _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM: + return "_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM"; + break; + case _MALI_OSK_LOCK_ORDER_SCHEDULER: + return "_MALI_OSK_LOCK_ORDER_SCHEDULER"; + break; + case _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED: + return "_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED"; + break; + case _MALI_OSK_LOCK_ORDER_DMA_COMMAND: + return "_MALI_OSK_LOCK_ORDER_DMA_COMMAND"; + break; + case _MALI_OSK_LOCK_ORDER_PROFILING: + return "_MALI_OSK_LOCK_ORDER_PROFILING"; + break; + case _MALI_OSK_LOCK_ORDER_L2: + return "_MALI_OSK_LOCK_ORDER_L2"; + break; + case _MALI_OSK_LOCK_ORDER_L2_COMMAND: + return "_MALI_OSK_LOCK_ORDER_L2_COMMAND"; + break; + case _MALI_OSK_LOCK_ORDER_UTILIZATION: + return "_MALI_OSK_LOCK_ORDER_UTILIZATION"; + break; + case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS: + return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS"; + break; + case _MALI_OSK_LOCK_ORDER_PM_STATE: + return "_MALI_OSK_LOCK_ORDER_PM_STATE"; + 
break;
+	default:
+		return "<UNKNOWN>";
+	}
+}
+#endif /* LOCK_ORDER_CHECKING */
+#endif /* DEBUG */
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_locks.h b/drivers/gpu/arm/mali400/linux/mali_osk_locks.h
--- a/drivers/gpu/arm/mali400/linux/mali_osk_locks.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_osk_locks.h	2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.h
+ * Defines OS abstraction of lock and mutex
+ */
+#ifndef _MALI_OSK_LOCKS_H
+#define _MALI_OSK_LOCKS_H
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+
+#include <linux/slab.h>
+
+#include "mali_osk_types.h"
+
+#ifdef _cplusplus
+extern "C" {
+#endif
+
+	/* When DEBUG is enabled, this struct will be used to track owner, mode and order checking */
+#ifdef DEBUG
+	struct _mali_osk_lock_debug_s {
+		u32 owner;
+		_mali_osk_lock_flags_t orig_flags;
+		_mali_osk_lock_order_t order;
+		struct _mali_osk_lock_debug_s *next;
+	};
+#endif
+
+	/* Abstraction of spinlock_t */
+	struct _mali_osk_spinlock_s {
+#ifdef DEBUG
+		struct _mali_osk_lock_debug_s checker;
+#endif
+		spinlock_t spinlock;
+	};
+
+	/* Abstraction of spinlock_t plus a flags field used to store the register state while the lock is held */
+	struct _mali_osk_spinlock_irq_s {
+#ifdef DEBUG
+		struct _mali_osk_lock_debug_s checker;
+#endif
+
+		spinlock_t spinlock;
+		unsigned long flags;
+	};
+
+	/* Abstraction of rw_semaphore in OS */
+	struct _mali_osk_mutex_rw_s {
+#ifdef DEBUG
+		struct _mali_osk_lock_debug_s checker;
+		_mali_osk_lock_mode_t mode;
+#endif
+
+		struct rw_semaphore rw_sema;
+	};
+
+	/* Mutex and mutex_interruptible functions share the same osk mutex struct */
+	struct _mali_osk_mutex_s {
+#ifdef DEBUG
+		struct _mali_osk_lock_debug_s checker;
+#endif
+		struct mutex mutex;
+	};
+
+#ifdef DEBUG
+	/** @brief The _mali_osk_locks_debug_init/add/remove() functions are declared when DEBUG is enabled and
+	 * defined in file mali_osk_locks.c. When LOCK_ORDER_CHECKING is enabled, these functions are called
+	 * whenever we init/lock/unlock a lock/mutex, so that the lock order of a given tid can be tracked. */
+	void _mali_osk_locks_debug_init(struct _mali_osk_lock_debug_s *checker, _mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order);
+	void _mali_osk_locks_debug_add(struct _mali_osk_lock_debug_s *checker);
+	void _mali_osk_locks_debug_remove(struct _mali_osk_lock_debug_s *checker);
+
+	/** @brief This function can return a given lock's owner when DEBUG is enabled.
*/ + static inline u32 _mali_osk_lock_get_owner(struct _mali_osk_lock_debug_s *lock) + { + return lock->owner; + } +#else +#define _mali_osk_locks_debug_init(x, y, z) do {} while (0) +#define _mali_osk_locks_debug_add(x) do {} while (0) +#define _mali_osk_locks_debug_remove(x) do {} while (0) +#endif + + /** @brief Before use _mali_osk_spin_lock, init function should be used to allocate memory and initial spinlock*/ + static inline _mali_osk_spinlock_t *_mali_osk_spinlock_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order) + { + _mali_osk_spinlock_t *lock = NULL; + + lock = kmalloc(sizeof(_mali_osk_spinlock_t), GFP_KERNEL); + if (NULL == lock) { + return NULL; + } + spin_lock_init(&lock->spinlock); + _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order); + return lock; + } + + /** @brief Lock a spinlock */ + static inline void _mali_osk_spinlock_lock(_mali_osk_spinlock_t *lock) + { + BUG_ON(NULL == lock); + spin_lock(&lock->spinlock); + _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock); + } + + /** @brief Unlock a spinlock */ + static inline void _mali_osk_spinlock_unlock(_mali_osk_spinlock_t *lock) + { + BUG_ON(NULL == lock); + _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock); + spin_unlock(&lock->spinlock); + } + + /** @brief Free a memory block which the argument lock pointed to and its type must be + * _mali_osk_spinlock_t *. */ + static inline void _mali_osk_spinlock_term(_mali_osk_spinlock_t *lock) + { + /* Parameter validation */ + BUG_ON(NULL == lock); + + /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */ + kfree(lock); + } + + /** @brief Before _mali_osk_spinlock_irq_lock/unlock/term() is called, init function should be + * called to initial spinlock and flags in struct _mali_osk_spinlock_irq_t. */ + static inline _mali_osk_spinlock_irq_t *_mali_osk_spinlock_irq_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order) + { + _mali_osk_spinlock_irq_t *lock = NULL; + lock = kmalloc(sizeof(_mali_osk_spinlock_irq_t), GFP_KERNEL); + + if (NULL == lock) { + return NULL; + } + + lock->flags = 0; + spin_lock_init(&lock->spinlock); + _mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order); + return lock; + } + + /** @brief Lock spinlock and save the register's state */ + static inline void _mali_osk_spinlock_irq_lock(_mali_osk_spinlock_irq_t *lock) + { + unsigned long tmp_flags; + + BUG_ON(NULL == lock); + spin_lock_irqsave(&lock->spinlock, tmp_flags); + lock->flags = tmp_flags; + _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock); + } + + /** @brief Unlock spinlock with saved register's state */ + static inline void _mali_osk_spinlock_irq_unlock(_mali_osk_spinlock_irq_t *lock) + { + BUG_ON(NULL == lock); + _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock); + spin_unlock_irqrestore(&lock->spinlock, lock->flags); + } + + /** @brief Destroy a given memory block which lock pointed to, and the lock type must be + * _mali_osk_spinlock_irq_t *. */ + static inline void _mali_osk_spinlock_irq_term(_mali_osk_spinlock_irq_t *lock) + { + /* Parameter validation */ + BUG_ON(NULL == lock); + + /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */ + kfree(lock); + } + + /** @brief Before _mali_osk_mutex_rw_wait/signal/term() is called, we should call + * _mali_osk_mutex_rw_init() to kmalloc a memory block and initial part of elements in it. 
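+	 *
+	 * A minimal usage sketch (illustrative only, not driver code):
+	 *
+	 *   _mali_osk_mutex_rw_t *rw = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_UNORDERED,
+	 *                                                      _MALI_OSK_LOCK_ORDER_FIRST);
+	 *   if (NULL == rw) return _MALI_OSK_ERR_NOMEM;
+	 *   _mali_osk_mutex_rw_wait(rw, _MALI_OSK_LOCKMODE_RO);    (read-side critical section)
+	 *   _mali_osk_mutex_rw_signal(rw, _MALI_OSK_LOCKMODE_RO);
+	 *   _mali_osk_mutex_rw_term(rw);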
*/
+	static inline _mali_osk_mutex_rw_t *_mali_osk_mutex_rw_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+	{
+		_mali_osk_mutex_rw_t *lock = NULL;
+
+		lock = kmalloc(sizeof(_mali_osk_mutex_rw_t), GFP_KERNEL);
+
+		if (NULL == lock) {
+			return NULL;
+		}
+
+		init_rwsem(&lock->rw_sema);
+		_mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+		return lock;
+	}
+
+	/** @brief When calling the _mali_osk_mutex_rw_wait/signal() functions, the second argument mode
+	 * must be either _MALI_OSK_LOCKMODE_RO or _MALI_OSK_LOCKMODE_RW */
+	static inline void _mali_osk_mutex_rw_wait(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+	{
+		BUG_ON(NULL == lock);
+		BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+
+		if (mode == _MALI_OSK_LOCKMODE_RO) {
+			down_read(&lock->rw_sema);
+		} else {
+			down_write(&lock->rw_sema);
+		}
+
+#ifdef DEBUG
+		/* Record which mode the lock was taken in. */
+		lock->mode = mode;
+		_mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+#endif
+	}
+
+	/** @brief Release lock->rw_sema with up_read()/up_write() according to the argument mode's value. */
+	static inline void _mali_osk_mutex_rw_signal(_mali_osk_mutex_rw_t *lock, _mali_osk_lock_mode_t mode)
+	{
+		BUG_ON(NULL == lock);
+		BUG_ON(!(_MALI_OSK_LOCKMODE_RO == mode || _MALI_OSK_LOCKMODE_RW == mode));
+#ifdef DEBUG
+		/* make sure the thread releasing the lock actually was the owner */
+		if (mode == _MALI_OSK_LOCKMODE_RW) {
+			_mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock);
+			/* This lock now has no owner */
+			lock->checker.owner = 0;
+		}
+#endif
+
+		if (mode == _MALI_OSK_LOCKMODE_RO) {
+			up_read(&lock->rw_sema);
+		} else {
+			up_write(&lock->rw_sema);
+		}
+	}
+
+	/** @brief Free the memory block that lock points to; its type must be
+	 * _mali_osk_mutex_rw_t *. */
+	static inline void _mali_osk_mutex_rw_term(_mali_osk_mutex_rw_t *lock)
+	{
+		/* Parameter validation */
+		BUG_ON(NULL == lock);
+
+		/* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */
+		kfree(lock);
+	}
+
+	/** @brief Mutex & mutex_interruptible share the same init and term functions, because they use the
+	 * same osk mutex struct; the difference between them is which locking function they use */
+	static inline _mali_osk_mutex_t *_mali_osk_mutex_init(_mali_osk_lock_flags_t flags, _mali_osk_lock_order_t order)
+	{
+		_mali_osk_mutex_t *lock = NULL;
+
+		lock = kmalloc(sizeof(_mali_osk_mutex_t), GFP_KERNEL);
+
+		if (NULL == lock) {
+			return NULL;
+		}
+		mutex_init(&lock->mutex);
+
+		_mali_osk_locks_debug_init((struct _mali_osk_lock_debug_s *)lock, flags, order);
+		return lock;
+	}
+
+	/** @brief Lock the lock->mutex with the mutex_lock_interruptible() function */
+	static inline _mali_osk_errcode_t _mali_osk_mutex_wait_interruptible(_mali_osk_mutex_t *lock)
+	{
+		_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+		BUG_ON(NULL == lock);
+
+		if (mutex_lock_interruptible(&lock->mutex)) {
+			printk(KERN_WARNING "Mali: Can not lock mutex\n");
+			err = _MALI_OSK_ERR_RESTARTSYSCALL;
+		}
+
+		_mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock);
+		return err;
+	}
+
+	/** @brief Unlock the lock->mutex which was locked with the mutex_lock_interruptible() function.
*/ + static inline void _mali_osk_mutex_signal_interruptible(_mali_osk_mutex_t *lock) + { + BUG_ON(NULL == lock); + _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock); + mutex_unlock(&lock->mutex); + } + + /** @brief Lock the lock->mutex just with mutex_lock() function which could not be interruptted. */ + static inline void _mali_osk_mutex_wait(_mali_osk_mutex_t *lock) + { + BUG_ON(NULL == lock); + mutex_lock(&lock->mutex); + _mali_osk_locks_debug_add((struct _mali_osk_lock_debug_s *)lock); + } + + /** @brief Unlock the lock->mutex which is locked with mutex_lock() function. */ + static inline void _mali_osk_mutex_signal(_mali_osk_mutex_t *lock) + { + BUG_ON(NULL == lock); + _mali_osk_locks_debug_remove((struct _mali_osk_lock_debug_s *)lock); + mutex_unlock(&lock->mutex); + } + + /** @brief Free a given memory block which lock point. */ + static inline void _mali_osk_mutex_term(_mali_osk_mutex_t *lock) + { + /* Parameter validation */ + BUG_ON(NULL == lock); + + /* Linux requires no explicit termination of spinlocks, semaphores, or rw_semaphores */ + kfree(lock); + } + +#ifdef _cplusplus +} +#endif + +#endif diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_low_level_mem.c b/drivers/gpu/arm/mali400/linux/mali_osk_low_level_mem.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_low_level_mem.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_low_level_mem.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_low_level_mem.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include +#include +#include + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_ukk.h" + +void _mali_osk_mem_barrier(void) +{ + mb(); +} + +void _mali_osk_write_mem_barrier(void) +{ + wmb(); +} + +mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description) +{ + return (mali_io_address)ioremap_nocache(phys, size); +} + +void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address virt) +{ + iounmap((void *)virt); +} + +_mali_osk_errcode_t inline _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description) +{ +#if MALI_LICENSE_IS_GPL + return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */ +#else + return ((NULL == request_mem_region(phys, size, description)) ? 
_MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK); +#endif +} + +void inline _mali_osk_mem_unreqregion(uintptr_t phys, u32 size) +{ +#if !MALI_LICENSE_IS_GPL + release_mem_region(phys, size); +#endif +} + +void inline _mali_osk_mem_iowrite32_relaxed(volatile mali_io_address addr, u32 offset, u32 val) +{ + __raw_writel(cpu_to_le32(val), ((u8 *)addr) + offset); +} + +u32 inline _mali_osk_mem_ioread32(volatile mali_io_address addr, u32 offset) +{ + return ioread32(((u8 *)addr) + offset); +} + +void inline _mali_osk_mem_iowrite32(volatile mali_io_address addr, u32 offset, u32 val) +{ + iowrite32(val, ((u8 *)addr) + offset); +} + +void _mali_osk_cache_flushall(void) +{ + /** @note Cached memory is not currently supported in this implementation */ +} + +void _mali_osk_cache_ensure_uncached_range_flushed(void *uncached_mapping, u32 offset, u32 size) +{ + _mali_osk_write_mem_barrier(); +} + +u32 _mali_osk_mem_write_safe(void __user *dest, const void __user *src, u32 size) +{ +#define MALI_MEM_SAFE_COPY_BLOCK_SIZE 4096 + u32 retval = 0; + void *temp_buf; + + temp_buf = kmalloc(MALI_MEM_SAFE_COPY_BLOCK_SIZE, GFP_KERNEL); + if (NULL != temp_buf) { + u32 bytes_left_to_copy = size; + u32 i; + for (i = 0; i < size; i += MALI_MEM_SAFE_COPY_BLOCK_SIZE) { + u32 size_to_copy; + u32 size_copied; + u32 bytes_left; + + if (bytes_left_to_copy > MALI_MEM_SAFE_COPY_BLOCK_SIZE) { + size_to_copy = MALI_MEM_SAFE_COPY_BLOCK_SIZE; + } else { + size_to_copy = bytes_left_to_copy; + } + + bytes_left = copy_from_user(temp_buf, ((char *)src) + i, size_to_copy); + size_copied = size_to_copy - bytes_left; + + bytes_left = copy_to_user(((char *)dest) + i, temp_buf, size_copied); + size_copied -= bytes_left; + + bytes_left_to_copy -= size_copied; + retval += size_copied; + + if (size_copied != size_to_copy) { + break; /* Early out, we was not able to copy this entire block */ + } + } + + kfree(temp_buf); + } + + return retval; +} + +_mali_osk_errcode_t _mali_ukk_mem_write_safe(_mali_uk_mem_write_safe_s *args) +{ + void __user *src; + void __user *dst; + struct mali_session_data *session; + + MALI_DEBUG_ASSERT_POINTER(args); + + session = (struct mali_session_data *)(uintptr_t)args->ctx; + + if (NULL == session) { + return _MALI_OSK_ERR_INVALID_ARGS; + } + + src = (void __user *)(uintptr_t)args->src; + dst = (void __user *)(uintptr_t)args->dest; + + /* Return number of bytes actually copied */ + args->size = _mali_osk_mem_write_safe(dst, src, args->size); + return _MALI_OSK_ERR_OK; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_mali.c b/drivers/gpu/arm/mali400/linux/mali_osk_mali.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_mali.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_mali.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,491 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mali/mali_utgard.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h"           /* kernel side OS functions */
+#include "mali_kernel_linux.h"
+
+static mali_bool mali_secure_mode_enabled = MALI_FALSE;
+static mali_bool mali_secure_mode_supported = MALI_FALSE;
+
+/* Function that deinits the mali gpu secure mode */
+void (*mali_secure_mode_deinit)(void) = NULL;
+/* Function that resets the GPU and enables the mali gpu secure mode */
+int (*mali_gpu_reset_and_secure_mode_enable)(void) = NULL;
+/* Function that resets the GPU and disables the mali gpu secure mode */
+int (*mali_gpu_reset_and_secure_mode_disable)(void) = NULL;
+
+#ifdef CONFIG_MALI_DT
+
+#define MALI_OSK_INVALID_RESOURCE_ADDRESS 0xFFFFFFFF
+
+/**
+ * Define the max number of resources we could have.
+ */
+#define MALI_OSK_MAX_RESOURCE_NUMBER 27
+
+/**
+ * Define the number of resources with interrupts; they are
+ * the first 20 elements in the array mali_osk_resource_bank.
+ */
+#define MALI_OSK_RESOURCE_WITH_IRQ_NUMBER 20
+
+/**
+ * pp core start and end location in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_PP_LOCATION_START 2
+#define MALI_OSK_RESOURCE_PP_LOCATION_END 17
+
+/**
+ * L2 cache start and end location in the mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_L2_LOCATION_START 20
+#define MALI_OSK_RESOURCE_l2_LOCATION_END 22
+
+/**
+ * DMA unit location.
+ */
+#define MALI_OSK_RESOURCE_DMA_LOCATION 26
+
+static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = {
+	{.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",},
+	{.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",},
+	{.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "IRQPP0",},
+	{.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "IRQPPMMU0",},
+	{.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "IRQPP1",},
+	{.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "IRQPPMMU1",},
+	{.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "IRQPP2",},
+	{.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "IRQPPMMU2",},
+	{.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "IRQPP3",},
+	{.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "IRQPPMMU3",},
+	{.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "IRQPP4",},
+	{.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "IRQPPMMU4",},
+	{.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "IRQPP5",},
+	{.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "IRQPPMMU5",},
+	{.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "IRQPP6",},
+	{.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "IRQPPMMU6",},
+	{.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "IRQPP7",},
+	{.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "IRQPPMMU7",},
+	{.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "IRQPP",},
+	{.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "IRQPMU",},
+	{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE0,},
+	{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE1,},
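+	/* The third L2 instance below is only present on mali-450 designs with more
+	 * than four PP cores; _mali_osk_resource_initialize() invalidates the unused
+	 * L2 entries at runtime. */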
{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE2,}, + {.description = "Mali_PP_MMU_Broadcast", .base = MALI_OFFSET_PP_BCAST_MMU,}, + {.description = "Mali_Broadcast", .base = MALI_OFFSET_BCAST,}, + {.description = "Mali_DLBU", .base = MALI_OFFSET_DLBU,}, + {.description = "Mali_DMA", .base = MALI_OFFSET_DMA,}, +}; + +static int _mali_osk_get_compatible_name(const char **out_string) +{ + struct device_node *node = mali_platform_device->dev.of_node; + + MALI_DEBUG_ASSERT(NULL != node); + + return of_property_read_string(node, "compatible", out_string); +} + +_mali_osk_errcode_t _mali_osk_resource_initialize(void) +{ + mali_bool mali_is_450 = MALI_FALSE, mali_is_470 = MALI_FALSE; + int i, pp_core_num = 0, l2_core_num = 0; + struct resource *res; + const char *compatible_name = NULL; + + if (0 == _mali_osk_get_compatible_name(&compatible_name)) { + if (0 == strncmp(compatible_name, "arm,mali-450", strlen("arm,mali-450"))) { + mali_is_450 = MALI_TRUE; + MALI_DEBUG_PRINT(2, ("mali-450 device tree detected.")); + } else if (0 == strncmp(compatible_name, "arm,mali-470", strlen("arm,mali-470"))) { + mali_is_470 = MALI_TRUE; + MALI_DEBUG_PRINT(2, ("mali-470 device tree detected.")); + } + } + + for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) { + res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name); + if (res) { + mali_osk_resource_bank[i].irq = res->start; + } else { + mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS; + } + } + + for (i = MALI_OSK_RESOURCE_PP_LOCATION_START; i <= MALI_OSK_RESOURCE_PP_LOCATION_END; i++) { + if (MALI_OSK_INVALID_RESOURCE_ADDRESS != mali_osk_resource_bank[i].base) { + pp_core_num++; + } + } + + /* We have to divide by 2, because we caculate twice for only one pp(pp_core and pp_mmu_core). */ + if (0 != pp_core_num % 2) { + MALI_DEBUG_PRINT(2, ("The value of pp core number isn't normal.")); + return _MALI_OSK_ERR_FAULT; + } + + pp_core_num /= 2; + + /** + * we can caculate the number of l2 cache core according the number of pp core number + * and device type(mali400/mali450/mali470). + */ + l2_core_num = 1; + if (mali_is_450) { + if (pp_core_num > 4) { + l2_core_num = 3; + } else if (pp_core_num <= 4) { + l2_core_num = 2; + } + } + + for (i = MALI_OSK_RESOURCE_l2_LOCATION_END; i > MALI_OSK_RESOURCE_L2_LOCATION_START + l2_core_num - 1; i--) { + mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS; + } + + /* If device is not mali-450 type, we have to remove related resource from resource bank. */ + if (!(mali_is_450 || mali_is_470)) { + for (i = MALI_OSK_RESOURCE_l2_LOCATION_END + 1; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) { + mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS; + } + } + + if (mali_is_470) + mali_osk_resource_bank[MALI_OSK_RESOURCE_DMA_LOCATION].base = MALI_OSK_INVALID_RESOURCE_ADDRESS; + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res) +{ + int i; + + if (NULL == mali_platform_device) { + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + /* Traverse all of resources in resources bank to find the matching one. 
*/ + for (i = 0; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) { + if (mali_osk_resource_bank[i].base == addr) { + if (NULL != res) { + res->base = addr + _mali_osk_resource_base_address(); + res->description = mali_osk_resource_bank[i].description; + res->irq = mali_osk_resource_bank[i].irq; + } + return _MALI_OSK_ERR_OK; + } + } + + return _MALI_OSK_ERR_ITEM_NOT_FOUND; +} + +uintptr_t _mali_osk_resource_base_address(void) +{ + struct resource *reg_res = NULL; + uintptr_t ret = 0; + + reg_res = platform_get_resource(mali_platform_device, IORESOURCE_MEM, 0); + + if (NULL != reg_res) { + ret = reg_res->start; + } + + return ret; +} + +void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size) +{ + struct device_node *node = mali_platform_device->dev.of_node; + struct property *prop; + const __be32 *p; + int length = 0, i = 0; + u32 u; + + MALI_DEBUG_PRINT(2, ("Get pmu config from device tree configuration.\n")); + + MALI_DEBUG_ASSERT(NULL != node); + + if (!of_get_property(node, "pmu_domain_config", &length)) { + return; + } + + if (array_size != length / sizeof(u32)) { + MALI_PRINT_ERROR(("Wrong pmu domain config in device tree.")); + return; + } + + of_property_for_each_u32(node, "pmu_domain_config", prop, p, u) { + domain_config_array[i] = (u16)u; + i++; + } + + return; +} + +u32 _mali_osk_get_pmu_switch_delay(void) +{ + struct device_node *node = mali_platform_device->dev.of_node; + u32 switch_delay; + + MALI_DEBUG_ASSERT(NULL != node); + + if (0 == of_property_read_u32(node, "pmu_switch_delay", &switch_delay)) { + return switch_delay; + } else { + MALI_DEBUG_PRINT(2, ("Couldn't find pmu_switch_delay in device tree configuration.\n")); + } + + return 0; +} + +#else /* CONFIG_MALI_DT */ + +_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res) +{ + int i; + uintptr_t phys_addr; + + if (NULL == mali_platform_device) { + /* Not connected to a device */ + return _MALI_OSK_ERR_ITEM_NOT_FOUND; + } + + phys_addr = addr + _mali_osk_resource_base_address(); + for (i = 0; i < mali_platform_device->num_resources; i++) { + if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) && + mali_platform_device->resource[i].start == phys_addr) { + if (NULL != res) { + res->base = phys_addr; + res->description = mali_platform_device->resource[i].name; + + /* Any (optional) IRQ resource belonging to this resource will follow */ + if ((i + 1) < mali_platform_device->num_resources && + IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i + 1]))) { + res->irq = mali_platform_device->resource[i + 1].start; + } else { + res->irq = -1; + } + } + return _MALI_OSK_ERR_OK; + } + } + + return _MALI_OSK_ERR_ITEM_NOT_FOUND; +} + +uintptr_t _mali_osk_resource_base_address(void) +{ + uintptr_t lowest_addr = (uintptr_t)(0 - 1); + uintptr_t ret = 0; + + if (NULL != mali_platform_device) { + int i; + for (i = 0; i < mali_platform_device->num_resources; i++) { + if (mali_platform_device->resource[i].flags & IORESOURCE_MEM && + mali_platform_device->resource[i].start < lowest_addr) { + lowest_addr = mali_platform_device->resource[i].start; + ret = lowest_addr; + } + } + } + + return ret; +} + +void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size) +{ + _mali_osk_device_data data = { 0, }; + + MALI_DEBUG_PRINT(2, ("Get pmu config from platform device data.\n")); + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + /* Copy the custom customer power domain config */ + _mali_osk_memcpy(domain_config_array, 
data.pmu_domain_config, sizeof(data.pmu_domain_config)); + } + + return; +} + +u32 _mali_osk_get_pmu_switch_delay(void) +{ + _mali_osk_errcode_t err; + _mali_osk_device_data data = { 0, }; + + err = _mali_osk_device_data_get(&data); + + if (_MALI_OSK_ERR_OK == err) { + return data.pmu_switch_delay; + } + + return 0; +} +#endif /* CONFIG_MALI_DT */ + +_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data) +{ + MALI_DEBUG_ASSERT_POINTER(data); + + if (NULL != mali_platform_device) { + struct mali_gpu_device_data *os_data = NULL; + + os_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data; + if (NULL != os_data) { + /* Copy data from OS dependant struct to Mali neutral struct (identical!) */ + BUILD_BUG_ON(sizeof(*os_data) != sizeof(*data)); + _mali_osk_memcpy(data, os_data, sizeof(*os_data)); + + return _MALI_OSK_ERR_OK; + } + } + + return _MALI_OSK_ERR_ITEM_NOT_FOUND; +} + +u32 _mali_osk_identify_gpu_resource(void) +{ + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE1, NULL)) + /* Mali 450 */ + return 0x450; + + if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_DLBU, NULL)) + /* Mali 470 */ + return 0x470; + + /* Mali 400 */ + return 0x400; +} + +mali_bool _mali_osk_shared_interrupts(void) +{ + u32 irqs[128]; + u32 i, j, irq, num_irqs_found = 0; + + MALI_DEBUG_ASSERT_POINTER(mali_platform_device); + MALI_DEBUG_ASSERT(128 >= mali_platform_device->num_resources); + + for (i = 0; i < mali_platform_device->num_resources; i++) { + if (IORESOURCE_IRQ & mali_platform_device->resource[i].flags) { + irq = mali_platform_device->resource[i].start; + + for (j = 0; j < num_irqs_found; ++j) { + if (irq == irqs[j]) { + return MALI_TRUE; + } + } + + irqs[num_irqs_found++] = irq; + } + } + + return MALI_FALSE; +} + +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_init(void) +{ + _mali_osk_device_data data = { 0, }; + + if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) { + if ((NULL != data.secure_mode_init) && (NULL != data.secure_mode_deinit) + && (NULL != data.gpu_reset_and_secure_mode_enable) && (NULL != data.gpu_reset_and_secure_mode_disable)) { + int err = data.secure_mode_init(); + if (err) { + MALI_DEBUG_PRINT(1, ("Failed to init gpu secure mode.\n")); + return _MALI_OSK_ERR_FAULT; + } + + mali_secure_mode_deinit = data.secure_mode_deinit; + mali_gpu_reset_and_secure_mode_enable = data.gpu_reset_and_secure_mode_enable; + mali_gpu_reset_and_secure_mode_disable = data.gpu_reset_and_secure_mode_disable; + + mali_secure_mode_supported = MALI_TRUE; + mali_secure_mode_enabled = MALI_FALSE; + return _MALI_OSK_ERR_OK; + } + } + MALI_DEBUG_PRINT(3, ("GPU secure mode not supported.\n")); + return _MALI_OSK_ERR_UNSUPPORTED; + +} + +_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void) +{ + if (NULL != mali_secure_mode_deinit) { + mali_secure_mode_deinit(); + mali_secure_mode_enabled = MALI_FALSE; + mali_secure_mode_supported = MALI_FALSE; + return _MALI_OSK_ERR_OK; + } + MALI_DEBUG_PRINT(3, ("GPU secure mode not supported.\n")); + return _MALI_OSK_ERR_UNSUPPORTED; + +} + + +_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_enable(void) +{ + /* the mali executor lock must be held before enter this function. 
*/ + + MALI_DEBUG_ASSERT(MALI_FALSE == mali_secure_mode_enabled); + + if (NULL != mali_gpu_reset_and_secure_mode_enable) { + if (mali_gpu_reset_and_secure_mode_enable()) { + MALI_DEBUG_PRINT(1, ("Failed to reset GPU or enable gpu secure mode.\n")); + return _MALI_OSK_ERR_FAULT; + } + mali_secure_mode_enabled = MALI_TRUE; + return _MALI_OSK_ERR_OK; + } + MALI_DEBUG_PRINT(1, ("GPU secure mode not supported.\n")); + return _MALI_OSK_ERR_UNSUPPORTED; +} + +_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_disable(void) +{ + /* The Mali executor lock must be held before entering this function. */ + + MALI_DEBUG_ASSERT(MALI_TRUE == mali_secure_mode_enabled); + + if (NULL != mali_gpu_reset_and_secure_mode_disable) { + if (mali_gpu_reset_and_secure_mode_disable()) { + MALI_DEBUG_PRINT(1, ("Failed to reset GPU or disable gpu secure mode.\n")); + return _MALI_OSK_ERR_FAULT; + } + mali_secure_mode_enabled = MALI_FALSE; + + return _MALI_OSK_ERR_OK; + + } + MALI_DEBUG_PRINT(1, ("GPU secure mode not supported.\n")); + return _MALI_OSK_ERR_UNSUPPORTED; + +} + +mali_bool _mali_osk_gpu_secure_mode_is_enabled(void) +{ + return mali_secure_mode_enabled; +} + +mali_bool _mali_osk_gpu_secure_mode_is_supported(void) +{ + return mali_secure_mode_supported; +} + + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_math.c b/drivers/gpu/arm/mali400/linux/mali_osk_math.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_math.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_math.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_math.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include "mali_osk.h" +#include <linux/bitops.h> + +u32 _mali_osk_clz(u32 input) +{ + return 32 - fls(input); +} + +u32 _mali_osk_fls(u32 input) +{ + return fls(input); +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_memory.c b/drivers/gpu/arm/mali400/linux/mali_osk_memory.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_memory.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_memory.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +/** + * @file mali_osk_memory.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include "mali_osk.h" +#include <linux/slab.h> +#include <linux/vmalloc.h> + +void inline *_mali_osk_calloc(u32 n, u32 size) +{ + return kcalloc(n, size, GFP_KERNEL); +} + +void inline *_mali_osk_malloc(u32 size) +{ + return kmalloc(size, GFP_KERNEL); +} + +void inline _mali_osk_free(void *ptr) +{ + kfree(ptr); +} + +void inline *_mali_osk_valloc(u32 size) +{ + return vmalloc(size); +} + +void inline _mali_osk_vfree(void *ptr) +{ + vfree(ptr); +} + +void inline *_mali_osk_memcpy(void *dst, const void *src, u32 len) +{ + return memcpy(dst, src, len); +} + +void inline *_mali_osk_memset(void *s, u32 c, u32 n) +{ + return memset(s, c, n); +} + +mali_bool _mali_osk_mem_check_allocated(u32 max_allocated) +{ + /* No need to prevent an out-of-memory dialogue appearing on Linux, + * so we always return MALI_TRUE. + */ + return MALI_TRUE; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_misc.c b/drivers/gpu/arm/mali400/linux/mali_osk_misc.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_misc.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_misc.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_misc.c + * Implementation of the OS abstraction layer for the kernel device driver + */ +#include <linux/kernel.h> +#include <asm/uaccess.h> +#include <asm/cacheflush.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/module.h> +#include "mali_osk.h" + +#if !defined(CONFIG_MALI_QUIET) +void _mali_osk_dbgmsg(const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + vprintk(fmt, args); + va_end(args); +} +#endif /* !defined(CONFIG_MALI_QUIET) */ + +u32 _mali_osk_snprintf(char *buf, u32 size, const char *fmt, ...) +{ + int res; + va_list args; + va_start(args, fmt); + + res = vscnprintf(buf, (size_t)size, fmt, args); + + va_end(args); + return res; +} + +void _mali_osk_abort(void) +{ + /* make a simple fault by dereferencing a NULL pointer */ + dump_stack(); + *(int *)0 = 0; +} + +void _mali_osk_break(void) +{ + _mali_osk_abort(); +} + +u32 _mali_osk_get_pid(void) +{ + /* Thread group ID is the process ID on Linux */ + return (u32)current->tgid; +} + +char *_mali_osk_get_comm(void) +{ + return (char *)current->comm; +} + + +u32 _mali_osk_get_tid(void) +{ + /* pid is actually identifying the thread on Linux */ + u32 tid = current->pid; + + /* If the pid is 0 the core was idle. Instead of returning 0 we return a special number + * identifying which core we are on. */ + if (0 == tid) { + tid = -(1 + raw_smp_processor_id()); + } + + return tid; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_notification.c b/drivers/gpu/arm/mali400/linux/mali_osk_notification.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_notification.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_notification.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,182 @@ +/* + * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_osk_notification.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include "mali_osk.h" +#include "mali_kernel_common.h" + +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +/** + * Declaration of the notification queue object type. + * Contains a linked list of notifications pending delivery to user space. + * It also contains a wait queue of exclusive waiters blocked in the ioctl. + * When a new notification is posted, a single thread is resumed. + */ +struct _mali_osk_notification_queue_t_struct { + spinlock_t mutex; /**< Spinlock protecting the list */ + wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */ + struct list_head head; /**< List of notifications waiting to be picked up */ +}; + +typedef struct _mali_osk_notification_wrapper_t_struct { + struct list_head list; /**< Internal linked list variable */ + _mali_osk_notification_t data; /**< Notification data */ +} _mali_osk_notification_wrapper_t; + +_mali_osk_notification_queue_t *_mali_osk_notification_queue_init(void) +{ + _mali_osk_notification_queue_t *result; + + result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL); + if (NULL == result) return NULL; + + spin_lock_init(&result->mutex); + init_waitqueue_head(&result->receive_queue); + INIT_LIST_HEAD(&result->head); + + return result; +} + +_mali_osk_notification_t *_mali_osk_notification_create(u32 type, u32 size) +{ + /* OPT Recycling of notification objects */ + _mali_osk_notification_wrapper_t *notification; + + notification = (_mali_osk_notification_wrapper_t *)kmalloc(sizeof(_mali_osk_notification_wrapper_t) + size, + GFP_KERNEL | __GFP_HIGH | __GFP_REPEAT); + if (NULL == notification) { + MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n")); + return NULL; + } + + /* Init the list */ + INIT_LIST_HEAD(&notification->list); + + if (0 != size) { + notification->data.result_buffer = ((u8 *)notification) + sizeof(_mali_osk_notification_wrapper_t); + } else { + notification->data.result_buffer = NULL; + } + + /* set up the non-allocating fields */ + notification->data.notification_type = type; + notification->data.result_buffer_size = size; + + /* all ok */ + return &(notification->data); +} + +void _mali_osk_notification_delete(_mali_osk_notification_t *object) +{ + _mali_osk_notification_wrapper_t *notification; + MALI_DEBUG_ASSERT_POINTER(object); + + notification = container_of(object, _mali_osk_notification_wrapper_t, data); + + /* Free the container */ + kfree(notification); +} + +void _mali_osk_notification_queue_term(_mali_osk_notification_queue_t *queue) +{ + _mali_osk_notification_t *result; + MALI_DEBUG_ASSERT_POINTER(queue); + + while (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, &result)) { + _mali_osk_notification_delete(result); + } + + /* not much to do, just free the memory */ + kfree(queue); +} +void _mali_osk_notification_queue_send(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object) +{ +#if defined(MALI_UPPER_HALF_SCHEDULING) + unsigned long irq_flags;
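+ /* With MALI_UPPER_HALF_SCHEDULING, notifications may be posted from + * interrupt context, hence the IRQ-disabling spinlock variants used + * here and in the dequeue path below. */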
+#endif + + _mali_osk_notification_wrapper_t *notification; + MALI_DEBUG_ASSERT_POINTER(queue); + MALI_DEBUG_ASSERT_POINTER(object); + + notification = container_of(object, _mali_osk_notification_wrapper_t, data); + +#if defined(MALI_UPPER_HALF_SCHEDULING) + spin_lock_irqsave(&queue->mutex, irq_flags); +#else + spin_lock(&queue->mutex); +#endif + + list_add_tail(&notification->list, &queue->head); + +#if defined(MALI_UPPER_HALF_SCHEDULING) + spin_unlock_irqrestore(&queue->mutex, irq_flags); +#else + spin_unlock(&queue->mutex); +#endif + + /* and wake up one possible exclusive waiter */ + wake_up(&queue->receive_queue); +} + +_mali_osk_errcode_t _mali_osk_notification_queue_dequeue(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result) +{ +#if defined(MALI_UPPER_HALF_SCHEDULING) + unsigned long irq_flags; +#endif + + _mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND; + _mali_osk_notification_wrapper_t *wrapper_object; + +#if defined(MALI_UPPER_HALF_SCHEDULING) + spin_lock_irqsave(&queue->mutex, irq_flags); +#else + spin_lock(&queue->mutex); +#endif + + if (!list_empty(&queue->head)) { + wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list); + *result = &(wrapper_object->data); + list_del_init(&wrapper_object->list); + ret = _MALI_OSK_ERR_OK; + } + +#if defined(MALI_UPPER_HALF_SCHEDULING) + spin_unlock_irqrestore(&queue->mutex, irq_flags); +#else + spin_unlock(&queue->mutex); +#endif + + return ret; +} + +_mali_osk_errcode_t _mali_osk_notification_queue_receive(_mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result) +{ + /* check input */ + MALI_DEBUG_ASSERT_POINTER(queue); + MALI_DEBUG_ASSERT_POINTER(result); + + /* default result */ + *result = NULL; + + if (wait_event_interruptible(queue->receive_queue, + _MALI_OSK_ERR_OK == _mali_osk_notification_queue_dequeue(queue, result))) { + return _MALI_OSK_ERR_RESTARTSYSCALL; + } + + return _MALI_OSK_ERR_OK; /* all ok */ +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_pm.c b/drivers/gpu/arm/mali400/linux/mali_osk_pm.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_pm.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_pm.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,83 @@ +/** + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +/** + * @file mali_osk_pm.c + * Implementation of the callback functions from common power management + */ + +#include <linux/dma-mapping.h> + +#include "mali_kernel_linux.h" +#ifdef CONFIG_PM_RUNTIME +#include <linux/pm_runtime.h> +#endif /* CONFIG_PM_RUNTIME */ +#include <linux/platform_device.h> +#include <linux/version.h> +#include "mali_osk.h" +#include "mali_kernel_common.h" + +/* Can NOT run in atomic context */ +_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void) +{ +#ifdef CONFIG_PM_RUNTIME + int err; + MALI_DEBUG_ASSERT_POINTER(mali_platform_device); + err = pm_runtime_get_sync(&(mali_platform_device->dev)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)) + pm_runtime_mark_last_busy(&(mali_platform_device->dev)); +#endif + if (0 > err) { + MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err)); + return _MALI_OSK_ERR_FAULT; + } +#endif + return _MALI_OSK_ERR_OK; +} + +/* Can run in atomic context */ +_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void) +{ +#ifdef CONFIG_PM_RUNTIME + int err; + MALI_DEBUG_ASSERT_POINTER(mali_platform_device); + err = pm_runtime_get(&(mali_platform_device->dev)); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)) + pm_runtime_mark_last_busy(&(mali_platform_device->dev)); +#endif + if (0 > err && -EINPROGRESS != err) { + MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get() returned error code %d\n", err)); + return _MALI_OSK_ERR_FAULT; + } +#endif + return _MALI_OSK_ERR_OK; +} + + +/* Can run in atomic context */ +void _mali_osk_pm_dev_ref_put(void) +{ +#ifdef CONFIG_PM_RUNTIME + MALI_DEBUG_ASSERT_POINTER(mali_platform_device); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)) + pm_runtime_mark_last_busy(&(mali_platform_device->dev)); + pm_runtime_put_autosuspend(&(mali_platform_device->dev)); +#else + pm_runtime_put(&(mali_platform_device->dev)); +#endif +#endif +} + +void _mali_osk_pm_dev_barrier(void) +{ +#ifdef CONFIG_PM_RUNTIME + pm_runtime_barrier(&(mali_platform_device->dev)); +#endif +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_profiling.c b/drivers/gpu/arm/mali400/linux/mali_osk_profiling.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_profiling.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_profiling.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,1282 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include <linux/hrtimer.h> +#include <linux/module.h> +#include <linux/file.h> +#include <linux/poll.h> +#include <linux/anon_inodes.h> +#include <linux/sched.h> + +#include <mali_profiling_gator_api.h> +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_ukk.h" +#include "mali_uk_types.h" +#include "mali_osk_profiling.h" +#include "mali_linux_trace.h" +#include "mali_gp.h" +#include "mali_pp.h" +#include "mali_l2_cache.h" +#include "mali_user_settings_db.h" +#include "mali_executor.h" +#include "mali_memory_manager.h" + +#define MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE 100 +#define MALI_PROFILING_STREAM_HOLD_TIME 1000000 /* 1 ms */ + +#define MALI_PROFILING_STREAM_BUFFER_SIZE (1 << 12) +#define MALI_PROFILING_STREAM_BUFFER_NUM 100 + +/** + * Define the mali profiling stream struct.
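+ * Buffers come from a fixed pool (MALI_PROFILING_STREAM_BUFFER_NUM buffers + * of MALI_PROFILING_STREAM_BUFFER_SIZE = 4 KiB each, allocated in + * _mali_osk_profiling_init()); filled buffers move from free_list to + * queue_list, from where the stream fd read/poll handlers drain them.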
+ */ +typedef struct mali_profiling_stream { + u8 data[MALI_PROFILING_STREAM_BUFFER_SIZE]; + u32 used_size; + struct list_head list; +} mali_profiling_stream; + +typedef struct mali_profiling_stream_list { + spinlock_t spin_lock; + struct list_head free_list; + struct list_head queue_list; +} mali_profiling_stream_list; + +static const char mali_name[] = "4xx"; +static const char utgard_setup_version[] = "ANNOTATE_SETUP 1\n"; + +static u32 profiling_sample_rate = 0; +static u32 first_sw_counter_index = 0; + +static mali_bool l2_cache_counter_if_enabled = MALI_FALSE; +static u32 num_counters_enabled = 0; +static u32 mem_counters_enabled = 0; + +static _mali_osk_atomic_t stream_fd_if_used; + +static wait_queue_head_t stream_fd_wait_queue; +static mali_profiling_counter *global_mali_profiling_counters = NULL; +static u32 num_global_mali_profiling_counters = 0; + +static mali_profiling_stream_list *global_mali_stream_list = NULL; +static mali_profiling_stream *mali_counter_stream = NULL; +static mali_profiling_stream *mali_core_activity_stream = NULL; +static u64 mali_core_activity_stream_dequeue_time = 0; +static spinlock_t mali_activity_lock; +static u32 mali_activity_cores_num = 0; +static struct hrtimer profiling_sampling_timer; + +const char *_mali_mem_counter_descriptions[] = _MALI_MEM_COUTNER_DESCRIPTIONS; +const char *_mali_special_counter_descriptions[] = _MALI_SPCIAL_COUNTER_DESCRIPTIONS; + +static u32 current_profiling_pid = 0; + +static void _mali_profiling_stream_list_destory(mali_profiling_stream_list *profiling_stream_list) +{ + mali_profiling_stream *profiling_stream, *tmp_profiling_stream; + MALI_DEBUG_ASSERT_POINTER(profiling_stream_list); + + list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->free_list, list) { + list_del(&profiling_stream->list); + kfree(profiling_stream); + } + + list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &profiling_stream_list->queue_list, list) { + list_del(&profiling_stream->list); + kfree(profiling_stream); + } + + kfree(profiling_stream_list); +} + +static void _mali_profiling_global_stream_list_free(void) +{ + mali_profiling_stream *profiling_stream, *tmp_profiling_stream; + unsigned long irq_flags; + + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags); + list_for_each_entry_safe(profiling_stream, tmp_profiling_stream, &global_mali_stream_list->queue_list, list) { + profiling_stream->used_size = 0; + list_move(&profiling_stream->list, &global_mali_stream_list->free_list); + } + spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags); +} + +static _mali_osk_errcode_t _mali_profiling_global_stream_list_dequeue(struct list_head *stream_list, mali_profiling_stream **new_mali_profiling_stream) +{ + unsigned long irq_flags; + _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK; + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + MALI_DEBUG_ASSERT_POINTER(stream_list); + + spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags); + + if (!list_empty(stream_list)) { + *new_mali_profiling_stream = list_entry(stream_list->next, mali_profiling_stream, list); + list_del_init(&(*new_mali_profiling_stream)->list); + } else { + ret = _MALI_OSK_ERR_NOMEM; + } + + spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags); + + return ret; +} + +static void _mali_profiling_global_stream_list_queue(struct list_head *stream_list, mali_profiling_stream *current_mali_profiling_stream) +{ + unsigned long irq_flags; 
+ MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + MALI_DEBUG_ASSERT_POINTER(stream_list); + + spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags); + list_add_tail(&current_mali_profiling_stream->list, stream_list); + spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags); +} + +static mali_bool _mali_profiling_global_stream_queue_list_if_empty(void) +{ + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + return list_empty(&global_mali_stream_list->queue_list); +} + +static u32 _mali_profiling_global_stream_queue_list_next_size(void) +{ + unsigned long irq_flags; + u32 size = 0; + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + + spin_lock_irqsave(&global_mali_stream_list->spin_lock, irq_flags); + if (!list_empty(&global_mali_stream_list->queue_list)) { + mali_profiling_stream *next_mali_profiling_stream = + list_entry(global_mali_stream_list->queue_list.next, mali_profiling_stream, list); + size = next_mali_profiling_stream->used_size; + } + spin_unlock_irqrestore(&global_mali_stream_list->spin_lock, irq_flags); + return size; +} + +/* The mali profiling stream file operations functions. */ +static ssize_t _mali_profiling_stream_read( + struct file *filp, + char __user *buffer, + size_t size, + loff_t *f_pos); + +static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait); + +static int _mali_profiling_stream_release(struct inode *inode, struct file *filp); + +/* The timeline stream file operations structure. */ +static const struct file_operations mali_profiling_stream_fops = { + .release = _mali_profiling_stream_release, + .read = _mali_profiling_stream_read, + .poll = _mali_profiling_stream_poll, +}; + +static ssize_t _mali_profiling_stream_read( + struct file *filp, + char __user *buffer, + size_t size, + loff_t *f_pos) +{ + u32 copy_len = 0; + mali_profiling_stream *current_mali_profiling_stream; + u32 used_size; + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + + while (!_mali_profiling_global_stream_queue_list_if_empty()) { + used_size = _mali_profiling_global_stream_queue_list_next_size(); + if (used_size <= ((u32)size - copy_len)) { + current_mali_profiling_stream = NULL; + _mali_profiling_global_stream_list_dequeue(&global_mali_stream_list->queue_list, + &current_mali_profiling_stream); + MALI_DEBUG_ASSERT_POINTER(current_mali_profiling_stream); + if (copy_to_user(&buffer[copy_len], current_mali_profiling_stream->data, current_mali_profiling_stream->used_size)) { + current_mali_profiling_stream->used_size = 0; + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream); + return -EFAULT; + } + copy_len += current_mali_profiling_stream->used_size; + current_mali_profiling_stream->used_size = 0; + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->free_list, current_mali_profiling_stream); + } else { + break; + } + } + return (ssize_t)copy_len; +} + +static unsigned int _mali_profiling_stream_poll(struct file *filp, poll_table *wait) +{ + poll_wait(filp, &stream_fd_wait_queue, wait); + if (!_mali_profiling_global_stream_queue_list_if_empty()) + return POLLIN; + return 0; +} + +static int _mali_profiling_stream_release(struct inode *inode, struct file *filp) +{ + _mali_osk_atomic_init(&stream_fd_if_used, 0); + return 0; +} + +/* Helper functions for control packets and stream data. */ +static void _mali_profiling_set_packet_size(unsigned char *const buf, const u32 size) +{ + u32 i; + + for (i = 0; i < sizeof(size); ++i) + buf[i] = (size >> 8 * i) & 0xFF; +} + 
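+/* Wire format implied by these helpers: each packet is a one-byte type + * followed by a four-byte little-endian total size stored at offset 1 (the + * callers' "+ 1" offsets and PACKET_HEADER_SIZE account for these five + * bytes); a packet of total size 0x105 thus carries the size bytes + * 05 01 00 00. The pack_int/pack_long helpers further below emit signed + * LEB128 integers, 7 data bits per byte with bit 7 as the continuation + * flag, so for example -2 packs into the single byte 0x7e. */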
+static u32 _mali_profiling_get_packet_size(unsigned char *const buf) +{ + u32 i; + u32 size = 0; + for (i = 0; i < sizeof(size); ++i) + size |= (u32)buf[i] << 8 * i; + return size; +} + +static u32 _mali_profiling_read_packet_int(unsigned char *const buf, u32 *const pos, u32 const packet_size) +{ + u64 int_value = 0; + u8 shift = 0; + u8 byte_value = ~0; + + while ((byte_value & 0x80) != 0) { + if ((*pos) >= packet_size) + return -1; + byte_value = buf[*pos]; + *pos += 1; + int_value |= (u32)(byte_value & 0x7f) << shift; + shift += 7; + } + + if (shift < 8 * sizeof(int_value) && (byte_value & 0x40) != 0) { + int_value |= -(1 << shift); + } + + return int_value; +} + +static u32 _mali_profiling_pack_int(u8 *const buf, u32 const buf_size, u32 const pos, s32 value) +{ + u32 add_bytes = 0; + int more = 1; + while (more) { + /* low order 7 bits of val */ + char byte_value = value & 0x7f; + value >>= 7; + + if ((value == 0 && (byte_value & 0x40) == 0) || (value == -1 && (byte_value & 0x40) != 0)) { + more = 0; + } else { + byte_value |= 0x80; + } + + if ((pos + add_bytes) >= buf_size) + return 0; + buf[pos + add_bytes] = byte_value; + add_bytes++; + } + + return add_bytes; +} + +static int _mali_profiling_pack_long(uint8_t *const buf, u32 const buf_size, u32 const pos, s64 val) +{ + int add_bytes = 0; + int more = 1; + while (more) { + /* low order 7 bits of x */ + char byte_value = val & 0x7f; + val >>= 7; + + if ((val == 0 && (byte_value & 0x40) == 0) || (val == -1 && (byte_value & 0x40) != 0)) { + more = 0; + } else { + byte_value |= 0x80; + } + + MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size); + buf[pos + add_bytes] = byte_value; + add_bytes++; + } + + return add_bytes; +} + +static void _mali_profiling_stream_add_counter(mali_profiling_stream *profiling_stream, s64 current_time, u32 key, u32 counter_value) +{ + u32 add_size = STREAM_HEADER_SIZE; + MALI_DEBUG_ASSERT_POINTER(profiling_stream); + MALI_DEBUG_ASSERT((profiling_stream->used_size) < MALI_PROFILING_STREAM_BUFFER_SIZE); + + profiling_stream->data[profiling_stream->used_size] = STREAM_HEADER_COUNTER_VALUE; + + add_size += _mali_profiling_pack_long(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE, + profiling_stream->used_size + add_size, current_time); + add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE, + profiling_stream->used_size + add_size, (s32)0); + add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE, + profiling_stream->used_size + add_size, (s32)key); + add_size += _mali_profiling_pack_int(profiling_stream->data, MALI_PROFILING_STREAM_BUFFER_SIZE, + profiling_stream->used_size + add_size, (s32)counter_value); + + _mali_profiling_set_packet_size(profiling_stream->data + profiling_stream->used_size + 1, + add_size - STREAM_HEADER_SIZE); + + profiling_stream->used_size += add_size; +} + +/* The callback function for sampling timer.*/ +static enum hrtimer_restart _mali_profiling_sampling_counters(struct hrtimer *timer) +{ + u32 counter_index; + s64 current_time; + MALI_DEBUG_ASSERT_POINTER(global_mali_profiling_counters); + MALI_DEBUG_ASSERT_POINTER(global_mali_stream_list); + + MALI_DEBUG_ASSERT(NULL == mali_counter_stream); + if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue( + &global_mali_stream_list->free_list, &mali_counter_stream)) { + + MALI_DEBUG_ASSERT_POINTER(mali_counter_stream); + MALI_DEBUG_ASSERT(0 == mali_counter_stream->used_size); + + /* Capture l2 cache counter values if enabled */ + if 
(MALI_TRUE == l2_cache_counter_if_enabled) { + int i, j = 0; + _mali_profiling_l2_counter_values l2_counters_values; + _mali_profiling_get_l2_counters(&l2_counters_values); + + for (i = COUNTER_L2_0_C0; i <= COUNTER_L2_2_C1; i++) { + if (0 == (j % 2)) + _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value0); + else + _mali_osk_profiling_record_global_counters(i, l2_counters_values.cores[j / 2].value1); + j++; + } + } + + current_time = (s64)_mali_osk_boot_time_get_ns(); + + /* Add all enabled counter values into stream */ + for (counter_index = 0; counter_index < num_global_mali_profiling_counters; counter_index++) { + /* No need to sample these counters here. */ + if (global_mali_profiling_counters[counter_index].enabled) { + if ((global_mali_profiling_counters[counter_index].counter_id >= FIRST_MEM_COUNTER && + global_mali_profiling_counters[counter_index].counter_id <= LAST_MEM_COUNTER) + || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_VP_ACTIVITY) + || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FP_ACTIVITY) + || (global_mali_profiling_counters[counter_index].counter_id == COUNTER_FILMSTRIP)) { + + continue; + } + + if (global_mali_profiling_counters[counter_index].counter_id >= COUNTER_L2_0_C0 && + global_mali_profiling_counters[counter_index].counter_id <= COUNTER_L2_2_C1) { + + u32 prev_val = global_mali_profiling_counters[counter_index].prev_counter_value; + + _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key, + global_mali_profiling_counters[counter_index].current_counter_value - prev_val); + + prev_val = global_mali_profiling_counters[counter_index].current_counter_value; + + global_mali_profiling_counters[counter_index].prev_counter_value = prev_val; + } else { + + if (global_mali_profiling_counters[counter_index].counter_id == COUNTER_TOTAL_ALLOC_PAGES) { + u32 total_alloc_mem = _mali_ukk_report_memory_usage(); + global_mali_profiling_counters[counter_index].current_counter_value = total_alloc_mem / _MALI_OSK_MALI_PAGE_SIZE; + } + _mali_profiling_stream_add_counter(mali_counter_stream, current_time, global_mali_profiling_counters[counter_index].key, + global_mali_profiling_counters[counter_index].current_counter_value); + if (global_mali_profiling_counters[counter_index].counter_id < FIRST_SPECIAL_COUNTER) + global_mali_profiling_counters[counter_index].current_counter_value = 0; + } + } + } + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_counter_stream); + mali_counter_stream = NULL; + } else { + MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n")); + } + + wake_up_interruptible(&stream_fd_wait_queue); + + /* Enable the sampling timer again */ + if (0 != num_counters_enabled && 0 != profiling_sample_rate) { + hrtimer_forward_now(&profiling_sampling_timer, ns_to_ktime(profiling_sample_rate)); + return HRTIMER_RESTART; + } + return HRTIMER_NORESTART; +} + +static void _mali_profiling_sampling_core_activity_switch(int counter_id, int core, u32 activity, u32 pid) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&mali_activity_lock, irq_flags); + if (activity == 0) + mali_activity_cores_num--; + else + mali_activity_cores_num++; + spin_unlock_irqrestore(&mali_activity_lock, irq_flags); + + if (NULL != global_mali_profiling_counters) { + int i; + for (i = 0; i < num_global_mali_profiling_counters; i++) { + if (counter_id == global_mali_profiling_counters[i].counter_id &&
global_mali_profiling_counters[i].enabled) { + u64 current_time = _mali_osk_boot_time_get_ns(); + u32 add_size = STREAM_HEADER_SIZE; + + if (NULL != mali_core_activity_stream) { + if ((mali_core_activity_stream_dequeue_time + MALI_PROFILING_STREAM_HOLD_TIME < current_time) || + (MALI_PROFILING_STREAM_DATA_DEFAULT_SIZE > MALI_PROFILING_STREAM_BUFFER_SIZE + - mali_core_activity_stream->used_size)) { + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream); + mali_core_activity_stream = NULL; + wake_up_interruptible(&stream_fd_wait_queue); + } + } + + if (NULL == mali_core_activity_stream) { + if (_MALI_OSK_ERR_OK == _mali_profiling_global_stream_list_dequeue( + &global_mali_stream_list->free_list, &mali_core_activity_stream)) { + mali_core_activity_stream_dequeue_time = current_time; + } else { + MALI_DEBUG_PRINT(1, ("Not enough mali profiling stream buffer!\n")); + wake_up_interruptible(&stream_fd_wait_queue); + break; + } + + } + + mali_core_activity_stream->data[mali_core_activity_stream->used_size] = STREAM_HEADER_CORE_ACTIVITY; + + add_size += _mali_profiling_pack_long(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s64)current_time); + add_size += _mali_profiling_pack_int(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, core); + add_size += _mali_profiling_pack_int(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, (s32)global_mali_profiling_counters[i].key); + add_size += _mali_profiling_pack_int(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, activity); + add_size += _mali_profiling_pack_int(mali_core_activity_stream->data, + MALI_PROFILING_STREAM_BUFFER_SIZE, mali_core_activity_stream->used_size + add_size, pid); + + _mali_profiling_set_packet_size(mali_core_activity_stream->data + mali_core_activity_stream->used_size + 1, + add_size - STREAM_HEADER_SIZE); + + mali_core_activity_stream->used_size += add_size; + + if (0 == mali_activity_cores_num) { + _mali_profiling_global_stream_list_queue(&global_mali_stream_list->queue_list, mali_core_activity_stream); + mali_core_activity_stream = NULL; + wake_up_interruptible(&stream_fd_wait_queue); + } + + break; + } + } + } +} + +static mali_bool _mali_profiling_global_counters_init(void) +{ + int core_id, counter_index, counter_number, counter_id; + u32 num_l2_cache_cores; + u32 num_pp_cores; + u32 num_gp_cores = 1; + + MALI_DEBUG_ASSERT(NULL == global_mali_profiling_counters); + num_pp_cores = mali_pp_get_glob_num_pp_cores(); + num_l2_cache_cores = mali_l2_cache_core_get_glob_num_l2_cores(); + + num_global_mali_profiling_counters = 3 * (num_gp_cores + num_pp_cores) + 2 * num_l2_cache_cores + + MALI_PROFILING_SW_COUNTERS_NUM + + MALI_PROFILING_SPECIAL_COUNTERS_NUM + + MALI_PROFILING_MEM_COUNTERS_NUM; + global_mali_profiling_counters = _mali_osk_calloc(num_global_mali_profiling_counters, sizeof(mali_profiling_counter)); + + if (NULL == global_mali_profiling_counters) + return MALI_FALSE; + + counter_index = 0; + /*Vertex processor counters */ + for (core_id = 0; core_id < num_gp_cores; core_id ++) { + global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_VP_0 + core_id; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + 
sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_active", mali_name, core_id); + + for (counter_number = 0; counter_number < 2; counter_number++) { + counter_index++; + global_mali_profiling_counters[counter_index].counter_id = COUNTER_VP_0_C0 + (2 * core_id) + counter_number; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_VP_%d_cnt%d", mali_name, core_id, counter_number); + } + } + + /* Fragment processors' counters */ + for (core_id = 0; core_id < num_pp_cores; core_id++) { + counter_index++; + global_mali_profiling_counters[counter_index].counter_id = ACTIVITY_FP_0 + core_id; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_active", mali_name, core_id); + + for (counter_number = 0; counter_number < 2; counter_number++) { + counter_index++; + global_mali_profiling_counters[counter_index].counter_id = COUNTER_FP_0_C0 + (2 * core_id) + counter_number; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_FP_%d_cnt%d", mali_name, core_id, counter_number); + } + } + + /* L2 Cache counters */ + for (core_id = 0; core_id < num_l2_cache_cores; core_id++) { + for (counter_number = 0; counter_number < 2; counter_number++) { + counter_index++; + global_mali_profiling_counters[counter_index].counter_id = COUNTER_L2_0_C0 + (2 * core_id) + counter_number; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_L2_%d_cnt%d", mali_name, core_id, counter_number); + } + } + + /* Now set up the software counter entries */ + for (counter_id = FIRST_SW_COUNTER; counter_id <= LAST_SW_COUNTER; counter_id++) { + counter_index++; + + if (0 == first_sw_counter_index) + first_sw_counter_index = counter_index; + + global_mali_profiling_counters[counter_index].counter_id = counter_id; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_SW_%d", mali_name, counter_id - FIRST_SW_COUNTER); + } + + /* Now set up the special counter entries */ + for (counter_id = FIRST_SPECIAL_COUNTER; counter_id <= LAST_SPECIAL_COUNTER; counter_id++) { + + counter_index++; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s", + mali_name, _mali_special_counter_descriptions[counter_id - FIRST_SPECIAL_COUNTER]); + + global_mali_profiling_counters[counter_index].counter_id = counter_id; + } + + /* Now set up the mem counter entries*/ + for (counter_id = FIRST_MEM_COUNTER; counter_id <= LAST_MEM_COUNTER; counter_id++) { + + counter_index++; + _mali_osk_snprintf(global_mali_profiling_counters[counter_index].counter_name, + sizeof(global_mali_profiling_counters[counter_index].counter_name), "ARM_Mali-%s_%s", + mali_name, _mali_mem_counter_descriptions[counter_id - FIRST_MEM_COUNTER]); + + global_mali_profiling_counters[counter_index].counter_id = counter_id; + } + + MALI_DEBUG_ASSERT((counter_index + 1) == num_global_mali_profiling_counters); + + return MALI_TRUE; +} + +void _mali_profiling_notification_mem_counter(struct 
mali_session_data *session, u32 counter_id, u32 key, int enable) +{ + + MALI_DEBUG_ASSERT_POINTER(session); + + if (NULL != session) { + _mali_osk_notification_t *notification; + _mali_osk_notification_queue_t *queue; + + queue = session->ioctl_queue; + MALI_DEBUG_ASSERT(NULL != queue); + + notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_MEM_COUNTER, + sizeof(_mali_uk_annotate_profiling_mem_counter_s)); + + if (NULL != notification) { + _mali_uk_annotate_profiling_mem_counter_s *data = notification->result_buffer; + data->counter_id = counter_id; + data->key = key; + data->enable = enable; + + _mali_osk_notification_queue_send(queue, notification); + } else { + MALI_PRINT_ERROR(("Failed to create notification object!\n")); + } + } else { + MALI_PRINT_ERROR(("Failed to find the right session!\n")); + } +} + +void _mali_profiling_notification_enable(struct mali_session_data *session, u32 sampling_rate, int enable) +{ + MALI_DEBUG_ASSERT_POINTER(session); + + if (NULL != session) { + _mali_osk_notification_t *notification; + _mali_osk_notification_queue_t *queue; + + queue = session->ioctl_queue; + MALI_DEBUG_ASSERT(NULL != queue); + + notification = _mali_osk_notification_create(_MALI_NOTIFICATION_ANNOTATE_PROFILING_ENABLE, + sizeof(_mali_uk_annotate_profiling_enable_s)); + + if (NULL != notification) { + _mali_uk_annotate_profiling_enable_s *data = notification->result_buffer; + data->sampling_rate = sampling_rate; + data->enable = enable; + + _mali_osk_notification_queue_send(queue, notification); + } else { + MALI_PRINT_ERROR(("Failed to create notification object!\n")); + } + } else { + MALI_PRINT_ERROR(("Failed to find the right session!\n")); + } +} + + +_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start) +{ + int i; + mali_profiling_stream *new_mali_profiling_stream = NULL; + mali_profiling_stream_list *new_mali_profiling_stream_list = NULL; + if (MALI_TRUE == auto_start) { + mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE); + } + + /*Init the global_mali_stream_list*/ + MALI_DEBUG_ASSERT(NULL == global_mali_stream_list); + new_mali_profiling_stream_list = (mali_profiling_stream_list *)kmalloc(sizeof(mali_profiling_stream_list), GFP_KERNEL); + + if (NULL == new_mali_profiling_stream_list) { + return _MALI_OSK_ERR_NOMEM; + } + + spin_lock_init(&new_mali_profiling_stream_list->spin_lock); + INIT_LIST_HEAD(&new_mali_profiling_stream_list->free_list); + INIT_LIST_HEAD(&new_mali_profiling_stream_list->queue_list); + + spin_lock_init(&mali_activity_lock); + mali_activity_cores_num = 0; + + for (i = 0; i < MALI_PROFILING_STREAM_BUFFER_NUM; i++) { + new_mali_profiling_stream = (mali_profiling_stream *)kmalloc(sizeof(mali_profiling_stream), GFP_KERNEL); + if (NULL == new_mali_profiling_stream) { + _mali_profiling_stream_list_destory(new_mali_profiling_stream_list); + return _MALI_OSK_ERR_NOMEM; + } + + INIT_LIST_HEAD(&new_mali_profiling_stream->list); + new_mali_profiling_stream->used_size = 0; + list_add_tail(&new_mali_profiling_stream->list, &new_mali_profiling_stream_list->free_list); + + } + + _mali_osk_atomic_init(&stream_fd_if_used, 0); + init_waitqueue_head(&stream_fd_wait_queue); + + hrtimer_init(&profiling_sampling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + + profiling_sampling_timer.function = _mali_profiling_sampling_counters; + + global_mali_stream_list = new_mali_profiling_stream_list; + + return _MALI_OSK_ERR_OK; +} + +void _mali_osk_profiling_term(void) +{ + if (0 != profiling_sample_rate) { + 
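+ /* A non-zero profiling_sample_rate means the sampling hrtimer may still + * be armed; cancel it before the stream buffers are freed below. */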
hrtimer_cancel(&profiling_sampling_timer); + profiling_sample_rate = 0; + } + _mali_osk_atomic_term(&stream_fd_if_used); + + if (NULL != global_mali_profiling_counters) { + _mali_osk_free(global_mali_profiling_counters); + global_mali_profiling_counters = NULL; + num_global_mali_profiling_counters = 0; + } + + if (NULL != global_mali_stream_list) { + _mali_profiling_stream_list_destory(global_mali_stream_list); + global_mali_stream_list = NULL; + } + +} + +void _mali_osk_profiling_stop_sampling(u32 pid) +{ + if (pid == current_profiling_pid) { + + int i; + /* Reset all counter states when closing connection.*/ + for (i = 0; i < num_global_mali_profiling_counters; ++i) { + _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER); + global_mali_profiling_counters[i].enabled = 0; + global_mali_profiling_counters[i].prev_counter_value = 0; + global_mali_profiling_counters[i].current_counter_value = 0; + } + l2_cache_counter_if_enabled = MALI_FALSE; + num_counters_enabled = 0; + mem_counters_enabled = 0; + _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0); + _mali_profiling_control(SW_COUNTER_ENABLE, 0); + /* Delete sampling timer when closing connection. */ + if (0 != profiling_sample_rate) { + hrtimer_cancel(&profiling_sampling_timer); + profiling_sample_rate = 0; + } + current_profiling_pid = 0; + } +} + +void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4) +{ + /*Record the freq & volt to global_mali_profiling_counters here. */ + if (0 != profiling_sample_rate) { + u32 channel; + u32 state; + channel = (event_id >> 16) & 0xFF; + state = ((event_id >> 24) & 0xF) << 24; + + switch (state) { + case MALI_PROFILING_EVENT_TYPE_SINGLE: + if ((MALI_PROFILING_EVENT_CHANNEL_GPU >> 16) == channel) { + u32 reason = (event_id & 0xFFFF); + if (MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE == reason) { + _mali_osk_profiling_record_global_counters(COUNTER_FREQUENCY, data0); + _mali_osk_profiling_record_global_counters(COUNTER_VOLTAGE, data1); + } + } + break; + case MALI_PROFILING_EVENT_TYPE_START: + if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) { + _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 1, data1); + } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) && + (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) { + u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16); + _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 1, data1); + } + break; + case MALI_PROFILING_EVENT_TYPE_STOP: + if ((MALI_PROFILING_EVENT_CHANNEL_GP0 >> 16) == channel) { + _mali_profiling_sampling_core_activity_switch(COUNTER_VP_ACTIVITY, 0, 0, 0); + } else if (channel >= (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16) && + (MALI_PROFILING_EVENT_CHANNEL_PP7 >> 16) >= channel) { + u32 core_id = channel - (MALI_PROFILING_EVENT_CHANNEL_PP0 >> 16); + _mali_profiling_sampling_core_activity_switch(COUNTER_FP_ACTIVITY, core_id, 0, 0); + } + break; + default: + break; + } + } + trace_mali_timeline_event(event_id, data0, data1, data2, data3, data4); +} + +void _mali_osk_profiling_report_sw_counters(u32 *counters) +{ + trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters); +} + +void _mali_osk_profiling_record_global_counters(int counter_id, u32 value) +{ + if (NULL != global_mali_profiling_counters) { + int i ; + for (i = 0; i < num_global_mali_profiling_counters; i++) { + if (counter_id == global_mali_profiling_counters[i].counter_id && 
global_mali_profiling_counters[i].enabled) { + global_mali_profiling_counters[i].current_counter_value = value; + break; + } + } + } +} + +_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args) +{ + /* Always add process and thread identificator in the first two data elements for events from user space */ + _mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]); + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args) +{ + u32 *counters = (u32 *)(uintptr_t)args->counters; + + _mali_osk_profiling_report_sw_counters(counters); + + if (NULL != global_mali_profiling_counters) { + int i; + for (i = 0; i < MALI_PROFILING_SW_COUNTERS_NUM; i ++) { + if (global_mali_profiling_counters[first_sw_counter_index + i].enabled) { + global_mali_profiling_counters[first_sw_counter_index + i].current_counter_value = *(counters + i); + } + } + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_profiling_stream_fd_get(_mali_uk_profiling_stream_fd_get_s *args) +{ + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + MALI_DEBUG_ASSERT_POINTER(session); + + if (1 == _mali_osk_atomic_inc_return(&stream_fd_if_used)) { + + s32 fd = anon_inode_getfd("[mali_profiling_stream]", &mali_profiling_stream_fops, + session, + O_RDONLY | O_CLOEXEC); + + args->stream_fd = fd; + if (0 > fd) { + _mali_osk_atomic_dec(&stream_fd_if_used); + return _MALI_OSK_ERR_FAULT; + } + args->stream_fd = fd; + } else { + _mali_osk_atomic_dec(&stream_fd_if_used); + args->stream_fd = -1; + return _MALI_OSK_ERR_BUSY; + } + + return _MALI_OSK_ERR_OK; +} + +_mali_osk_errcode_t _mali_ukk_profiling_control_set(_mali_uk_profiling_control_set_s *args) +{ + u32 control_packet_size; + u32 output_buffer_size; + + struct mali_session_data *session = (struct mali_session_data *)(uintptr_t)args->ctx; + MALI_DEBUG_ASSERT_POINTER(session); + + if (NULL == global_mali_profiling_counters && MALI_FALSE == _mali_profiling_global_counters_init()) { + MALI_PRINT_ERROR(("Failed to create global_mali_profiling_counters.\n")); + return _MALI_OSK_ERR_FAULT; + } + + control_packet_size = args->control_packet_size; + output_buffer_size = args->response_packet_size; + + if (0 != control_packet_size) { + u8 control_type; + u8 *control_packet_data; + u8 *response_packet_data; + u32 version_length = sizeof(utgard_setup_version) - 1; + + control_packet_data = (u8 *)(uintptr_t)args->control_packet_data; + MALI_DEBUG_ASSERT_POINTER(control_packet_data); + response_packet_data = (u8 *)(uintptr_t)args->response_packet_data; + MALI_DEBUG_ASSERT_POINTER(response_packet_data); + + /*Decide if need to ignore Utgard setup version.*/ + if (control_packet_size >= version_length) { + if (0 == memcmp(control_packet_data, utgard_setup_version, version_length)) { + if (control_packet_size == version_length) { + args->response_packet_size = 0; + return _MALI_OSK_ERR_OK; + } else { + control_packet_data += version_length; + control_packet_size -= version_length; + } + } + } + + current_profiling_pid = _mali_osk_get_pid(); + + control_type = control_packet_data[0]; + switch (control_type) { + case PACKET_HEADER_COUNTERS_REQUEST: { + int i; + + if (PACKET_HEADER_SIZE > control_packet_size || + control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) { + MALI_PRINT_ERROR(("Wrong control packet size, type 0x%x,size 0x%x.\n", control_packet_data[0], 
control_packet_size)); + return _MALI_OSK_ERR_FAULT; + } + + /* Send supported counters */ + if (PACKET_HEADER_SIZE > output_buffer_size) + return _MALI_OSK_ERR_FAULT; + + *response_packet_data = PACKET_HEADER_COUNTERS_ACK; + args->response_packet_size = PACKET_HEADER_SIZE; + + for (i = 0; i < num_global_mali_profiling_counters; ++i) { + u32 name_size = strlen(global_mali_profiling_counters[i].counter_name); + + if ((args->response_packet_size + name_size + 1) > output_buffer_size) { + MALI_PRINT_ERROR(("Response packet data is too large..\n")); + return _MALI_OSK_ERR_FAULT; + } + + memcpy(response_packet_data + args->response_packet_size, + global_mali_profiling_counters[i].counter_name, name_size + 1); + + args->response_packet_size += (name_size + 1); + + if (global_mali_profiling_counters[i].counter_id == COUNTER_VP_ACTIVITY) { + args->response_packet_size += _mali_profiling_pack_int(response_packet_data, + output_buffer_size, args->response_packet_size, (s32)1); + } else if (global_mali_profiling_counters[i].counter_id == COUNTER_FP_ACTIVITY) { + args->response_packet_size += _mali_profiling_pack_int(response_packet_data, + output_buffer_size, args->response_packet_size, (s32)mali_pp_get_glob_num_pp_cores()); + } else { + args->response_packet_size += _mali_profiling_pack_int(response_packet_data, + output_buffer_size, args->response_packet_size, (s32) - 1); + } + } + + _mali_profiling_set_packet_size(response_packet_data + 1, args->response_packet_size); + break; + } + + case PACKET_HEADER_COUNTERS_ENABLE: { + int i; + u32 request_pos = PACKET_HEADER_SIZE; + mali_bool sw_counter_if_enabled = MALI_FALSE; + + if (PACKET_HEADER_SIZE > control_packet_size || + control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) { + MALI_PRINT_ERROR(("Wrong control packet size , type 0x%x,size 0x%x.\n", control_packet_data[0], control_packet_size)); + return _MALI_OSK_ERR_FAULT; + } + + /* Init all counter states before enable requested counters.*/ + for (i = 0; i < num_global_mali_profiling_counters; ++i) { + _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, MALI_HW_CORE_NO_COUNTER); + global_mali_profiling_counters[i].enabled = 0; + global_mali_profiling_counters[i].prev_counter_value = 0; + global_mali_profiling_counters[i].current_counter_value = 0; + + if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER && + global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) { + _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id, 0, 0); + } + } + + l2_cache_counter_if_enabled = MALI_FALSE; + num_counters_enabled = 0; + mem_counters_enabled = 0; + _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 0); + _mali_profiling_control(SW_COUNTER_ENABLE, 0); + _mali_profiling_notification_enable(session, 0, 0); + + /* Enable requested counters */ + while (request_pos < control_packet_size) { + u32 begin = request_pos; + u32 event; + u32 key; + + /* Check the counter name which should be ended with null */ + while (request_pos < control_packet_size && control_packet_data[request_pos] != '\0') { + ++request_pos; + } + + if (request_pos >= control_packet_size) + return _MALI_OSK_ERR_FAULT; + + ++request_pos; + event = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size); + key = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size); + + for (i = 0; i < num_global_mali_profiling_counters; ++i) { + u32 name_size = strlen((char 
*)(control_packet_data + begin)); + + if (strncmp(global_mali_profiling_counters[i].counter_name, (char *)(control_packet_data + begin), name_size) == 0) { + if (!sw_counter_if_enabled && (FIRST_SW_COUNTER <= global_mali_profiling_counters[i].counter_id + && global_mali_profiling_counters[i].counter_id <= LAST_SW_COUNTER)) { + sw_counter_if_enabled = MALI_TRUE; + _mali_profiling_control(SW_COUNTER_ENABLE, 1); + } + + if (COUNTER_FILMSTRIP == global_mali_profiling_counters[i].counter_id) { + _mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1); + _mali_profiling_control(FBDUMP_CONTROL_RATE, event & 0xff); + _mali_profiling_control(FBDUMP_CONTROL_RESIZE_FACTOR, (event >> 8) & 0xff); + } + + if (global_mali_profiling_counters[i].counter_id >= FIRST_MEM_COUNTER && + global_mali_profiling_counters[i].counter_id <= LAST_MEM_COUNTER) { + _mali_profiling_notification_mem_counter(session, global_mali_profiling_counters[i].counter_id, + key, 1); + mem_counters_enabled++; + } + + global_mali_profiling_counters[i].counter_event = event; + global_mali_profiling_counters[i].key = key; + global_mali_profiling_counters[i].enabled = 1; + + _mali_profiling_set_event(global_mali_profiling_counters[i].counter_id, + global_mali_profiling_counters[i].counter_event); + num_counters_enabled++; + break; + } + } + + if (i == num_global_mali_profiling_counters) { + MALI_PRINT_ERROR(("Counter name does not match for type %u.\n", control_type)); + return _MALI_OSK_ERR_FAULT; + } + } + + if (PACKET_HEADER_SIZE <= output_buffer_size) { + *response_packet_data = PACKET_HEADER_ACK; + _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE); + args->response_packet_size = PACKET_HEADER_SIZE; + } else { + return _MALI_OSK_ERR_FAULT; + } + + break; + } + + case PACKET_HEADER_START_CAPTURE_VALUE: { + u32 live_rate; + u32 request_pos = PACKET_HEADER_SIZE; + + if (PACKET_HEADER_SIZE > control_packet_size || + control_packet_size != _mali_profiling_get_packet_size(control_packet_data + 1)) { + MALI_PRINT_ERROR(("Wrong control packet size , type 0x%x,size 0x%x.\n", control_packet_data[0], control_packet_size)); + return _MALI_OSK_ERR_FAULT; + } + + /* Read samping rate in nanoseconds and live rate, start capture.*/ + profiling_sample_rate = _mali_profiling_read_packet_int(control_packet_data, + &request_pos, control_packet_size); + + live_rate = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size); + + if (PACKET_HEADER_SIZE <= output_buffer_size) { + *response_packet_data = PACKET_HEADER_ACK; + _mali_profiling_set_packet_size(response_packet_data + 1, PACKET_HEADER_SIZE); + args->response_packet_size = PACKET_HEADER_SIZE; + } else { + return _MALI_OSK_ERR_FAULT; + } + + if (0 != num_counters_enabled && 0 != profiling_sample_rate) { + _mali_profiling_global_stream_list_free(); + if (mem_counters_enabled > 0) { + _mali_profiling_notification_enable(session, profiling_sample_rate, 1); + } + hrtimer_start(&profiling_sampling_timer, + ktime_set(profiling_sample_rate / 1000000000, profiling_sample_rate % 1000000000), + HRTIMER_MODE_REL_PINNED); + } + + break; + } + default: + MALI_PRINT_ERROR(("Unsupported profiling packet header type %u.\n", control_type)); + args->response_packet_size = 0; + return _MALI_OSK_ERR_FAULT; + } + } else { + _mali_osk_profiling_stop_sampling(current_profiling_pid); + _mali_profiling_notification_enable(session, 0, 0); + } + + return _MALI_OSK_ERR_OK; +} + +/** + * Called by gator.ko to set HW counters + * + * @param counter_id The counter ID. 
+ * @param event_id Event ID that the counter should count (HW counter value from TRM). + * + * @return 1 on success, 0 on failure. + */ +int _mali_profiling_set_event(u32 counter_id, s32 event_id) +{ + if (COUNTER_VP_0_C0 == counter_id) { + mali_gp_job_set_gp_counter_src0(event_id); + } else if (COUNTER_VP_0_C1 == counter_id) { + mali_gp_job_set_gp_counter_src1(event_id); + } else if (COUNTER_FP_0_C0 <= counter_id && COUNTER_FP_7_C1 >= counter_id) { + /* + * Two compatibility notes for this function: + * + * 1) Previously the DDK allowed per core counters. + * + * This did not make much sense on Mali-450 with the "virtual PP core" concept, + * so this option was removed, and only the same pair of HW counters was allowed on all cores, + * beginning with r3p2 release. + * + * Starting with r4p0, it is now possible to set different HW counters for the different sub jobs. + * This should be almost the same, since sub job 0 is designed to run on core 0, + * sub job 1 on core 1, and so on. + * + * The scheduling of PP sub jobs is not predictable, and this often led to situations where core 0 ran 2 + * sub jobs, while for instance core 1 ran zero. Having the counters set per sub job would thus increase + * the predictability of the returned data (as you would be guaranteed data for all the selected HW counters). + * + * PS: Core scaling needs to be disabled in order to use this reliably (goes for both solutions). + * + * The framework/#defines with Gator still indicates that the counter is for a particular core, + * but this is internally used as a sub job ID instead (no translation needed). + * + * 2) Global/default vs per sub job counters + * + * Releases before r3p2 had only per PP core counters. + * r3p2 releases had only one set of default/global counters which applied to all PP cores + * Starting with r4p0, we have both a set of default/global counters, + * and individual counters per sub job (equal to per core). + * + * To keep compatibility with Gator/DS-5/streamline, the following scheme is used: + * + * r3p2 release; only counters set for core 0 is handled, + * this is applied as the default/global set of counters, and will thus affect all cores. + * + * r4p0 release; counters set for core 0 is applied as both the global/default set of counters, + * and counters for sub job 0. + * Counters set for core 1-7 is only applied for the corresponding sub job. + * + * This should allow the DS-5/Streamline GUI to have a simple mode where it only allows setting the + * values for core 0, and thus this will be applied to all PP sub jobs/cores. + * Advanced mode will also be supported, where individual pairs of HW counters can be selected. + * + * The GUI will (until it is updated) still refer to cores instead of sub jobs, but this is probably + * something we can live with! + * + * Mali-450 note: Each job is not divided into a deterministic number of sub jobs, as the HW DLBU + * automatically distributes the load between whatever number of cores is available at this particular time. + * A normal PP job on Mali-450 is thus considered a single (virtual) job, and it will thus only be possible + * to use a single pair of HW counters (even if the job ran on multiple PP cores). + * In other words, only the global/default pair of PP HW counters will be used for normal Mali-450 jobs. 
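+ * Worked example of the mapping implemented below: COUNTER_FP_1_C1 gives + * sub_job = (COUNTER_FP_1_C1 - COUNTER_FP_0_C0) >> 1 = 1 and counter_src = 1, + * i.e. HW counter source 1 of PP sub job 1; only sub job 0 additionally + * updates the global/default counter pair.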
+ */ + u32 sub_job = (counter_id - COUNTER_FP_0_C0) >> 1; + u32 counter_src = (counter_id - COUNTER_FP_0_C0) & 1; + if (0 == counter_src) { + mali_pp_job_set_pp_counter_sub_job_src0(sub_job, event_id); + if (0 == sub_job) { + mali_pp_job_set_pp_counter_global_src0(event_id); + } + } else { + mali_pp_job_set_pp_counter_sub_job_src1(sub_job, event_id); + if (0 == sub_job) { + mali_pp_job_set_pp_counter_global_src1(event_id); + } + } + } else if (COUNTER_L2_0_C0 <= counter_id && COUNTER_L2_2_C1 >= counter_id) { + u32 core_id = (counter_id - COUNTER_L2_0_C0) >> 1; + struct mali_l2_cache_core *l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id); + + if (NULL != l2_cache_core) { + u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1; + mali_l2_cache_core_set_counter_src(l2_cache_core, + counter_src, event_id); + l2_cache_counter_if_enabled = MALI_TRUE; + } + } else { + return 0; /* Failure, unknown event */ + } + + return 1; /* success */ +} + +/** + * Called by gator.ko to retrieve the L2 cache counter values for all L2 cache cores. + * The L2 cache counters are unique in that they are polled by gator, rather than being + * transmitted via the tracepoint mechanism. + * + * @param values Pointer to a _mali_profiling_l2_counter_values structure where + * the counter sources and values will be output + * @return 0 if all went well; otherwise, return the mask with the bits set for the powered off cores + */ +u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values) +{ + u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores(); + u32 i; + + MALI_DEBUG_ASSERT(l2_cores_num <= 3); + + for (i = 0; i < l2_cores_num; i++) { + struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(i); + + if (NULL == l2_cache) { + continue; + } + + mali_l2_cache_core_get_counter_values(l2_cache, + &values->cores[i].source0, + &values->cores[i].value0, + &values->cores[i].source1, + &values->cores[i].value1); + } + + return 0; +} + +/** + * Called by gator to control the production of profiling information at runtime. + */ +void _mali_profiling_control(u32 action, u32 value) +{ + switch (action) { + case FBDUMP_CONTROL_ENABLE: + mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE)); + break; + case FBDUMP_CONTROL_RATE: + mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value); + break; + case SW_COUNTER_ENABLE: + mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value); + break; + case FBDUMP_CONTROL_RESIZE_FACTOR: + mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value); + break; + default: + break; /* Ignore unimplemented actions */ + } +} + +/** + * Called by gator to get mali api version. 
*/
+u32 _mali_profiling_get_api_version(void)
+{
+ return MALI_PROFILING_API_VERSION;
+}
+
+/**
+* Called by gator to get the data about the Mali instance in use:
+* product id, version, number of cores
+*/
+void _mali_profiling_get_mali_version(struct _mali_profiling_mali_version *values)
+{
+ values->mali_product_id = (u32)mali_kernel_core_get_product_id();
+ values->mali_version_major = mali_kernel_core_get_gpu_major_version();
+ values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
+ values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+ values->num_of_fp_cores = mali_executor_get_num_cores_total();
+ values->num_of_vp_cores = 1;
+}
+
+
+EXPORT_SYMBOL(_mali_profiling_set_event);
+EXPORT_SYMBOL(_mali_profiling_get_l2_counters);
+EXPORT_SYMBOL(_mali_profiling_control);
+EXPORT_SYMBOL(_mali_profiling_get_api_version);
+EXPORT_SYMBOL(_mali_profiling_get_mali_version);
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_specific.h b/drivers/gpu/arm/mali400/linux/mali_osk_specific.h
--- a/drivers/gpu/arm/mali400/linux/mali_osk_specific.h 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_osk_specific.h 2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+#include <linux/hardirq.h>
+
+
+#include "mali_osk_types.h"
+#include "mali_kernel_linux.h"
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+typedef struct dma_pool *mali_dma_pool;
+
+typedef u32 mali_dma_addr;
+
+#if MALI_ENABLE_CPU_CYCLES
+/* Reads out the clock cycle performance counter of the current cpu.
+ It is useful for cost-free (2 cycle) measuring of the time spent
+ in a code path. Sample before and after; the difference is the
+ number of cycles spent.
+ When the CPU is idle it will not increase this clock counter.
+ It means that the counter is accurate if only spin-locks are used,
+ but mutexes may lead to too low values since the cpu might "idle"
+ waiting for the mutex to become available.
+ The clock source is configured on the CPU during mali module load,
+ but will not give useful output after a CPU has been power cycled.
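+ (Editor's sketch of the intended usage, with a hypothetical code path under
+ measurement:
+     u32 t0 = mali_get_cpu_cyclecount();
+     ... code under measurement ...
+     u32 cycles_spent = mali_get_cpu_cyclecount() - t0;)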
+ It is therefore important to configure the system to not turn off
+ the CPU cores when using this functionality. */
+static inline unsigned int mali_get_cpu_cyclecount(void)
+{
+ unsigned int value;
+ /* Reading the CCNT Register - CPU clock counter */
+ asm volatile("MRC p15, 0, %0, c9, c13, 0\t\n": "=r"(value));
+ return value;
+}
+
+void mali_init_cpu_time_counters(int reset, int enable_divide_by_64);
+#endif
+
+
+MALI_STATIC_INLINE u32 _mali_osk_copy_from_user(void *to, void *from, u32 n)
+{
+ return (u32)copy_from_user(to, from, (unsigned long)n);
+}
+
+MALI_STATIC_INLINE mali_bool _mali_osk_in_atomic(void)
+{
+ return in_atomic();
+}
+
+#define _mali_osk_put_user(x, ptr) put_user(x, ptr)
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_time.c b/drivers/gpu/arm/mali400/linux/mali_osk_time.c
--- a/drivers/gpu/arm/mali400/linux/mali_osk_time.c 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_osk_time.c 2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2010, 2013-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb)
+{
+ return time_after_eq(ticka, tickb) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+unsigned long _mali_osk_time_mstoticks(u32 ms)
+{
+ return msecs_to_jiffies(ms);
+}
+
+u32 _mali_osk_time_tickstoms(unsigned long ticks)
+{
+ return jiffies_to_msecs(ticks);
+}
+
+unsigned long _mali_osk_time_tickcount(void)
+{
+ return jiffies;
+}
+
+void _mali_osk_time_ubusydelay(u32 usecs)
+{
+ udelay(usecs);
+}
+
+u64 _mali_osk_time_get_ns(void)
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
+
+u64 _mali_osk_boot_time_get_ns(void)
+{
+ struct timespec tsval;
+ get_monotonic_boottime(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_timers.c b/drivers/gpu/arm/mali400/linux/mali_osk_timers.c
--- a/drivers/gpu/arm/mali400/linux/mali_osk_timers.c 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_osk_timers.c 2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +/** + * @file mali_osk_timers.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include +#include +#include "mali_osk.h" +#include "mali_kernel_common.h" + +struct _mali_osk_timer_t_struct { + struct timer_list timer; +}; + +typedef void (*timer_timeout_function_t)(unsigned long); + +_mali_osk_timer_t *_mali_osk_timer_init(void) +{ + _mali_osk_timer_t *t = (_mali_osk_timer_t *)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL); + if (NULL != t) init_timer(&t->timer); + return t; +} + +void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire) +{ + MALI_DEBUG_ASSERT_POINTER(tim); + tim->timer.expires = jiffies + ticks_to_expire; + add_timer(&(tim->timer)); +} + +void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire) +{ + MALI_DEBUG_ASSERT_POINTER(tim); + mod_timer(&(tim->timer), jiffies + ticks_to_expire); +} + +void _mali_osk_timer_del(_mali_osk_timer_t *tim) +{ + MALI_DEBUG_ASSERT_POINTER(tim); + del_timer_sync(&(tim->timer)); +} + +void _mali_osk_timer_del_async(_mali_osk_timer_t *tim) +{ + MALI_DEBUG_ASSERT_POINTER(tim); + del_timer(&(tim->timer)); +} + +mali_bool _mali_osk_timer_pending(_mali_osk_timer_t *tim) +{ + MALI_DEBUG_ASSERT_POINTER(tim); + return 1 == timer_pending(&(tim->timer)); +} + +void _mali_osk_timer_setcallback(_mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data) +{ + MALI_DEBUG_ASSERT_POINTER(tim); + tim->timer.data = (unsigned long)data; + tim->timer.function = (timer_timeout_function_t)callback; +} + +void _mali_osk_timer_term(_mali_osk_timer_t *tim) +{ + MALI_DEBUG_ASSERT_POINTER(tim); + kfree(tim); +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_wait_queue.c b/drivers/gpu/arm/mali400/linux/mali_osk_wait_queue.c --- a/drivers/gpu/arm/mali400/linux/mali_osk_wait_queue.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_osk_wait_queue.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+/**
+ * @file mali_osk_wait_queue.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+struct _mali_osk_wait_queue_t_struct {
+ wait_queue_head_t wait_queue;
+};
+
+_mali_osk_wait_queue_t *_mali_osk_wait_queue_init(void)
+{
+ _mali_osk_wait_queue_t *ret = NULL;
+
+ ret = kmalloc(sizeof(_mali_osk_wait_queue_t), GFP_KERNEL);
+
+ if (NULL == ret) {
+ return ret;
+ }
+
+ init_waitqueue_head(&ret->wait_queue);
+ MALI_DEBUG_ASSERT(!waitqueue_active(&ret->wait_queue));
+
+ return ret;
+}
+
+void _mali_osk_wait_queue_wait_event(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event(queue->wait_queue, condition(data));
+}
+
+void _mali_osk_wait_queue_wait_event_timeout(_mali_osk_wait_queue_t *queue, mali_bool(*condition)(void *), void *data, u32 timeout)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+ MALI_DEBUG_PRINT(6, ("Adding to wait queue %p\n", queue));
+ wait_event_timeout(queue->wait_queue, condition(data), _mali_osk_time_mstoticks(timeout));
+}
+
+void _mali_osk_wait_queue_wake_up(_mali_osk_wait_queue_t *queue)
+{
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* if queue is empty, don't attempt to wake up its elements */
+ if (!waitqueue_active(&queue->wait_queue)) return;
+
+ MALI_DEBUG_PRINT(6, ("Waking up elements in wait queue %p ....\n", queue));
+
+ wake_up_all(&queue->wait_queue);
+
+ MALI_DEBUG_PRINT(6, ("... elements in wait queue %p woken up\n", queue));
+}
+
+void _mali_osk_wait_queue_term(_mali_osk_wait_queue_t *queue)
+{
+ /* Parameter validation */
+ MALI_DEBUG_ASSERT_POINTER(queue);
+
+ /* Linux requires no explicit termination of wait queues */
+ kfree(queue);
+}
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_osk_wq.c b/drivers/gpu/arm/mali400/linux/mali_osk_wq.c
--- a/drivers/gpu/arm/mali400/linux/mali_osk_wq.c 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_osk_wq.c 2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2010-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */ + +/** + * @file mali_osk_wq.c + * Implementation of the OS abstraction layer for the kernel device driver + */ + +#include /* For memory allocation */ +#include +#include +#include + +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_kernel_license.h" +#include "mali_kernel_linux.h" + +typedef struct _mali_osk_wq_work_s { + _mali_osk_wq_work_handler_t handler; + void *data; + mali_bool high_pri; + struct work_struct work_handle; +} mali_osk_wq_work_object_t; + +typedef struct _mali_osk_wq_delayed_work_s { + _mali_osk_wq_work_handler_t handler; + void *data; + struct delayed_work work; +} mali_osk_wq_delayed_work_object_t; + +#if MALI_LICENSE_IS_GPL +static struct workqueue_struct *mali_wq_normal = NULL; +static struct workqueue_struct *mali_wq_high = NULL; +#endif + +static void _mali_osk_wq_work_func(struct work_struct *work); + +_mali_osk_errcode_t _mali_osk_wq_init(void) +{ +#if MALI_LICENSE_IS_GPL + MALI_DEBUG_ASSERT(NULL == mali_wq_normal); + MALI_DEBUG_ASSERT(NULL == mali_wq_high); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) + mali_wq_normal = alloc_workqueue("mali", WQ_UNBOUND, 0); + mali_wq_high = alloc_workqueue("mali_high_pri", WQ_HIGHPRI | WQ_UNBOUND, 0); +#else + mali_wq_normal = create_workqueue("mali"); + mali_wq_high = create_workqueue("mali_high_pri"); +#endif + if (NULL == mali_wq_normal || NULL == mali_wq_high) { + MALI_PRINT_ERROR(("Unable to create Mali workqueues\n")); + + if (mali_wq_normal) destroy_workqueue(mali_wq_normal); + if (mali_wq_high) destroy_workqueue(mali_wq_high); + + mali_wq_normal = NULL; + mali_wq_high = NULL; + + return _MALI_OSK_ERR_FAULT; + } +#endif /* MALI_LICENSE_IS_GPL */ + + return _MALI_OSK_ERR_OK; +} + +void _mali_osk_wq_flush(void) +{ +#if MALI_LICENSE_IS_GPL + flush_workqueue(mali_wq_high); + flush_workqueue(mali_wq_normal); +#else + flush_scheduled_work(); +#endif +} + +void _mali_osk_wq_term(void) +{ +#if MALI_LICENSE_IS_GPL + MALI_DEBUG_ASSERT(NULL != mali_wq_normal); + MALI_DEBUG_ASSERT(NULL != mali_wq_high); + + flush_workqueue(mali_wq_normal); + destroy_workqueue(mali_wq_normal); + + flush_workqueue(mali_wq_high); + destroy_workqueue(mali_wq_high); + + mali_wq_normal = NULL; + mali_wq_high = NULL; +#else + flush_scheduled_work(); +#endif +} + +_mali_osk_wq_work_t *_mali_osk_wq_create_work(_mali_osk_wq_work_handler_t handler, void *data) +{ + mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL); + + if (NULL == work) return NULL; + + work->handler = handler; + work->data = data; + work->high_pri = MALI_FALSE; + + INIT_WORK(&work->work_handle, _mali_osk_wq_work_func); + + return work; +} + +_mali_osk_wq_work_t *_mali_osk_wq_create_work_high_pri(_mali_osk_wq_work_handler_t handler, void *data) +{ + mali_osk_wq_work_object_t *work = kmalloc(sizeof(mali_osk_wq_work_object_t), GFP_KERNEL); + + if (NULL == work) return NULL; + + work->handler = handler; + work->data = data; + work->high_pri = MALI_TRUE; + + INIT_WORK(&work->work_handle, _mali_osk_wq_work_func); + + return work; +} + +void _mali_osk_wq_delete_work(_mali_osk_wq_work_t *work) +{ + mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work; + _mali_osk_wq_flush(); + kfree(work_object); +} + +void _mali_osk_wq_delete_work_nonflush(_mali_osk_wq_work_t *work) +{ + mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work; + kfree(work_object); +} + +void _mali_osk_wq_schedule_work(_mali_osk_wq_work_t *work) +{ + mali_osk_wq_work_object_t *work_object = 
(mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq_normal, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+void _mali_osk_wq_schedule_work_high_pri(_mali_osk_wq_work_t *work)
+{
+ mali_osk_wq_work_object_t *work_object = (mali_osk_wq_work_object_t *)work;
+#if MALI_LICENSE_IS_GPL
+ queue_work(mali_wq_high, &work_object->work_handle);
+#else
+ schedule_work(&work_object->work_handle);
+#endif
+}
+
+static void _mali_osk_wq_work_func(struct work_struct *work)
+{
+ mali_osk_wq_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_work_object_t, work_handle);
+
+#if MALI_LICENSE_IS_GPL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+ /* We want the highest dynamic priority for this thread so that jobs depending
+ ** on it can be scheduled in time. Without this, the thread might sometimes
+ ** need to wait for user-mode threads to finish their round-robin time slice,
+ ** causing a *bubble* in the Mali pipeline. Thanks to the high-priority
+ ** workqueue implementation in newer kernels, this is only needed on older kernels.
+ */
+ if (MALI_TRUE == work_object->high_pri) {
+ set_user_nice(current, -19);
+ }
+#endif
+#endif /* MALI_LICENSE_IS_GPL */
+
+ work_object->handler(work_object->data);
+}
+
+static void _mali_osk_wq_delayed_work_func(struct work_struct *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object;
+
+ work_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_wq_delayed_work_object_t, work.work);
+ work_object->handler(work_object->data);
+}
+
+mali_osk_wq_delayed_work_object_t *_mali_osk_wq_delayed_create_work(_mali_osk_wq_work_handler_t handler, void *data)
+{
+ mali_osk_wq_delayed_work_object_t *work = kmalloc(sizeof(mali_osk_wq_delayed_work_object_t), GFP_KERNEL);
+
+ if (NULL == work) return NULL;
+
+ work->handler = handler;
+ work->data = data;
+
+ INIT_DELAYED_WORK(&work->work, _mali_osk_wq_delayed_work_func);
+
+ return work;
+}
+
+void _mali_osk_wq_delayed_delete_work_nonflush(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ kfree(work_object);
+}
+
+void _mali_osk_wq_delayed_cancel_work_async(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ cancel_delayed_work(&work_object->work);
+}
+
+void _mali_osk_wq_delayed_cancel_work_sync(_mali_osk_wq_delayed_work_t *work)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+ cancel_delayed_work_sync(&work_object->work);
+}
+
+void _mali_osk_wq_delayed_schedule_work(_mali_osk_wq_delayed_work_t *work, u32 delay)
+{
+ mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
+
+#if MALI_LICENSE_IS_GPL
+ queue_delayed_work(mali_wq_normal, &work_object->work, delay);
+#else
+ schedule_delayed_work(&work_object->work, delay);
+#endif
+
+}
diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_pmu_power_up_down.c b/drivers/gpu/arm/mali400/linux/mali_pmu_power_up_down.c
--- a/drivers/gpu/arm/mali400/linux/mali_pmu_power_up_down.c 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/linux/mali_pmu_power_up_down.c 2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,23 @@
+/**
+ * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved.
+ * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_pmu_power_up_down.c + */ + +#include +#include "mali_executor.h" + +int mali_perf_set_num_pp_cores(unsigned int num_cores) +{ + return mali_executor_set_perf_level(num_cores, MALI_FALSE); +} + +EXPORT_SYMBOL(mali_perf_set_num_pp_cores); diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_profiling_events.h b/drivers/gpu/arm/mali400/linux/mali_profiling_events.h --- a/drivers/gpu/arm/mali400/linux/mali_profiling_events.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_profiling_events.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2012, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PROFILING_EVENTS_H__ +#define __MALI_PROFILING_EVENTS_H__ + +/* Simple wrapper in order to find the OS specific location of this file */ +#include + +#endif /* __MALI_PROFILING_EVENTS_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_profiling_gator_api.h b/drivers/gpu/arm/mali400/linux/mali_profiling_gator_api.h --- a/drivers/gpu/arm/mali400/linux/mali_profiling_gator_api.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_profiling_gator_api.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2012-2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PROFILING_GATOR_API_H__ +#define __MALI_PROFILING_GATOR_API_H__ + +/* Simple wrapper in order to find the OS specific location of this file */ +#include + +#endif /* __MALI_PROFILING_GATOR_API_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_profiling_internal.c b/drivers/gpu/arm/mali400/linux/mali_profiling_internal.c --- a/drivers/gpu/arm/mali400/linux/mali_profiling_internal.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_profiling_internal.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,275 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. 
+ * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_kernel_common.h" +#include "mali_osk.h" +#include "mali_osk_mali.h" +#include "mali_ukk.h" +#include "mali_timestamp.h" +#include "mali_osk_profiling.h" +#include "mali_user_settings_db.h" +#include "mali_profiling_internal.h" + +typedef struct mali_profiling_entry { + u64 timestamp; + u32 event_id; + u32 data[5]; +} mali_profiling_entry; + +typedef enum mali_profiling_state { + MALI_PROFILING_STATE_UNINITIALIZED, + MALI_PROFILING_STATE_IDLE, + MALI_PROFILING_STATE_RUNNING, + MALI_PROFILING_STATE_RETURN, +} mali_profiling_state; + +static _mali_osk_mutex_t *lock = NULL; +static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED; +static mali_profiling_entry *profile_entries = NULL; +static _mali_osk_atomic_t profile_insert_index; +static u32 profile_mask = 0; + +static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4); + +void probe_mali_timeline_event(void *data, TP_PROTO(unsigned int event_id, unsigned int d0, unsigned int d1, unsigned + int d2, unsigned int d3, unsigned int d4)) +{ + add_event(event_id, d0, d1, d2, d3, d4); +} + +_mali_osk_errcode_t _mali_internal_profiling_init(mali_bool auto_start) +{ + profile_entries = NULL; + profile_mask = 0; + _mali_osk_atomic_init(&profile_insert_index, 0); + + lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PROFILING); + if (NULL == lock) { + return _MALI_OSK_ERR_FAULT; + } + + prof_state = MALI_PROFILING_STATE_IDLE; + + if (MALI_TRUE == auto_start) { + u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */ + + mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE); + if (_MALI_OSK_ERR_OK != _mali_internal_profiling_start(&limit)) { + return _MALI_OSK_ERR_FAULT; + } + } + + return _MALI_OSK_ERR_OK; +} + +void _mali_internal_profiling_term(void) +{ + u32 count; + + /* Ensure profiling is stopped */ + _mali_internal_profiling_stop(&count); + + prof_state = MALI_PROFILING_STATE_UNINITIALIZED; + + if (NULL != profile_entries) { + _mali_osk_vfree(profile_entries); + profile_entries = NULL; + } + + if (NULL != lock) { + _mali_osk_mutex_term(lock); + lock = NULL; + } +} + +_mali_osk_errcode_t _mali_internal_profiling_start(u32 *limit) +{ + _mali_osk_errcode_t ret; + mali_profiling_entry *new_profile_entries; + + _mali_osk_mutex_wait(lock); + + if (MALI_PROFILING_STATE_RUNNING == prof_state) { + _mali_osk_mutex_signal(lock); + return _MALI_OSK_ERR_BUSY; + } + + new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry)); + + if (NULL == new_profile_entries) { + _mali_osk_mutex_signal(lock); + _mali_osk_vfree(new_profile_entries); + return _MALI_OSK_ERR_NOMEM; + } + + if (MALI_PROFILING_MAX_BUFFER_ENTRIES < *limit) { + *limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; + } + + profile_mask = 1; + while (profile_mask <= *limit) { + profile_mask <<= 1; + } + profile_mask >>= 1; + + *limit = profile_mask; + + profile_mask--; /* turns the power of two into a mask of one less */ + + if (MALI_PROFILING_STATE_IDLE != prof_state) { + _mali_osk_mutex_signal(lock); + _mali_osk_vfree(new_profile_entries); + return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */ + } + + profile_entries = new_profile_entries; + + ret = _mali_timestamp_reset(); + + if (_MALI_OSK_ERR_OK == ret) { + prof_state = 
MALI_PROFILING_STATE_RUNNING;
+ } else {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ register_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_mutex_signal(lock);
+ return ret;
+}
+
+static inline void add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+ u32 cur_index = (_mali_osk_atomic_inc_return(&profile_insert_index) - 1) & profile_mask;
+
+ profile_entries[cur_index].timestamp = _mali_timestamp_get();
+ profile_entries[cur_index].event_id = event_id;
+ profile_entries[cur_index].data[0] = data0;
+ profile_entries[cur_index].data[1] = data1;
+ profile_entries[cur_index].data[2] = data2;
+ profile_entries[cur_index].data[3] = data3;
+ profile_entries[cur_index].data[4] = data4;
+
+ /* If event is "leave API function", add current memory usage to the event
+ * as data point 4. This is used in timeline profiling to indicate how
+ * much memory was used when leaving a function. */
+ if (event_id == (MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_SINGLE_SW_LEAVE_API_FUNC)) {
+ profile_entries[cur_index].data[4] = _mali_ukk_report_memory_usage();
+ }
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_stop(u32 *count)
+{
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RUNNING != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ /* go into return state (user to retrieve events); no more events will be added after this */
+ prof_state = MALI_PROFILING_STATE_RETURN;
+
+ unregister_trace_mali_timeline_event(probe_mali_timeline_event, NULL);
+
+ _mali_osk_mutex_signal(lock);
+
+ tracepoint_synchronize_unregister();
+
+ *count = _mali_osk_atomic_read(&profile_insert_index);
+ if (*count > profile_mask) *count = profile_mask;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+u32 _mali_internal_profiling_get_count(void)
+{
+ u32 retval = 0;
+
+ _mali_osk_mutex_wait(lock);
+ if (MALI_PROFILING_STATE_RETURN == prof_state) {
+ retval = _mali_osk_atomic_read(&profile_insert_index);
+ if (retval > profile_mask) retval = profile_mask;
+ }
+ _mali_osk_mutex_signal(lock);
+
+ return retval;
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5])
+{
+ u32 raw_index = _mali_osk_atomic_read(&profile_insert_index);
+
+ _mali_osk_mutex_wait(lock);
+
+ if (index < profile_mask) {
+ if ((raw_index & ~profile_mask) != 0) {
+ index += raw_index;
+ index &= profile_mask;
+ }
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= raw_index) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ *timestamp = profile_entries[index].timestamp;
+ *event_id = profile_entries[index].event_id;
+ data[0] = profile_entries[index].data[0];
+ data[1] = profile_entries[index].data[1];
+ data[2] = profile_entries[index].data[2];
+ data[3] = profile_entries[index].data[3];
+ data[4] = profile_entries[index].data[4];
+ } else {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_internal_profiling_clear(void)
+{
+ _mali_osk_mutex_wait(lock);
+
+ if (MALI_PROFILING_STATE_RETURN != prof_state) {
+ _mali_osk_mutex_signal(lock);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in
this state */ + } + + prof_state = MALI_PROFILING_STATE_IDLE; + profile_mask = 0; + _mali_osk_atomic_init(&profile_insert_index, 0); + + if (NULL != profile_entries) { + _mali_osk_vfree(profile_entries); + profile_entries = NULL; + } + + _mali_osk_mutex_signal(lock); + return _MALI_OSK_ERR_OK; +} + +mali_bool _mali_internal_profiling_is_recording(void) +{ + return prof_state == MALI_PROFILING_STATE_RUNNING ? MALI_TRUE : MALI_FALSE; +} + +mali_bool _mali_internal_profiling_have_recording(void) +{ + return prof_state == MALI_PROFILING_STATE_RETURN ? MALI_TRUE : MALI_FALSE; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_profiling_internal.h b/drivers/gpu/arm/mali400/linux/mali_profiling_internal.h --- a/drivers/gpu/arm/mali400/linux/mali_profiling_internal.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_profiling_internal.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_PROFILING_INTERNAL_H__ +#define __MALI_PROFILING_INTERNAL_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "mali_osk.h" + +int _mali_internal_profiling_init(mali_bool auto_start); +void _mali_internal_profiling_term(void); + +mali_bool _mali_internal_profiling_is_recording(void); +mali_bool _mali_internal_profiling_have_recording(void); +_mali_osk_errcode_t _mali_internal_profiling_clear(void); +_mali_osk_errcode_t _mali_internal_profiling_get_event(u32 index, u64 *timestamp, u32 *event_id, u32 data[5]); +u32 _mali_internal_profiling_get_count(void); +int _mali_internal_profiling_stop(u32 *count); +int _mali_internal_profiling_start(u32 *limit); + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_PROFILING_INTERNAL_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_sync.c b/drivers/gpu/arm/mali400/linux/mali_sync.c --- a/drivers/gpu/arm/mali400/linux/mali_sync.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_sync.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,451 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_sync.h" + +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_timeline.h" +#include "mali_executor.h" + +#include +#include +#include + +struct mali_sync_pt { + struct sync_pt sync_pt; + struct mali_sync_flag *flag; + struct sync_timeline *sync_tl; /**< Sync timeline this pt is connected to. */ +}; + +/** + * The sync flag is used to connect sync fences to the Mali Timeline system. Sync fences can be + * created from a sync flag, and when the flag is signaled, the sync fences will also be signaled. 
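+ *
+ * (Editor's note on the intended lifecycle, per the functions defined below:
+ * mali_sync_flag_create() -> mali_sync_flag_create_fence() for each fence ->
+ * mali_sync_flag_signal() exactly once -> mali_sync_flag_put().)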
+ */
+struct mali_sync_flag {
+ struct sync_timeline *sync_tl; /**< Sync timeline this flag is connected to. */
+ u32 point; /**< Point on timeline. */
+ int status; /**< 0 if unsignaled, 1 if signaled without error, or negative if signaled with error. */
+ struct kref refcount; /**< Reference count. */
+};
+
+/**
+ * The Mali sync timeline container connects a Mali timeline to a sync_timeline,
+ * so that when a fence times out, more detailed Mali timeline system info can be printed.
+ */
+struct mali_sync_timeline_container {
+ struct sync_timeline sync_timeline;
+ struct mali_timeline *timeline;
+};
+
+MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
+{
+ return container_of(pt, struct mali_sync_pt, sync_pt);
+}
+
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct sync_timeline *sync_tl)
+{
+ return container_of(sync_tl, struct mali_sync_timeline_container, sync_timeline);
+}
+
+static struct sync_pt *timeline_dup(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt, *new_mpt;
+ struct sync_pt *new_pt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ new_pt = sync_pt_create(mpt->sync_tl, sizeof(struct mali_sync_pt));
+ if (NULL == new_pt) return NULL;
+
+ new_mpt = to_mali_sync_pt(new_pt);
+
+ mali_sync_flag_get(mpt->flag);
+ new_mpt->flag = mpt->flag;
+ new_mpt->sync_tl = mpt->sync_tl;
+
+ return new_pt;
+}
+
+static int timeline_has_signaled(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ MALI_DEBUG_ASSERT_POINTER(mpt->flag);
+
+ return mpt->flag->status;
+}
+
+static int timeline_compare(struct sync_pt *pta, struct sync_pt *ptb)
+{
+ struct mali_sync_pt *mpta;
+ struct mali_sync_pt *mptb;
+ u32 a, b;
+
+ MALI_DEBUG_ASSERT_POINTER(pta);
+ MALI_DEBUG_ASSERT_POINTER(ptb);
+ mpta = to_mali_sync_pt(pta);
+ mptb = to_mali_sync_pt(ptb);
+
+ MALI_DEBUG_ASSERT_POINTER(mpta->flag);
+ MALI_DEBUG_ASSERT_POINTER(mptb->flag);
+
+ a = mpta->flag->point;
+ b = mptb->flag->point;
+
+ if (a == b) return 0;
+
+ return ((b - a) < (a - b) ?
-1 : 1);
+}
+
+static void timeline_free_pt(struct sync_pt *pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(pt);
+ mpt = to_mali_sync_pt(pt);
+
+ mali_sync_flag_put(mpt->flag);
+}
+
+static void timeline_release(struct sync_timeline *sync_timeline)
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_timeline);
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_timeline);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+ /* the always-signaled timeline does not have a mali timeline container */
+ if (mali_tl) {
+ if (NULL != mali_tl->spinlock) {
+ mali_spinlock_reentrant_term(mali_tl->spinlock);
+ }
+ _mali_osk_free(mali_tl);
+ }
+
+ module_put(THIS_MODULE);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(s);
+ MALI_DEBUG_ASSERT_POINTER(sync_pt);
+
+ mpt = to_mali_sync_pt(sync_pt);
+
+ /* It is possible this sync point is still under construction;
+ * make sure the flag is valid before accessing it
+ */
+ if (mpt->flag) {
+ seq_printf(s, "%u", mpt->flag->point);
+ } else {
+ seq_printf(s, "uninitialized");
+ }
+}
+
+static void timeline_print_obj(struct seq_file *s, struct sync_timeline *sync_tl)
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+ if (NULL != mali_tl) {
+ seq_printf(s, "oldest (%u) ", mali_tl->point_oldest);
+ seq_printf(s, "next (%u)", mali_tl->point_next);
+ seq_printf(s, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+ {
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_system *system = mali_tl->system;
+
+ mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+ if (!mali_tl->destroyed) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_debug_print_timeline(mali_tl, s);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ }
+ mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+ /* dump job queue status and group running status */
+ mali_executor_status_dump();
+ }
+#endif
+ }
+}
+#else
+static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(str);
+ MALI_DEBUG_ASSERT_POINTER(pt);
+
+ mpt = to_mali_sync_pt(pt);
+
+ /* It is possible this sync point is still under construction;
+ * make sure the flag is valid before accessing it
+ */
+ if (mpt->flag) {
+ _mali_osk_snprintf(str, size, "%u", mpt->flag->point);
+ } else {
+ _mali_osk_snprintf(str, size, "uninitialized");
+ }
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str, int size)
+{
+ struct mali_sync_timeline_container *mali_sync_tl = NULL;
+ struct mali_timeline *mali_tl = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ mali_sync_tl = to_mali_sync_tl_container(timeline);
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ mali_tl = mali_sync_tl->timeline;
+
+ if (NULL != mali_tl) {
+ _mali_osk_snprintf(str, size, "oldest (%u) ", mali_tl->point_oldest);
+ _mali_osk_snprintf(str, size, "next (%u)", mali_tl->point_next);
+ _mali_osk_snprintf(str, size, "\n");
+
+#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
+ {
+ u32 tid = _mali_osk_get_tid();
+ struct mali_timeline_system *system =
mali_tl->system;
+
+ mali_spinlock_reentrant_wait(mali_tl->spinlock, tid);
+ if (!mali_tl->destroyed) {
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+ mali_timeline_debug_direct_print_timeline(mali_tl);
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+ }
+ mali_spinlock_reentrant_signal(mali_tl->spinlock, tid);
+
+ /* dump job queue status and group running status */
+ mali_executor_status_dump();
+ }
+#endif
+ }
+}
+#endif
+
+
+static struct sync_timeline_ops mali_timeline_ops = {
+ .driver_name = "Mali",
+ .dup = timeline_dup,
+ .has_signaled = timeline_has_signaled,
+ .compare = timeline_compare,
+ .free_pt = timeline_free_pt,
+ .release_obj = timeline_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+ .print_pt = timeline_print_pt,
+ .print_obj = timeline_print_obj,
+#else
+ .pt_value_str = timeline_pt_value_str,
+ .timeline_value_str = timeline_value_str,
+#endif
+};
+
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
+{
+ struct sync_timeline *sync_tl;
+ struct mali_sync_timeline_container *mali_sync_tl;
+
+ sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
+ if (NULL == sync_tl) return NULL;
+
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ mali_sync_tl->timeline = timeline;
+
+ /* Grab a reference on the module to ensure the callbacks are present
+ * as long as some timeline exists. The reference is released when the
+ * timeline is freed.
+ * Since this function is called from an ioctl on an open file we know
+ * we already have a reference, so using __module_get is safe. */
+ __module_get(THIS_MODULE);
+
+ return sync_tl;
+}
+
+s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence)
+{
+ s32 fd = -1;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
+ fd = get_unused_fd();
+#else
+ fd = get_unused_fd_flags(0);
+#endif
+
+ if (fd < 0) {
+ sync_fence_put(sync_fence);
+ return -1;
+ }
+ sync_fence_install(sync_fence, fd);
+
+ return fd;
+}
+
+struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2)
+{
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_fence1);
+ MALI_DEBUG_ASSERT_POINTER(sync_fence2);
+
+ sync_fence = sync_fence_merge("mali_merge_fence", sync_fence1, sync_fence2);
+ sync_fence_put(sync_fence1);
+ sync_fence_put(sync_fence2);
+
+ return sync_fence;
+}
+
+struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl)
+{
+ struct mali_sync_flag *flag;
+ struct sync_fence *sync_fence;
+
+ MALI_DEBUG_ASSERT_POINTER(sync_tl);
+
+ flag = mali_sync_flag_create(sync_tl, 0);
+ if (NULL == flag) return NULL;
+
+ sync_fence = mali_sync_flag_create_fence(flag);
+
+ mali_sync_flag_signal(flag, 0);
+ mali_sync_flag_put(flag);
+
+ return sync_fence;
+}
+
+struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, mali_timeline_point point)
+{
+ struct mali_sync_flag *flag;
+
+ if (NULL == sync_tl) return NULL;
+
+ flag = _mali_osk_calloc(1, sizeof(*flag));
+ if (NULL == flag) return NULL;
+
+ flag->sync_tl = sync_tl;
+ flag->point = point;
+
+ flag->status = 0;
+ kref_init(&flag->refcount);
+
+ return flag;
+}
+
+void mali_sync_flag_get(struct mali_sync_flag *flag)
+{
+ MALI_DEBUG_ASSERT_POINTER(flag);
+ kref_get(&flag->refcount);
+}
+
+/**
+ * Free sync flag.
+ *
+ * @param ref kref object embedded in sync flag that should be freed.
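+ * (Editor's note: this is the kref release callback; kref_put() in
+ * mali_sync_flag_put() below invokes it once the last reference is dropped.)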
+ */ +static void mali_sync_flag_free(struct kref *ref) +{ + struct mali_sync_flag *flag; + + MALI_DEBUG_ASSERT_POINTER(ref); + flag = container_of(ref, struct mali_sync_flag, refcount); + + _mali_osk_free(flag); +} + +void mali_sync_flag_put(struct mali_sync_flag *flag) +{ + MALI_DEBUG_ASSERT_POINTER(flag); + kref_put(&flag->refcount, mali_sync_flag_free); +} + +void mali_sync_flag_signal(struct mali_sync_flag *flag, int error) +{ + MALI_DEBUG_ASSERT_POINTER(flag); + + MALI_DEBUG_ASSERT(0 == flag->status); + flag->status = (0 > error) ? error : 1; + + _mali_osk_write_mem_barrier(); + + sync_timeline_signal(flag->sync_tl); +} + +/** + * Create a sync point attached to given sync flag. + * + * @note Sync points must be triggered in *exactly* the same order as they are created. + * + * @param flag Sync flag. + * @return New sync point if successful, NULL if not. + */ +static struct sync_pt *mali_sync_flag_create_pt(struct mali_sync_flag *flag) +{ + struct sync_pt *pt; + struct mali_sync_pt *mpt; + + MALI_DEBUG_ASSERT_POINTER(flag); + MALI_DEBUG_ASSERT_POINTER(flag->sync_tl); + + pt = sync_pt_create(flag->sync_tl, sizeof(struct mali_sync_pt)); + if (NULL == pt) return NULL; + + mali_sync_flag_get(flag); + + mpt = to_mali_sync_pt(pt); + mpt->flag = flag; + mpt->sync_tl = flag->sync_tl; + + return pt; +} + +struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag) +{ + struct sync_pt *sync_pt; + struct sync_fence *sync_fence; + + MALI_DEBUG_ASSERT_POINTER(flag); + MALI_DEBUG_ASSERT_POINTER(flag->sync_tl); + + sync_pt = mali_sync_flag_create_pt(flag); + if (NULL == sync_pt) return NULL; + + sync_fence = sync_fence_create("mali_flag_fence", sync_pt); + if (NULL == sync_fence) { + sync_pt_free(sync_pt); + return NULL; + } + + return sync_fence; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_sync.h b/drivers/gpu/arm/mali400/linux/mali_sync.h --- a/drivers/gpu/arm/mali400/linux/mali_sync.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_sync.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_sync.h + * + * Mali interface for Linux sync objects. + */ + +#ifndef _MALI_SYNC_H_ +#define _MALI_SYNC_H_ + +#if defined(CONFIG_SYNC) + +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) +#include +#else +#include +#endif + + +#include "mali_osk.h" + +struct mali_sync_flag; +struct mali_timeline; + +/** + * Create a sync timeline. + * + * @param name Name of the sync timeline. + * @return The new sync timeline if successful, NULL if not. + */ +struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name); + +/** + * Creates a file descriptor representing the sync fence. Will release sync fence if allocation of + * file descriptor fails. + * + * @param sync_fence Sync fence. + * @return File descriptor representing sync fence if successful, or -1 if not. + */ +s32 mali_sync_fence_fd_alloc(struct sync_fence *sync_fence); + +/** + * Merges two sync fences. 
Both input sync fences will be released. + * + * @param sync_fence1 First sync fence. + * @param sync_fence2 Second sync fence. + * @return New sync fence that is the result of the merger if successful, or NULL if not. + */ +struct sync_fence *mali_sync_fence_merge(struct sync_fence *sync_fence1, struct sync_fence *sync_fence2); + +/** + * Create a sync fence that is already signaled. + * + * @param tl Sync timeline. + * @return New signaled sync fence if successful, NULL if not. + */ +struct sync_fence *mali_sync_timeline_create_signaled_fence(struct sync_timeline *sync_tl); + +/** + * Create a sync flag. + * + * @param sync_tl Sync timeline. + * @param point Point on Mali timeline. + * @return New sync flag if successful, NULL if not. + */ +struct mali_sync_flag *mali_sync_flag_create(struct sync_timeline *sync_tl, u32 point); + +/** + * Grab sync flag reference. + * + * @param flag Sync flag. + */ +void mali_sync_flag_get(struct mali_sync_flag *flag); + +/** + * Release sync flag reference. If this was the last reference, the sync flag will be freed. + * + * @param flag Sync flag. + */ +void mali_sync_flag_put(struct mali_sync_flag *flag); + +/** + * Signal sync flag. All sync fences created from this flag will be signaled. + * + * @param flag Sync flag to signal. + * @param error Negative error code, or 0 if no error. + */ +void mali_sync_flag_signal(struct mali_sync_flag *flag, int error); + +/** + * Create a sync fence attached to given sync flag. + * + * @param flag Sync flag. + * @return New sync fence if successful, NULL if not. + */ +struct sync_fence *mali_sync_flag_create_fence(struct mali_sync_flag *flag); + +#endif /* defined(CONFIG_SYNC) */ + +#endif /* _MALI_SYNC_H_ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_core.c b/drivers/gpu/arm/mali400/linux/mali_ukk_core.c --- a/drivers/gpu/arm/mali400/linux/mali_ukk_core.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_core.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memory allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_ukk_wrappers.h"
+
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs)
+{
+ _mali_uk_get_api_version_v2_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_api_version_v2(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type) {
+ kargs.ctx = (uintptr_t)NULL; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ } else {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type)) {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs)
+{
+ _mali_uk_get_user_settings_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err = _mali_ukk_get_user_settings(&kargs);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
+ }
+
+ kargs.ctx = 0; /* prevent a kernel address from being returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_user_settings_s))) return -EFAULT;
+
+ return 0;
+}
+
+int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs)
+{
+ _mali_uk_request_high_priority_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = (uintptr_t)session_data;
+ err =
_mali_ukk_request_high_priority(&kargs); + + kargs.ctx = 0; + + return map_errcode(err); +} + +int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs) +{ + _mali_uk_pending_submit_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_pending_submit(&kargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + return 0; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_gp.c b/drivers/gpu/arm/mali400/linux/mali_ukk_gp.c --- a/drivers/gpu/arm/mali400/linux/mali_ukk_gp.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_gp.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include /* file system operations */ +#include /* user space access */ + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_ukk_wrappers.h" + +int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs) +{ + _mali_osk_errcode_t err; + + /* If the job was started successfully, 0 is returned. If there was an error, but the job + * was started, we return -ENOENT. For anything else returned, the job was not started. 
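+ * (Editor's note: map_errcode() translates the driver's _mali_osk_errcode_t
+ * values into negative errno codes, e.g. _MALI_OSK_ERR_NOMEM to -ENOMEM and
+ * _MALI_OSK_ERR_FAULT to -EFAULT; its definition is not part of this hunk.)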
*/ + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + err = _mali_ukk_gp_start_job(session_data, uargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + return 0; +} + +int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs) +{ + _mali_uk_get_gp_core_version_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_get_gp_core_version(&kargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + /* no known transactions to roll-back */ + + if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT; + + return 0; +} + +int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs) +{ + _mali_uk_gp_suspend_response_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT; + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_gp_suspend_response(&kargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT; + + /* no known transactions to roll-back */ + return 0; +} + +int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs) +{ + _mali_uk_get_gp_number_of_cores_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_get_gp_number_of_cores(&kargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + /* no known transactions to roll-back */ + + if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT; + + return 0; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_mem.c b/drivers/gpu/arm/mali400/linux/mali_ukk_mem.c --- a/drivers/gpu/arm/mali400/linux/mali_ukk_mem.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_mem.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,333 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +#include <linux/fs.h> /* file system operations */ +#include <asm/uaccess.h> /* user space access */ + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_ukk_wrappers.h" + +int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs) +{ + _mali_uk_alloc_mem_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_alloc_mem_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_allocate(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) { + return -EFAULT; + } + + return 0; +} + +int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs) +{ + _mali_uk_free_mem_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_free_mem_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_free(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + if (0 != put_user(kargs.free_pages_nr, &uargs->free_pages_nr)) { + return -EFAULT; + } + + return 0; +} + +int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs) +{ + _mali_uk_bind_mem_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_bind_mem_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_bind(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + return 0; +} + +int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs) +{ + _mali_uk_unbind_mem_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_unbind_mem_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_unbind(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + return 0; +} + + +int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs) +{ + _mali_uk_cow_mem_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_mem_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_cow(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + if (0 != put_user(kargs.backend_handle, &uargs->backend_handle)) { + return -EFAULT; + } + + return 0; +} + +int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs) +{ + _mali_uk_cow_modify_range_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_cow_modify_range_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_cow_modify_range(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + 
+ if (0 != put_user(kargs.change_pages_nr, &uargs->change_pages_nr)) { + return -EFAULT; + } + return 0; +} + + +int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs) +{ + _mali_uk_mem_resize_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_resize_s))) { + return -EFAULT; + } + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_mem_resize(&kargs); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + return 0; +} + +int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs) +{ + _mali_uk_mem_write_safe_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_mem_write_safe_s))) { + return -EFAULT; + } + + kargs.ctx = (uintptr_t)session_data; + + /* Check if we can access the buffers */ + if (!access_ok(VERIFY_WRITE, kargs.dest, kargs.size) + || !access_ok(VERIFY_READ, kargs.src, kargs.size)) { + return -EINVAL; + } + + /* Check if size wraps */ + if ((kargs.size + kargs.dest) <= kargs.dest + || (kargs.size + kargs.src) <= kargs.src) { + return -EINVAL; + } + + err = _mali_ukk_mem_write_safe(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + if (0 != put_user(kargs.size, &uargs->size)) { + return -EFAULT; + } + + return 0; +} + + + +int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs) +{ + _mali_uk_query_mmu_page_table_dump_size_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_query_mmu_page_table_dump_size(&kargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT; + + return 0; +} + +int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs) +{ + _mali_uk_dump_mmu_page_table_s kargs; + _mali_osk_errcode_t err; + void __user *user_buffer; + void *buffer = NULL; + int rc = -EFAULT; + + /* validate input */ + MALI_CHECK_NON_NULL(uargs, -EINVAL); + /* the session_data pointer was validated by caller */ + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_dump_mmu_page_table_s))) + goto err_exit; + + user_buffer = (void __user *)(uintptr_t)kargs.buffer; + if (!access_ok(VERIFY_WRITE, user_buffer, kargs.size)) + goto err_exit; + + /* allocate temporary buffer (kernel side) to store mmu page table info */ + if (kargs.size <= 0) + return -EINVAL; + /* Allow at most 8MiB buffers, this is more than enough to dump a fully + * populated page table. 
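mem_write_safe_wrapper above rejects any source or destination range that would wrap around the end of the address space. Because the arithmetic is unsigned, `addr + size <= addr` is true exactly when the addition overflowed (and also when size is 0, which the wrapper likewise treats as invalid). A self-contained illustration of the same test:

    #include <assert.h>
    #include <stdint.h>

    /* Returns nonzero when [addr, addr + size) wraps past the end of the
     * address space, mirroring the overflow check in mem_write_safe_wrapper.
     * Note it also fires for size == 0, which the wrapper rejects too. */
    static int range_wraps(uint64_t addr, uint64_t size)
    {
        return (addr + size) <= addr;
    }

    int main(void)
    {
        assert(!range_wraps(0x1000, 0x100));        /* ordinary range */
        assert(range_wraps(UINT64_MAX - 16, 32));   /* wraps around zero */
        assert(range_wraps(0x1000, 0));             /* empty range rejected */
        return 0;
    }
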
*/ + if (kargs.size > SZ_8M) + return -EINVAL; + + buffer = (void *)(uintptr_t)_mali_osk_valloc(kargs.size); + if (NULL == buffer) { + rc = -ENOMEM; + goto err_exit; + } + + kargs.ctx = (uintptr_t)session_data; + kargs.buffer = (uintptr_t)buffer; + err = _mali_ukk_dump_mmu_page_table(&kargs); + if (_MALI_OSK_ERR_OK != err) { + rc = map_errcode(err); + goto err_exit; + } + + /* copy mmu page table info back to user space and update pointers */ + if (0 != copy_to_user(user_buffer, buffer, kargs.size)) + goto err_exit; + + kargs.register_writes = kargs.register_writes - + (uintptr_t)buffer + (uintptr_t)user_buffer; + kargs.page_table_dump = kargs.page_table_dump - + (uintptr_t)buffer + (uintptr_t)user_buffer; + + if (0 != copy_to_user(uargs, &kargs, sizeof(kargs))) + goto err_exit; + + rc = 0; + +err_exit: + if (buffer) _mali_osk_vfree(buffer); + return rc; +} + +int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs) +{ + _mali_osk_errcode_t err; + _mali_uk_profiling_memory_usage_get_s kargs; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) { + return -EFAULT; + } + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_mem_usage_get(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + kargs.ctx = (uintptr_t)NULL; /* prevent kernel address to be returned to user space */ + if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_memory_usage_get_s))) { + return -EFAULT; + } + + return 0; +} + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_pp.c b/drivers/gpu/arm/mali400/linux/mali_ukk_pp.c --- a/drivers/gpu/arm/mali400/linux/mali_ukk_pp.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_pp.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2010, 2012-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include <linux/fs.h> /* file system operations */ +#include <asm/uaccess.h> /* user space access */ + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_ukk_wrappers.h" + +int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs) +{ + _mali_osk_errcode_t err; + + /* If the job was started successfully, 0 is returned. If there was an error, but the job + * was started, we return -ENOENT. For anything else returned, the job was not started. */ + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + err = _mali_ukk_pp_start_job(session_data, uargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + return 0; +} + +int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs) +{ + _mali_osk_errcode_t err; + + /* If the jobs were started successfully, 0 is returned. If there was an error, but the + * jobs were started, we return -ENOENT. For anything else returned, the jobs were not + * started. 
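mem_dump_mmu_page_table_wrapper produces the dump into a kernel vmalloc buffer and then copies it to the user buffer; since register_writes and page_table_dump are pointers into the dump, they are rebased by subtracting the kernel base and adding the user base, so each keeps the same offset inside the copy. A minimal sketch of that arithmetic:

    #include <stdint.h>

    /* Rebase a pointer that points into kbuf so it refers to the same
     * byte offset inside ubuf (the user-space copy of the dump); this is
     * exactly the wrapper's "ptr - buffer + user_buffer" arithmetic. */
    static uintptr_t rebase(uintptr_t ptr, uintptr_t kbuf, uintptr_t ubuf)
    {
        return ptr - kbuf + ubuf;
    }
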
*/ + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + err = _mali_ukk_pp_and_gp_start_job(session_data, uargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + return 0; +} + +int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs) +{ + _mali_uk_get_pp_number_of_cores_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + kargs.ctx = (uintptr_t)session_data; + + err = _mali_ukk_get_pp_number_of_cores(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + kargs.ctx = (uintptr_t)NULL; /* prevent kernel address to be returned to user space */ + if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_get_pp_number_of_cores_s))) { + return -EFAULT; + } + + return 0; +} + +int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs) +{ + _mali_uk_get_pp_core_version_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_get_pp_core_version(&kargs); + if (_MALI_OSK_ERR_OK != err) return map_errcode(err); + + if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT; + + return 0; +} + +int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs) +{ + _mali_uk_pp_disable_wb_s kargs; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session_data, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_disable_wb_s))) return -EFAULT; + + kargs.ctx = (uintptr_t)session_data; + _mali_ukk_pp_job_disable_wb(&kargs); + + return 0; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_profiling.c b/drivers/gpu/arm/mali400/linux/mali_ukk_profiling.c --- a/drivers/gpu/arm/mali400/linux/mali_ukk_profiling.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_profiling.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,183 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ +#include <linux/fs.h> /* file system operations */ +#include <asm/uaccess.h> /* user space access */ +#include <linux/slab.h> + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_ukk_wrappers.h" + +int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs) +{ + _mali_uk_profiling_add_event_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s))) { + return -EFAULT; + } + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_profiling_add_event(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + return 0; +} + +int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs) +{ + _mali_uk_sw_counters_report_s kargs; + _mali_osk_errcode_t err; + u32 *counter_buffer; + u32 __user *counters; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_report_s))) { + return -EFAULT; + } + + /* make sure that kargs.num_counters is [at least somewhat] sane */ + if (kargs.num_counters > 10000) { + MALI_DEBUG_PRINT(1, ("User space attempted to allocate too many counters.\n")); + return -EINVAL; + } + + counter_buffer = (u32 *)kmalloc(sizeof(u32) * kargs.num_counters, GFP_KERNEL); + if (NULL == counter_buffer) { + return -ENOMEM; + } + + counters = (u32 *)(uintptr_t)kargs.counters; + + if (0 != copy_from_user(counter_buffer, counters, sizeof(u32) * kargs.num_counters)) { + kfree(counter_buffer); + return -EFAULT; + } + + kargs.ctx = (uintptr_t)session_data; + kargs.counters = (uintptr_t)counter_buffer; + + err = _mali_ukk_sw_counters_report(&kargs); + + kfree(counter_buffer); + + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + return 0; +} + +int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs) +{ + _mali_uk_profiling_stream_fd_get_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) { + return -EFAULT; + } + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_profiling_stream_fd_get(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_stream_fd_get_s))) { + return -EFAULT; + } + + return 0; +} + +int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs) +{ + _mali_uk_profiling_control_set_s kargs; + _mali_osk_errcode_t err; + u8 *kernel_control_data = NULL; + u8 *kernel_response_data = NULL; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + if (0 != get_user(kargs.control_packet_size, &uargs->control_packet_size)) return -EFAULT; + if (0 != get_user(kargs.response_packet_size, &uargs->response_packet_size)) return -EFAULT; + + kargs.ctx = (uintptr_t)session_data; + + + /* Sanity check about the size */ + if (kargs.control_packet_size > PAGE_SIZE || kargs.response_packet_size > PAGE_SIZE) + return -EINVAL; + + if (0 != kargs.control_packet_size) { + + if (0 == kargs.response_packet_size) + return -EINVAL; + + kernel_control_data = _mali_osk_calloc(1, kargs.control_packet_size); + if (NULL == kernel_control_data) { + return -ENOMEM; + } + + kernel_response_data = _mali_osk_calloc(1, kargs.response_packet_size); + if (NULL 
== kernel_response_data) { + _mali_osk_free(kernel_control_data); + return -ENOMEM; + } + + kargs.control_packet_data = (uintptr_t)kernel_control_data; + kargs.response_packet_data = (uintptr_t)kernel_response_data; + + if (0 != copy_from_user((void *)(uintptr_t)kernel_control_data, (void *)(uintptr_t)uargs->control_packet_data, kargs.control_packet_size)) { + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + return -EFAULT; + } + + err = _mali_ukk_profiling_control_set(&kargs); + if (_MALI_OSK_ERR_OK != err) { + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + return map_errcode(err); + } + + if (0 != kargs.response_packet_size && 0 != copy_to_user(((void *)(uintptr_t)uargs->response_packet_data), ((void *)(uintptr_t)kargs.response_packet_data), kargs.response_packet_size)) { + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + return -EFAULT; + } + + if (0 != put_user(kargs.response_packet_size, &uargs->response_packet_size)) { + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + return -EFAULT; + } + + _mali_osk_free(kernel_control_data); + _mali_osk_free(kernel_response_data); + } else { + + err = _mali_ukk_profiling_control_set(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + } + return 0; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_soft_job.c b/drivers/gpu/arm/mali400/linux/mali_ukk_soft_job.c --- a/drivers/gpu/arm/mali400/linux/mali_ukk_soft_job.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_soft_job.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include <linux/fs.h> /* file system operations */ +#include <asm/uaccess.h> /* user space access */ + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_ukk_wrappers.h" + +#include "mali_soft_job.h" +#include "mali_timeline.h" + +int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs) +{ + _mali_uk_soft_job_start_s kargs; + u32 type, point; + u64 user_job; + struct mali_timeline_fence fence; + struct mali_soft_job *job = NULL; + u32 __user *job_id_ptr = NULL; + + /* If the job was started successfully, 0 is returned. If there was an error, but the job + * was started, we return -ENOENT. For anything else returned, the job was not started. */ + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + MALI_CHECK_NON_NULL(session, -EINVAL); + + MALI_DEBUG_ASSERT_POINTER(session->soft_job_system); + + if (0 != copy_from_user(&kargs, uargs, sizeof(kargs))) { + return -EFAULT; + } + + type = kargs.type; + user_job = kargs.user_job; + job_id_ptr = (u32 __user *)(uintptr_t)kargs.job_id_ptr; + + mali_timeline_fence_copy_uk_fence(&fence, &kargs.fence); + + if ((MALI_SOFT_JOB_TYPE_USER_SIGNALED != type) && (MALI_SOFT_JOB_TYPE_SELF_SIGNALED != type)) { + MALI_DEBUG_PRINT_ERROR(("Invalid soft job type specified\n")); + return -EINVAL; + } + + /* Create soft job. 
*/ + job = mali_soft_job_create(session->soft_job_system, (enum mali_soft_job_type)type, user_job); + if (unlikely(NULL == job)) { + return map_errcode(_MALI_OSK_ERR_NOMEM); + } + + /* Write job id back to user space. */ + if (0 != put_user(job->id, job_id_ptr)) { + MALI_PRINT_ERROR(("Mali Soft Job: failed to put job id")); + mali_soft_job_destroy(job); + return map_errcode(_MALI_OSK_ERR_NOMEM); + } + + /* Start soft job. */ + point = mali_soft_job_start(job, &fence); + + if (0 != put_user(point, &uargs->point)) { + /* Let user space know that something failed after the job was started. */ + return -ENOENT; + } + + return 0; +} + +int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs) +{ + u32 job_id; + _mali_osk_errcode_t err; + + MALI_DEBUG_ASSERT_POINTER(session); + + if (0 != get_user(job_id, &uargs->job_id)) return -EFAULT; + + err = mali_soft_job_system_signal_job(session->soft_job_system, job_id); + + return map_errcode(err); +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_timeline.c b/drivers/gpu/arm/mali400/linux/mali_ukk_timeline.c --- a/drivers/gpu/arm/mali400/linux/mali_ukk_timeline.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_timeline.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include <linux/fs.h> /* file system operations */ +#include <asm/uaccess.h> /* user space access */ + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_ukk_wrappers.h" + +#include "mali_timeline.h" +#include "mali_timeline_fence_wait.h" +#include "mali_timeline_sync_fence.h" + +int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs) +{ + u32 val; + mali_timeline_id timeline; + mali_timeline_point point; + + MALI_DEBUG_ASSERT_POINTER(session); + + if (0 != get_user(val, &uargs->timeline)) return -EFAULT; + + if (MALI_UK_TIMELINE_MAX <= val) { + return -EINVAL; + } + + timeline = (mali_timeline_id)val; + + point = mali_timeline_system_get_latest_point(session->timeline_system, timeline); + + if (0 != put_user(point, &uargs->point)) return -EFAULT; + + return 0; +} + +int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs) +{ + u32 timeout, status; + mali_bool ret; + _mali_uk_fence_t uk_fence; + struct mali_timeline_fence fence; + + MALI_DEBUG_ASSERT_POINTER(session); + + if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT; + if (0 != get_user(timeout, &uargs->timeout)) return -EFAULT; + + mali_timeline_fence_copy_uk_fence(&fence, &uk_fence); + + ret = mali_timeline_fence_wait(session->timeline_system, &fence, timeout); + status = (MALI_TRUE == ret ? 
1 : 0); + + if (0 != put_user(status, &uargs->status)) return -EFAULT; + + return 0; +} + +int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs) +{ + s32 sync_fd = -1; + _mali_uk_fence_t uk_fence; + struct mali_timeline_fence fence; + + MALI_DEBUG_ASSERT_POINTER(session); + + if (0 != copy_from_user(&uk_fence, &uargs->fence, sizeof(_mali_uk_fence_t))) return -EFAULT; + mali_timeline_fence_copy_uk_fence(&fence, &uk_fence); + +#if defined(CONFIG_SYNC) + sync_fd = mali_timeline_sync_fence_create(session->timeline_system, &fence); +#else + sync_fd = -1; +#endif /* defined(CONFIG_SYNC) */ + + if (0 != put_user(sync_fd, &uargs->sync_fd)) return -EFAULT; + + return 0; +} diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_vsync.c b/drivers/gpu/arm/mali400/linux/mali_ukk_vsync.c --- a/drivers/gpu/arm/mali400/linux/mali_ukk_vsync.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_vsync.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2011-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include <linux/fs.h> /* file system operations */ +#include <asm/uaccess.h> /* user space access */ + +#include "mali_ukk.h" +#include "mali_osk.h" +#include "mali_kernel_common.h" +#include "mali_session.h" +#include "mali_ukk_wrappers.h" + + +int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs) +{ + _mali_uk_vsync_event_report_s kargs; + _mali_osk_errcode_t err; + + MALI_CHECK_NON_NULL(uargs, -EINVAL); + + if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s))) { + return -EFAULT; + } + + kargs.ctx = (uintptr_t)session_data; + err = _mali_ukk_vsync_event_report(&kargs); + if (_MALI_OSK_ERR_OK != err) { + return map_errcode(err); + } + + return 0; +} + diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_ukk_wrappers.h b/drivers/gpu/arm/mali400/linux/mali_ukk_wrappers.h --- a/drivers/gpu/arm/mali400/linux/mali_ukk_wrappers.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_ukk_wrappers.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2010-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
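Every wrapper in these files funnels the driver-internal _mali_osk_errcode_t status through map_errcode() (declared in mali_ukk_wrappers.h below) before it reaches user space as a negative errno. The actual table lives elsewhere in the driver; the following is only a plausible sketch of such a mapping, and the exact set of cases is an assumption based on how the wrappers use the result:

    /* Sketch only; requires the driver's mali_osk.h and <linux/errno.h>.
     * The real map_errcode() is defined in the driver core, not here. */
    static int map_errcode_sketch(_mali_osk_errcode_t err)
    {
        switch (err) {
        case _MALI_OSK_ERR_OK:           return 0;        /* success */
        case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;  /* bad arguments */
        case _MALI_OSK_ERR_NOMEM:        return -ENOMEM;  /* allocation failed */
        case _MALI_OSK_ERR_TIMEOUT:      return -ETIMEDOUT;
        default:                         return -EFAULT;  /* catch-all */
        }
    }
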
+ */ + +/** + * @file mali_ukk_wrappers.h + * Defines the wrapper functions for each user-kernel function + */ + +#ifndef __MALI_UKK_WRAPPERS_H__ +#define __MALI_UKK_WRAPPERS_H__ + +#include "mali_uk_types.h" +#include "mali_osk.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs); +int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs); +int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs); +int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs); +int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs); +int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs); +int pending_submit_wrapper(struct mali_session_data *session_data, _mali_uk_pending_submit_s __user *uargs); + +int mem_alloc_wrapper(struct mali_session_data *session_data, _mali_uk_alloc_mem_s __user *uargs); +int mem_free_wrapper(struct mali_session_data *session_data, _mali_uk_free_mem_s __user *uargs); +int mem_bind_wrapper(struct mali_session_data *session_data, _mali_uk_bind_mem_s __user *uargs); +int mem_unbind_wrapper(struct mali_session_data *session_data, _mali_uk_unbind_mem_s __user *uargs); +int mem_cow_wrapper(struct mali_session_data *session_data, _mali_uk_cow_mem_s __user *uargs); +int mem_cow_modify_range_wrapper(struct mali_session_data *session_data, _mali_uk_cow_modify_range_s __user *uargs); +int mem_resize_mem_wrapper(struct mali_session_data *session_data, _mali_uk_mem_resize_s __user *uargs); +int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs); +int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs); +int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs); +int mem_usage_get_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_memory_usage_get_s __user *uargs); + +int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs); +int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs); +int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs); +int soft_job_start_wrapper(struct mali_session_data *session, _mali_uk_soft_job_start_s __user *uargs); +int soft_job_signal_wrapper(struct mali_session_data *session, _mali_uk_soft_job_signal_s __user *uargs); +int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs); +int pp_and_gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_and_gp_start_job_s __user *uargs); +int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs); +int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs); +int pp_disable_wb_wrapper(struct mali_session_data *session_data, _mali_uk_pp_disable_wb_s __user *uargs); +int gp_start_job_wrapper(struct mali_session_data *session_data, 
_mali_uk_gp_start_job_s __user *uargs); +int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs); +int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs); +int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs); + +int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs); +int profiling_report_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_report_s __user *uargs); +int profiling_get_stream_fd_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stream_fd_get_s __user *uargs); +int profiling_control_set_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_control_set_s __user *uargs); + +int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs); + + +int map_errcode(_mali_osk_errcode_t err); + +#ifdef __cplusplus +} +#endif + +#endif /* __MALI_UKK_WRAPPERS_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/linux/mali_uk_types.h b/drivers/gpu/arm/mali400/linux/mali_uk_types.h --- a/drivers/gpu/arm/mali400/linux/mali_uk_types.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/linux/mali_uk_types.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2012, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __MALI_UK_TYPES_H__ +#define __MALI_UK_TYPES_H__ + +/* Simple wrapper in order to find the OS specific location of this file */ +#include <linux/mali/mali_utgard_uk_types.h> + +#endif /* __MALI_UK_TYPES_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/Makefile b/drivers/gpu/arm/mali400/Makefile --- a/drivers/gpu/arm/mali400/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/Makefile 2018-05-06 08:49:49.174695256 +0200 @@ -0,0 +1,206 @@ +# +# Copyright (C) 2010-2016 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the GNU General Public License version 2 +# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. +# +# A copy of the licence is included with the program, and can also be obtained from Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# + +USE_UMPV2=0 +USING_PROFILING ?= 1 +USING_INTERNAL_PROFILING ?= 0 +USING_DVFS ?= 1 +USING_DMA_BUF_FENCE ?= 0 +MALI_HEATMAPS_ENABLED ?= 0 +MALI_DMA_BUF_MAP_ON_ATTACH ?= 1 +MALI_PMU_PARALLEL_POWER_UP ?= 0 +USING_DT ?= 0 +MALI_MEM_SWAP_TRACKING ?= 0 +USING_DEVFREQ ?= 0 + +# The Makefile sets up "arch" based on the CONFIG, creates the version info +# string and the __malidrv_build_info.c file, and then call the Linux build +# system to actually build the driver. After that point the Kbuild file takes +# over. 
+ +# set up defaults if not defined by the user +ARCH ?= arm + +OSKOS=linux +FILES_PREFIX= + +check_cc2 = \ + $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \ + then \ + echo "$(2)"; \ + else \ + echo "$(3)"; \ + fi ;) + +# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak +-include ../../../arm_internal.mak + +# Give warning of old config parameters are used +ifneq ($(CONFIG),) +$(warning "You have specified the CONFIG variable which is no longer in used. Use TARGET_PLATFORM instead.") +endif + +ifneq ($(CPU),) +$(warning "You have specified the CPU variable which is no longer in used. Use TARGET_PLATFORM instead.") +endif + +# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM +-include MALI_CONFIGURATION +export KDIR ?= $(KDIR-$(TARGET_PLATFORM)) +export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM)) + +ifneq ($(TARGET_PLATFORM),) +ifeq ($(MALI_PLATFORM),) +$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)") +endif +endif + +# validate lookup result +ifeq ($(KDIR),) +$(error No KDIR found for platform $(TARGET_PLATFORM)) +endif + +ifeq ($(USING_GPU_UTILIZATION), 1) + ifeq ($(USING_DVFS), 1) + $(error USING_GPU_UTILIZATION conflict with USING_DVFS you can read the Integration Guide to choose which one do you need) + endif +endif + +ifeq ($(USING_UMP),1) +export CONFIG_MALI400_UMP=y +export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1 +ifeq ($(USE_UMPV2),1) +UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers +else +UMP_SYMVERS_FILE ?= ../ump/Module.symvers +endif +KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE)) +$(warning $(KBUILD_EXTRA_SYMBOLS)) +endif + +# Define host system directory +KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build + +include $(KDIR)/.config + +ifeq ($(ARCH), arm) +# when compiling for ARM we're cross compiling +export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-) +endif + +# report detected/selected settings +ifdef ARM_INTERNAL_BUILD +$(warning TARGET_PLATFORM $(TARGET_PLATFORM)) +$(warning KDIR $(KDIR)) +$(warning MALI_PLATFORM $(MALI_PLATFORM)) +endif + +# Set up build config +export CONFIG_MALI400=m +export CONFIG_MALI450=y +export CONFIG_MALI470=y + +export EXTRA_DEFINES += -DCONFIG_MALI400=1 +export EXTRA_DEFINES += -DCONFIG_MALI450=1 +export EXTRA_DEFINES += -DCONFIG_MALI470=1 + +ifneq ($(MALI_PLATFORM),) +export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1 +export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c) +endif + +ifeq ($(USING_PROFILING),1) +ifeq ($(CONFIG_TRACEPOINTS),) +$(warning CONFIG_TRACEPOINTS required for profiling) +else +export CONFIG_MALI400_PROFILING=y +export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1 +ifeq ($(USING_INTERNAL_PROFILING),1) +export CONFIG_MALI400_INTERNAL_PROFILING=y +export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1 +endif +ifeq ($(MALI_HEATMAPS_ENABLED),1) +export MALI_HEATMAPS_ENABLED=y +export EXTRA_DEFINES += -DCONFIG_MALI400_HEATMAPS_ENABLED +endif +endif +endif + +ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),1) +export CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y +export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH +endif + +ifeq ($(MALI_SHARED_INTERRUPTS),1) +export CONFIG_MALI_SHARED_INTERRUPTS=y +export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS +endif + +ifeq ($(USING_DVFS),1) +export CONFIG_MALI_DVFS=y +export EXTRA_DEFINES += -DCONFIG_MALI_DVFS +endif + +ifeq ($(USING_DMA_BUF_FENCE),1) +export 
CONFIG_MALI_DMA_BUF_FENCE=y +export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_FENCE +endif + +ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1) +export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y +export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP +endif + +ifdef CONFIG_OF +ifeq ($(USING_DT),1) +export CONFIG_MALI_DT=y +export EXTRA_DEFINES += -DCONFIG_MALI_DT +endif +endif + +ifeq ($(USING_DEVFREQ), 1) +ifdef CONFIG_PM_DEVFREQ +export CONFIG_MALI_DEVFREQ=y +export EXTRA_DEFINES += -DCONFIG_MALI_DEVFREQ=1 +else +$(warning "You want to support DEVFREQ but kernel didn't support DEVFREQ.") +endif +endif + +ifneq ($(BUILD),release) +# Debug +export CONFIG_MALI400_DEBUG=y +else +# Release +ifeq ($(MALI_QUIET),1) +export CONFIG_MALI_QUIET=y +export EXTRA_DEFINES += -DCONFIG_MALI_QUIET +endif +endif + +ifeq ($(MALI_SKIP_JOBS),1) +EXTRA_DEFINES += -DPROFILING_SKIP_PP_JOBS=1 -DPROFILING_SKIP_GP_JOBS=1 +endif + +ifeq ($(MALI_MEM_SWAP_TRACKING),1) +EXTRA_DEFINES += -DMALI_MEM_SWAP_TRACKING=1 +endif + +all: $(UMP_SYMVERS_FILE) + $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules + @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o + +clean: + $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean + +kernelrelease: + $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease + +export CONFIG KBUILD_EXTRA_SYMBOLS diff -ENwbur a/drivers/gpu/arm/mali400/platform/arm/arm.c b/drivers/gpu/arm/mali400/platform/arm/arm.c --- a/drivers/gpu/arm/mali400/platform/arm/arm.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/platform/arm/arm.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,623 @@ +/* + * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file mali_platform.c + * Platform specific Mali driver functions for: + * - Realview Versatile platforms with ARM11 Mpcore and virtex 5. + * - Versatile Express platforms with ARM Cortex-A9 and virtex 6. + */ +#include <linux/platform_device.h> +#include <linux/version.h> +#include <linux/pm.h> +#include "mali_kernel_linux.h" +#ifdef CONFIG_PM_RUNTIME +#include <linux/pm_runtime.h> +#endif +#include <asm/io.h> +#include <linux/mali/mali_utgard.h> +#include "mali_kernel_common.h" +#include <linux/dma-mapping.h> +#include <linux/moduleparam.h> + +#include "arm_core_scaling.h" +#include "mali_executor.h" + +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) +#include <linux/devfreq_cooling.h> +#include <linux/thermal.h> +#endif + +static int mali_core_scaling_enable = 0; + +void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data); +static u32 mali_read_phys(u32 phys_addr); +#if defined(CONFIG_ARCH_REALVIEW) +static void mali_write_phys(u32 phys_addr, u32 value); +#endif + +#if defined(CONFIG_ARCH_VEXPRESS) && defined(CONFIG_ARM64) + +#define SECURE_MODE_CONTROL_HANDLER 0x6F02006C +void *secure_mode_mapped_addr = NULL; +/** + * Reset GPU and enable/disable Mali secure mode. + * @Return value: + * 0: success + * non-0: failure. 
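The secure-mode helpers below (and mali_read_phys/mali_write_phys further down) never map a whole device: the physical address is split into an 8 KiB-aligned page base (addr & 0xFFFFE000) and an in-page offset (addr & 0x1FFF), and only offset + sizeof(u32) bytes are ioremapped. A small user-space demonstration of that split, using the SECURE_MODE_CONTROL_HANDLER value from the file:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t addr = 0x6F02006C;         /* SECURE_MODE_CONTROL_HANDLER */
        uint32_t page = addr & 0xFFFFE000;  /* 0x6F020000: 8 KiB-aligned base */
        uint32_t off  = addr & 0x00001FFF;  /* 0x0000006C: offset within page */
        assert(page + off == addr);         /* the split is lossless */
        assert((page & 0x1FFF) == 0);       /* base really is 8 KiB aligned */
        return 0;
    }
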
+ */ + +static int mali_gpu_reset_and_secure_mode_enable_juno(void) +{ + u32 phys_offset = SECURE_MODE_CONTROL_HANDLER & 0x00001FFF; + MALI_DEBUG_ASSERT(NULL != secure_mode_mapped_addr); + + iowrite32(1, ((u8 *)secure_mode_mapped_addr) + phys_offset); + + if (1 == (u32)ioread32(((u8 *)secure_mode_mapped_addr) + phys_offset)) { + MALI_DEBUG_PRINT(3, ("Mali reset GPU and enable secured mode successfully! \n")); + return 0; + } + + MALI_PRINT_ERROR(("Failed to reset GPU and enable Mali secured mode !!! \n")); + + return -1; + +} + +static int mali_gpu_reset_and_secure_mode_disable_juno(void) +{ + u32 phys_offset = SECURE_MODE_CONTROL_HANDLER & 0x00001FFF; + MALI_DEBUG_ASSERT(NULL != secure_mode_mapped_addr); + + iowrite32(0, ((u8 *)secure_mode_mapped_addr) + phys_offset); + + if (0 == (u32)ioread32(((u8 *)secure_mode_mapped_addr) + phys_offset)) { + MALI_DEBUG_PRINT(3, ("Mali reset GPU and disable secured mode successfully! \n")); + return 0; + } + + MALI_PRINT_ERROR(("Failed to reset GPU and disable mali secured mode !!! \n")); + return -1; +} + +static int mali_secure_mode_init_juno(void) +{ + u32 phys_addr_page = SECURE_MODE_CONTROL_HANDLER & 0xFFFFE000; + u32 phys_offset = SECURE_MODE_CONTROL_HANDLER & 0x00001FFF; + u32 map_size = phys_offset + sizeof(u32); + + MALI_DEBUG_ASSERT(NULL == secure_mode_mapped_addr); + + secure_mode_mapped_addr = ioremap_nocache(phys_addr_page, map_size); + if (NULL != secure_mode_mapped_addr) { + return mali_gpu_reset_and_secure_mode_disable_juno(); + } + MALI_DEBUG_PRINT(2, ("Failed to ioremap for Mali secured mode! \n")); + return -1; +} + +static void mali_secure_mode_deinit_juno(void) +{ + if (NULL != secure_mode_mapped_addr) { + mali_gpu_reset_and_secure_mode_disable_juno(); + iounmap(secure_mode_mapped_addr); + secure_mode_mapped_addr = NULL; + } +} +#endif + +#ifndef CONFIG_MALI_DT +static void mali_platform_device_release(struct device *device); + +#if defined(CONFIG_ARCH_VEXPRESS) + +#if defined(CONFIG_ARM64) +/* Juno + Mali-450 MP6 in V7 FPGA */ +static struct resource mali_gpu_resources_m450_mp6[] = { + MALI_GPU_RESOURCES_MALI450_MP6_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200) +}; + +static struct resource mali_gpu_resources_m470_mp4[] = { + MALI_GPU_RESOURCES_MALI470_MP4_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200) +}; + +static struct resource mali_gpu_resources_m470_mp3[] = { + MALI_GPU_RESOURCES_MALI470_MP3_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200) +}; + +static struct resource mali_gpu_resources_m470_mp2[] = { + MALI_GPU_RESOURCES_MALI470_MP2_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200) +}; + +static struct resource mali_gpu_resources_m470_mp1[] = { + MALI_GPU_RESOURCES_MALI470_MP1_PMU(0x6F040000, 200, 200, 200, 200, 200) +}; + +#else +static struct resource mali_gpu_resources_m450_mp8[] = { + MALI_GPU_RESOURCES_MALI450_MP8_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68) +}; + +static struct resource mali_gpu_resources_m450_mp6[] = { + MALI_GPU_RESOURCES_MALI450_MP6_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68) +}; + +static struct resource mali_gpu_resources_m450_mp4[] = { + MALI_GPU_RESOURCES_MALI450_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68) +}; + +static struct resource mali_gpu_resources_m470_mp4[] = { + MALI_GPU_RESOURCES_MALI470_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68) +}; +#endif /* CONFIG_ARM64 */ + +#elif 
defined(CONFIG_ARCH_REALVIEW) + +static struct resource mali_gpu_resources_m300[] = { + MALI_GPU_RESOURCES_MALI300_PMU(0xC0000000, -1, -1, -1, -1) +}; + +static struct resource mali_gpu_resources_m400_mp1[] = { + MALI_GPU_RESOURCES_MALI400_MP1_PMU(0xC0000000, -1, -1, -1, -1) +}; + +static struct resource mali_gpu_resources_m400_mp2[] = { + MALI_GPU_RESOURCES_MALI400_MP2_PMU(0xC0000000, -1, -1, -1, -1, -1, -1) +}; + +#endif +#endif + +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) + +#define FALLBACK_STATIC_TEMPERATURE 55000 + +static struct thermal_zone_device *gpu_tz; + +/* Calculate gpu static power example for reference */ +static unsigned long arm_model_static_power(unsigned long voltage) +{ + int temperature, temp; + int temp_squared, temp_cubed, temp_scaling_factor; + const unsigned long coefficient = (410UL << 20) / (729000000UL >> 10); + const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10; + unsigned long static_power; + + if (gpu_tz) { + int ret; + + ret = gpu_tz->ops->get_temp(gpu_tz, &temperature); + if (ret) { + MALI_DEBUG_PRINT(2, ("Error reading temperature for gpu thermal zone: %d\n", ret)); + temperature = FALLBACK_STATIC_TEMPERATURE; + } + } else { + temperature = FALLBACK_STATIC_TEMPERATURE; + } + + /* Calculate the temperature scaling factor. To be applied to the + * voltage scaled power. + */ + temp = temperature / 1000; + temp_squared = temp * temp; + temp_cubed = temp_squared * temp; + temp_scaling_factor = + (2 * temp_cubed) + - (80 * temp_squared) + + (4700 * temp) + + 32000; + + static_power = (((coefficient * voltage_cubed) >> 20) + * temp_scaling_factor) + / 1000000; + + return static_power; +} + +/* Calculate gpu dynamic power example for reference */ +static unsigned long arm_model_dynamic_power(unsigned long freq, + unsigned long voltage) +{ + /* The inputs: freq (f) is in Hz, and voltage (v) in mV. + * The coefficient (c) is in mW/(MHz mV mV). + * + * This function calculates the dynamic power after this formula: + * Pdyn (mW) = c (mW/(MHz*mV*mV)) * v (mV) * v (mV) * f (MHz) + */ + const unsigned long v2 = (voltage * voltage) / 1000; /* m*(V*V) */ + const unsigned long f_mhz = freq / 1000000; /* MHz */ + const unsigned long coefficient = 3600; /* mW/(MHz*mV*mV) */ + unsigned long dynamic_power; + + dynamic_power = (coefficient * v2 * f_mhz) / 1000000; /* mW */ + + return dynamic_power; +} + +struct devfreq_cooling_power arm_cooling_ops = { + .get_static_power = arm_model_static_power, + .get_dynamic_power = arm_model_dynamic_power, +}; +#endif + +static struct mali_gpu_device_data mali_gpu_data = { +#ifndef CONFIG_MALI_DT + .pmu_switch_delay = 0xFF, /* do not have to be this high on FPGA, but it is good for testing to have a delay */ +#if defined(CONFIG_ARCH_VEXPRESS) + .shared_mem_size = 256 * 1024 * 1024, /* 256MB */ +#endif +#endif + .max_job_runtime = 60000, /* 60 seconds */ + +#if defined(CONFIG_ARCH_REALVIEW) + .dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */ + .dedicated_mem_size = 0x10000000, /* 256MB */ +#endif +#if defined(CONFIG_ARM64) + /* Some framebuffer drivers get the framebuffer dynamically, such as through GEM, + * in which the memory resource can't be predicted in advance. 
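arm_model_dynamic_power above evaluates Pdyn = c * V^2 * f in the units its comment gives (mV, MHz, mW). Plugging in, say, 900 mV and 600 MHz (illustrative numbers, not taken from the file) with the driver's coefficient of 3600 gives about 1.75 W:

    #include <stdio.h>

    int main(void)
    {
        unsigned long voltage = 900;                      /* mV, example value */
        unsigned long freq    = 600000000;                /* Hz, example value */
        unsigned long coefficient = 3600;                 /* mW/(MHz*mV*mV), from the driver */
        unsigned long v2    = (voltage * voltage) / 1000; /* 810 */
        unsigned long f_mhz = freq / 1000000;             /* 600 */
        unsigned long p     = (coefficient * v2 * f_mhz) / 1000000;
        printf("Pdyn = %lu mW\n", p);                     /* prints 1749, i.e. ~1.75 W */
        return 0;
    }
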
+ */ + .fb_start = 0x0, + .fb_size = 0xFFFFF000, +#else + .fb_start = 0xe0000000, + .fb_size = 0x01000000, +#endif + .control_interval = 1000, /* 1000ms */ + .utilization_callback = mali_gpu_utilization_callback, + .get_clock_info = NULL, + .get_freq = NULL, + .set_freq = NULL, +#if defined(CONFIG_ARCH_VEXPRESS) && defined(CONFIG_ARM64) + .secure_mode_init = mali_secure_mode_init_juno, + .secure_mode_deinit = mali_secure_mode_deinit_juno, + .gpu_reset_and_secure_mode_enable = mali_gpu_reset_and_secure_mode_enable_juno, + .gpu_reset_and_secure_mode_disable = mali_gpu_reset_and_secure_mode_disable_juno, +#else + .secure_mode_init = NULL, + .secure_mode_deinit = NULL, + .gpu_reset_and_secure_mode_enable = NULL, + .gpu_reset_and_secure_mode_disable = NULL, +#endif +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) + .gpu_cooling_ops = &arm_cooling_ops, +#endif +}; + +#ifndef CONFIG_MALI_DT +static struct platform_device mali_gpu_device = { + .name = MALI_GPU_NAME_UTGARD, + .id = 0, + .dev.release = mali_platform_device_release, + .dev.dma_mask = &mali_gpu_device.dev.coherent_dma_mask, + .dev.coherent_dma_mask = DMA_BIT_MASK(32), + + .dev.platform_data = &mali_gpu_data, +}; + +int mali_platform_device_register(void) +{ + int err = -1; + int num_pp_cores = 0; +#if defined(CONFIG_ARCH_REALVIEW) + u32 m400_gp_version; +#endif + + MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n")); + + /* Detect present Mali GPU and connect the correct resources to the device */ +#if defined(CONFIG_ARCH_VEXPRESS) + +#if defined(CONFIG_ARM64) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + mali_gpu_device.dev.archdata.dma_ops = &dummy_dma_ops; +#else + mali_gpu_device.dev.archdata.dma_ops = dma_ops; +#endif + if ((mali_read_phys(0x6F000000) & 0x00600450) == 0x00600450) { + MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n")); + num_pp_cores = 6; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6); + mali_gpu_device.resource = mali_gpu_resources_m450_mp6; + } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00400430) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n")); + num_pp_cores = 4; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp4); + mali_gpu_device.resource = mali_gpu_resources_m470_mp4; + } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00300430) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP3 device\n")); + num_pp_cores = 3; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp3); + mali_gpu_device.resource = mali_gpu_resources_m470_mp3; + } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00200430) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP2 device\n")); + num_pp_cores = 2; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp2); + mali_gpu_device.resource = mali_gpu_resources_m470_mp2; + } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00100430) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP1 device\n")); + num_pp_cores = 1; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp1); + mali_gpu_device.resource = mali_gpu_resources_m470_mp1; + } +#else + if (mali_read_phys(0xFC000000) == 0x00000450) { + MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n")); + num_pp_cores = 8; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp8); + mali_gpu_device.resource = mali_gpu_resources_m450_mp8; + } else if (mali_read_phys(0xFC000000) == 0x40600450) { + MALI_DEBUG_PRINT(4, ("Registering Mali-450 
MP6 device\n")); + num_pp_cores = 6; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6); + mali_gpu_device.resource = mali_gpu_resources_m450_mp6; + } else if (mali_read_phys(0xFC000000) == 0x40400450) { + MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n")); + num_pp_cores = 4; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4); + mali_gpu_device.resource = mali_gpu_resources_m450_mp4; + } else if (mali_read_phys(0xFC000000) == 0xFFFFFFFF) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n")); + num_pp_cores = 4; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m470_mp4); + mali_gpu_device.resource = mali_gpu_resources_m470_mp4; + } +#endif /* CONFIG_ARM64 */ + +#elif defined(CONFIG_ARCH_REALVIEW) + + m400_gp_version = mali_read_phys(0xC000006C); + if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) { + MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n")); + num_pp_cores = 1; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m300); + mali_gpu_device.resource = mali_gpu_resources_m300; + mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */ + } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) { + u32 fpga_fw_version = mali_read_phys(0xC0010000); + if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) { + /* Mali-400 MP1 r1p0 or r1p1 */ + MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n")); + num_pp_cores = 1; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp1); + mali_gpu_device.resource = mali_gpu_resources_m400_mp1; + mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */ + } else if (fpga_fw_version == 0x130C000F) { + /* Mali-400 MP2 r1p1 */ + MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n")); + num_pp_cores = 2; + mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m400_mp2); + mali_gpu_device.resource = mali_gpu_resources_m400_mp2; + mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */ + } + } + +#endif + /* Register the platform device */ + err = platform_device_register(&mali_gpu_device); + if (0 == err) { +#ifdef CONFIG_PM_RUNTIME +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)) + pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000); + pm_runtime_use_autosuspend(&(mali_gpu_device.dev)); +#endif + pm_runtime_enable(&(mali_gpu_device.dev)); +#endif + MALI_DEBUG_ASSERT(0 < num_pp_cores); + mali_core_scaling_init(num_pp_cores); + + return 0; + } + + return err; +} + +void mali_platform_device_unregister(void) +{ + MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n")); + + mali_core_scaling_term(); +#ifdef CONFIG_PM_RUNTIME + pm_runtime_disable(&(mali_gpu_device.dev)); +#endif + platform_device_unregister(&mali_gpu_device); + + platform_device_put(&mali_gpu_device); + +#if defined(CONFIG_ARCH_REALVIEW) + mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */ +#endif +} + +static void mali_platform_device_release(struct device *device) +{ + MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n")); +} + +#else /* CONFIG_MALI_DT */ +int mali_platform_device_init(struct platform_device *device) +{ + int num_pp_cores = 0; + int err = -1; +#if defined(CONFIG_ARCH_REALVIEW) + u32 m400_gp_version; +#endif + + /* Detect present Mali GPU and connect the correct resources to the device */ +#if defined(CONFIG_ARCH_VEXPRESS) + +#if defined(CONFIG_ARM64) + if ((mali_read_phys(0x6F000000) & 0x00600450) == 
0x00600450) { + MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n")); + num_pp_cores = 6; + } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00400430) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n")); + num_pp_cores = 4; + } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00300430) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP3 device\n")); + num_pp_cores = 3; + } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00200430) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP2 device\n")); + num_pp_cores = 2; + } else if ((mali_read_phys(0x6F000000) & 0x00F00430) == 0x00100430) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP1 device\n")); + num_pp_cores = 1; + } +#else + if (mali_read_phys(0xFC000000) == 0x00000450) { + MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n")); + num_pp_cores = 8; + } else if (mali_read_phys(0xFC000000) == 0x40400450) { + MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n")); + num_pp_cores = 4; + } else if (mali_read_phys(0xFC000000) == 0xFFFFFFFF) { + MALI_DEBUG_PRINT(4, ("Registering Mali-470 MP4 device\n")); + num_pp_cores = 4; + } +#endif + +#elif defined(CONFIG_ARCH_REALVIEW) + + m400_gp_version = mali_read_phys(0xC000006C); + if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) { + MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n")); + num_pp_cores = 1; + mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */ + } else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) { + u32 fpga_fw_version = mali_read_phys(0xC0010000); + if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) { + /* Mali-400 MP1 r1p0 or r1p1 */ + MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n")); + num_pp_cores = 1; + mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */ + } else if (fpga_fw_version == 0x130C000F) { + /* Mali-400 MP2 r1p1 */ + MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n")); + num_pp_cores = 2; + mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */ + } + } +#endif + + /* After kernel 3.15 device tree will default set dev + * related parameters in of_platform_device_create_pdata. + * But kernel changes from version to version, + * For example 3.10 didn't include device->dev.dma_mask parameter setting, + * if we didn't include here will cause dma_mapping error, + * but in kernel 3.15 it include device->dev.dma_mask parameter setting, + * so it's better to set must need paramter by DDK itself. 
+ */ + if (!device->dev.dma_mask) + device->dev.dma_mask = &device->dev.coherent_dma_mask; + device->dev.archdata.dma_ops = dma_ops; + + err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data)); + + if (0 == err) { +#ifdef CONFIG_PM_RUNTIME +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)) + pm_runtime_set_autosuspend_delay(&(device->dev), 1000); + pm_runtime_use_autosuspend(&(device->dev)); +#endif + pm_runtime_enable(&(device->dev)); +#endif + MALI_DEBUG_ASSERT(0 < num_pp_cores); + mali_core_scaling_init(num_pp_cores); + } + +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) + /* Get thermal zone */ + gpu_tz = thermal_zone_get_zone_by_name("soc_thermal"); + if (IS_ERR(gpu_tz)) { + MALI_DEBUG_PRINT(2, ("Error getting gpu thermal zone (%ld), not yet ready?\n", + PTR_ERR(gpu_tz))); + gpu_tz = NULL; + + err = -EPROBE_DEFER; + } +#endif + + return err; +} + +int mali_platform_device_deinit(struct platform_device *device) +{ + MALI_IGNORE(device); + + MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n")); + + mali_core_scaling_term(); +#ifdef CONFIG_PM_RUNTIME + pm_runtime_disable(&(device->dev)); +#endif + +#if defined(CONFIG_ARCH_REALVIEW) + mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */ +#endif + + return 0; +} + +#endif /* CONFIG_MALI_DT */ + +static u32 mali_read_phys(u32 phys_addr) +{ + u32 phys_addr_page = phys_addr & 0xFFFFE000; + u32 phys_offset = phys_addr & 0x00001FFF; + u32 map_size = phys_offset + sizeof(u32); + u32 ret = 0xDEADBEEF; + void *mem_mapped = ioremap_nocache(phys_addr_page, map_size); + if (NULL != mem_mapped) { + ret = (u32)ioread32(((u8 *)mem_mapped) + phys_offset); + iounmap(mem_mapped); + } + + return ret; +} + +#if defined(CONFIG_ARCH_REALVIEW) +static void mali_write_phys(u32 phys_addr, u32 value) +{ + u32 phys_addr_page = phys_addr & 0xFFFFE000; + u32 phys_offset = phys_addr & 0x00001FFF; + u32 map_size = phys_offset + sizeof(u32); + void *mem_mapped = ioremap_nocache(phys_addr_page, map_size); + if (NULL != mem_mapped) { + iowrite32(value, ((u8 *)mem_mapped) + phys_offset); + iounmap(mem_mapped); + } +} +#endif + +static int param_set_core_scaling(const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + + if (1 == mali_core_scaling_enable) { + mali_core_scaling_sync(mali_executor_get_num_cores_enabled()); + } + return ret; +} + +static struct kernel_param_ops param_ops_core_scaling = { + .set = param_set_core_scaling, + .get = param_get_int, +}; + +module_param_cb(mali_core_scaling_enable, &param_ops_core_scaling, &mali_core_scaling_enable, 0644); +MODULE_PARM_DESC(mali_core_scaling_enable, "1 means to enable core scaling policy, 0 means to disable core scaling policy"); + +void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data) +{ + if (1 == mali_core_scaling_enable) { + mali_core_scaling_update(data); + } +} diff -ENwbur a/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.c b/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.c --- a/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.c 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved. 
diff -ENwbur a/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.c b/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.c
--- a/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.c	2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.c
+ * Example core scaling policy.
+ */
+
+#include "arm_core_scaling.h"
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/workqueue.h>
+
+static int num_cores_total;
+static int num_cores_enabled;
+
+static struct work_struct wq_work;
+
+static void set_num_cores(struct work_struct *work)
+{
+	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
+	MALI_DEBUG_ASSERT(0 == err);
+	MALI_IGNORE(err);
+}
+
+static void enable_one_core(void)
+{
+	if (num_cores_enabled < num_cores_total) {
+		++num_cores_enabled;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
+	}
+
+	MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+	MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+static void disable_one_core(void)
+{
+	if (1 < num_cores_enabled) {
+		--num_cores_enabled;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
+	}
+
+	MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+	MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+static void enable_max_num_cores(void)
+{
+	if (num_cores_enabled < num_cores_total) {
+		num_cores_enabled = num_cores_total;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
+	}
+
+	MALI_DEBUG_ASSERT(num_cores_total == num_cores_enabled);
+}
+
+void mali_core_scaling_init(int num_pp_cores)
+{
+	INIT_WORK(&wq_work, set_num_cores);
+
+	num_cores_total = num_pp_cores;
+	num_cores_enabled = num_pp_cores;
+
+	/* NOTE: Mali is not fully initialized at this point. */
+}
+
+void mali_core_scaling_sync(int num_cores)
+{
+	num_cores_enabled = num_cores;
+}
+
+void mali_core_scaling_term(void)
+{
+	flush_scheduled_work();
+}
+
+#define PERCENT_OF(percent, max) ((int) ((percent)*(max)/100.0 + 0.5))
+
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
+{
+	/*
+	 * This function implements a very trivial PP core scaling algorithm.
+	 *
+	 * It is _NOT_ of production quality.
+	 * The only intention behind this algorithm is to exercise and test the
+	 * core scaling functionality of the driver.
+	 * It is _NOT_ tuned for either power saving or performance!
+	 *
+	 * Other metrics than PP utilization need to be considered as well
+	 * in order to make a good core scaling algorithm.
+	 */
+
+	MALI_DEBUG_PRINT(3, ("Utilization: (%3d, %3d, %3d), cores enabled: %d/%d\n", data->utilization_gpu, data->utilization_gp, data->utilization_pp, num_cores_enabled, num_cores_total));
+
+	/* NOTE: this function is normally called directly from the utilization callback which is in
+	 * timer context.
+	 */
+
+	if (PERCENT_OF(90, 256) < data->utilization_pp) {
+		enable_max_num_cores();
+	} else if (PERCENT_OF(50, 256) < data->utilization_pp) {
+		enable_one_core();
+	} else if (PERCENT_OF(40, 256) < data->utilization_pp) {
+		/* do nothing */
+	} else if (PERCENT_OF(0, 256) < data->utilization_pp) {
+		disable_one_core();
+	} else {
+		/* do nothing */
+	}
+}
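With the rounding and integer truncation in PERCENT_OF, the thresholds used by the policy above work out as follows; note the deliberate dead band between the enable and disable triggers:

/* PERCENT_OF(p, 256) = (int)(p * 256 / 100.0 + 0.5):
 *
 *   PERCENT_OF(90, 256) = 230   util > 230        -> enable all cores
 *   PERCENT_OF(50, 256) = 128   230 >= util > 128 -> enable one more core
 *   PERCENT_OF(40, 256) = 102   128 >= util > 102 -> hold (dead band)
 *   PERCENT_OF( 0, 256) =   0   102 >= util > 0   -> disable one core
 *                               util == 0         -> hold
 */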
diff -ENwbur a/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.h b/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.h
--- a/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.h	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/platform/arm/arm_core_scaling.h	2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.h
+ * Example core scaling policy.
+ */
+
+#ifndef __ARM_CORE_SCALING_H__
+#define __ARM_CORE_SCALING_H__
+
+struct mali_gpu_utilization_data;
+
+/**
+ * Initialize core scaling policy.
+ *
+ * @note The core scaling policy will assume that all PP cores are on initially.
+ *
+ * @param num_pp_cores Total number of PP cores.
+ */
+void mali_core_scaling_init(int num_pp_cores);
+
+/**
+ * Terminate core scaling policy.
+ */
+void mali_core_scaling_term(void);
+
+/**
+ * Update core scaling policy with new utilization data.
+ *
+ * @param data Utilization data.
+ */
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data);
+
+void mali_core_scaling_sync(int num_cores);
+
+#endif /* __ARM_CORE_SCALING_H__ */
diff -ENwbur a/drivers/gpu/arm/mali400/platform/arm/juno_opp.c b/drivers/gpu/arm/mali400/platform/arm/juno_opp.c
--- a/drivers/gpu/arm/mali400/platform/arm/juno_opp.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/platform/arm/juno_opp.c	2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file juno_opp.c
+ * Example: Set up opp table
+ * Uses the ARM64 Juno specific SCPI_PROTOCOL to get frequency information.
+ * Customers need to implement their own platform-related logic.
+ */
+#ifdef CONFIG_ARCH_VEXPRESS
+#ifdef CONFIG_MALI_DEVFREQ
+#ifdef CONFIG_ARM64
+#ifdef CONFIG_ARM_SCPI_PROTOCOL
+#include
+#include
+#include
+#include
+#include
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+#include <linux/pm_opp.h>
+#else /* Linux >= 3.13 */
+/* In 3.13 the OPP include header file, types, and functions were all
+ * renamed. Use the old filename for the include, and define the new names to
+ * the old, when an old kernel is detected.
+ */
+#include <linux/opp.h>
+#define dev_pm_opp_add opp_add
+#define dev_pm_opp_remove opp_remove
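The same bridging works for the other OPP symbols renamed in that series, should this file ever grow to use them. One more mapping as an illustration only; the code below relies solely on dev_pm_opp_add and dev_pm_opp_remove:

#define dev_pm_opp_get_opp_count opp_get_opp_count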
+#endif /* Linux >= 3.13 */
+
+#include "mali_kernel_common.h"
+
+static int init_juno_opps_from_scpi(struct device *dev)
+{
+	struct scpi_dvfs_info *sinfo;
+	struct scpi_ops *sops;
+
+	int i;
+
+	sops = get_scpi_ops();
+	if (NULL == sops) {
+		MALI_DEBUG_PRINT(2, ("Mali didn't get any scpi ops \n"));
+		return -1;
+	}
+
+	/* Hard coded for Juno. 2 is GPU domain */
+	sinfo = sops->dvfs_get_info(2);
+	if (IS_ERR_OR_NULL(sinfo))
+		return PTR_ERR(sinfo);
+
+	for (i = 0; i < sinfo->count; i++) {
+		struct scpi_opp *e = &sinfo->opps[i];
+
+		MALI_DEBUG_PRINT(2, ("Mali OPP from SCPI: %u Hz @ %u mV\n", e->freq, e->m_volt));
+
+		dev_pm_opp_add(dev, e->freq, e->m_volt * 1000);
+	}
+
+	return 0;
+}
+
+int setup_opps(void)
+{
+	struct device_node *np;
+	struct platform_device *pdev;
+	int err;
+
+	np = of_find_node_by_name(NULL, "gpu");
+	if (!np) {
+		pr_err("Failed to find DT entry for Mali\n");
+		return -EFAULT;
+	}
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev) {
+		pr_err("Failed to find device for Mali\n");
+		of_node_put(np);
+		return -EFAULT;
+	}
+
+	err = init_juno_opps_from_scpi(&pdev->dev);
+
+	of_node_put(np);
+
+	return err;
+}
+
+int term_opps(struct device *dev)
+{
+	struct scpi_dvfs_info *sinfo;
+	struct scpi_ops *sops;
+
+	int i;
+
+	sops = get_scpi_ops();
+	if (NULL == sops) {
+		MALI_DEBUG_PRINT(2, ("Mali didn't get any scpi ops \n"));
+		return -1;
+	}
+
+	/* Hard coded for Juno. 2 is GPU domain */
+	sinfo = sops->dvfs_get_info(2);
+	if (IS_ERR_OR_NULL(sinfo))
+		return PTR_ERR(sinfo);
+
+	for (i = 0; i < sinfo->count; i++) {
+		struct scpi_opp *e = &sinfo->opps[i];
+
+		MALI_DEBUG_PRINT(2, ("Mali Remove OPP: %u Hz \n", e->freq));
+
+		dev_pm_opp_remove(dev, e->freq);
+	}
+
+	return 0;
+
+}
+#endif
+#endif
+#endif
+#endif
diff -ENwbur a/drivers/gpu/arm/mali400/platform/nexell/s5pxx18.c b/drivers/gpu/arm/mali400/platform/nexell/s5pxx18.c
--- a/drivers/gpu/arm/mali400/platform/nexell/s5pxx18.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/platform/nexell/s5pxx18.c	2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file s5pxx18.c
+ * Platform specific Mali driver functions for:
+ * - Nexell s5p6818 platforms with ARM CortexA53 8 cores.
+ * - Nexell s5p4418 platforms with ARM CortexA9 4 cores.
+ */ +#include +#include +#include +#include "mali_kernel_linux.h" +#ifdef CONFIG_PM_RUNTIME +#include +#endif +#include +#include +#include "mali_kernel_common.h" +#include +#include +#include +#include + +#include +#include +#include "s5pxx18_core_scaling.h" +#include "mali_executor.h" + +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) +#include +#include +#endif + +#ifdef CONFIG_MALI_PLATFORM_S5P6818 +#include +#include +#endif + +static int mali_core_scaling_enable = 0; +static struct clk *clk_mali; +static struct reset_control *rst_mali; +#ifdef CONFIG_ARM_S5Pxx18_DEVFREQ +static bool nexell_qos_added; +static struct pm_qos_request nexell_gpu_qos; +static int bus_clk_step; +static struct delayed_work qos_work; +#endif + +void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data); + +#ifdef CONFIG_MALI_PLATFORM_S5P6818 +static void s5p6818_mali_axibus_lpi_exit(void) +{ + /* Set PBUS CSYSREQ to High */ + nx_tieoff_set(NX_TIEOFF_Inst_VR_PBUS_AXILPI_S0_CSYSREQ, 1); + + /* Set MBUS CSYSREQ to High */ + nx_tieoff_set(NX_TIEOFF_Inst_VR_MBUS_AXILPI_S0_CSYSREQ, 1); +} + +static void s5p6818_mali_axibus_lpi_enter(void) +{ + /* Set PBUS LPI CSYSREQ to Low */ + nx_tieoff_set(NX_TIEOFF_Inst_VR_PBUS_AXILPI_S0_CSYSREQ, 0); + + /* Set MBUS LPI CSYSREQ to Low */ + nx_tieoff_set(NX_TIEOFF_Inst_VR_MBUS_AXILPI_S0_CSYSREQ, 0); +} +#endif + +static void nexell_platform_resume(struct device *dev) +{ + clk_prepare_enable(clk_mali); + reset_control_reset(rst_mali); +#ifdef CONFIG_MALI_PLATFORM_S5P6818 + s5p6818_mali_axibus_lpi_exit(); +#endif +} + +static void nexell_platform_suspend(struct device *dev) +{ + if (rst_mali) { +#ifdef CONFIG_MALI_PLATFORM_S5P6818 + s5p6818_mali_axibus_lpi_enter(); +#endif + reset_control_assert(rst_mali); + } + + if (clk_mali) + clk_disable_unprepare(clk_mali); +} + +#ifdef CONFIG_ARM_S5Pxx18_DEVFREQ +static struct mali_gpu_clk_item gpu_clocks[] = { + { + .clock = 100, /* NX_BUS_CLK_IDLE_KHZ */ + }, { + .clock = 200, /* Fake clock */ + }, { + .clock = 300, /* Fake clock */ + }, { + .clock = 400, /* NX_BUS_CLK_GPU_KHZ */ + } +}; + +struct mali_gpu_clock gpu_clock = { + .item = gpu_clocks, + .num_of_steps = ARRAY_SIZE(gpu_clocks), +}; + +static void +nexell_gpu_qos_work_handler(struct work_struct *work) +{ + u32 clk_khz = gpu_clocks[bus_clk_step].clock * 1000; + + if (clk_khz != NX_BUS_CLK_IDLE_KHZ) + clk_khz = NX_BUS_CLK_GPU_KHZ; + + if (!nexell_qos_added) { + pm_qos_add_request(&nexell_gpu_qos, PM_QOS_BUS_THROUGHPUT, + clk_khz); + nexell_qos_added = true; + } else { + pm_qos_update_request(&nexell_gpu_qos, clk_khz); + } +} + +static void nexell_get_clock_info(struct mali_gpu_clock **data) +{ + *data = &gpu_clock; +} + +static int nexell_get_freq(void) +{ + return bus_clk_step; +} + +static int nexell_set_freq(int setting_clock_step) +{ + if (bus_clk_step != setting_clock_step) { + bus_clk_step = setting_clock_step; + + INIT_DELAYED_WORK(&qos_work, nexell_gpu_qos_work_handler); + queue_delayed_work(system_power_efficient_wq, &qos_work, 0); + } + + return 0; +} +#endif + +static struct mali_gpu_device_data mali_gpu_data = { + .max_job_runtime = 60000, /* 60 seconds */ + + /* Some framebuffer drivers get the framebuffer dynamically, such as through GEM, + * in which the memory resource can't be predicted in advance. 
+ */ + .fb_start = 0x0, + .fb_size = 0xFFFFF000, + .control_interval = 1000, /* 1000ms */ + .utilization_callback = mali_gpu_utilization_callback, +#ifdef CONFIG_ARM_S5Pxx18_DEVFREQ + .get_clock_info = nexell_get_clock_info, + .get_freq = nexell_get_freq, + .set_freq = nexell_set_freq, +#endif + .secure_mode_init = NULL, + .secure_mode_deinit = NULL, + .gpu_reset_and_secure_mode_enable = NULL, + .gpu_reset_and_secure_mode_disable = NULL, + .platform_suspend = nexell_platform_suspend, + .platform_resume = nexell_platform_resume, +}; + +int mali_platform_device_init(struct platform_device *device) +{ + int num_pp_cores = 2; + int err = -1; + struct device *dev = &device->dev; + + clk_mali = devm_clk_get(dev, "clk_mali"); + if (IS_ERR_OR_NULL(clk_mali)) { + dev_err(dev, "failed to get mali clock\n"); + return -ENODEV; + } + + clk_prepare_enable(clk_mali); + + rst_mali = devm_reset_control_get(dev, "vr-reset"); + + if (IS_ERR(rst_mali)) { + dev_err(dev, "failed to get reset_control\n"); + return -EINVAL; + } + + reset_control_reset(rst_mali); +#ifdef CONFIG_MALI_PLATFORM_S5P6818 + s5p6818_mali_axibus_lpi_exit(); +#endif + +#ifdef CONFIG_MALI_PLATFORM_S5P6818 + num_pp_cores = 4; +#endif + /* After kernel 3.15 device tree will default set dev + * related parameters in of_platform_device_create_pdata. + * But kernel changes from version to version, + * For example 3.10 didn't include device->dev.dma_mask parameter setting, + * if we didn't include here will cause dma_mapping error, + * but in kernel 3.15 it include device->dev.dma_mask parameter setting, + * so it's better to set must need paramter by DDK itself. + */ + if (!device->dev.dma_mask) + device->dev.dma_mask = &device->dev.coherent_dma_mask; +#ifndef CONFIG_ARM64 + device->dev.archdata.dma_ops = &arm_dma_ops; +#endif + + err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data)); + + if (0 == err) { +#ifdef CONFIG_PM_RUNTIME +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)) + pm_runtime_set_autosuspend_delay(&(device->dev), 1000); + pm_runtime_use_autosuspend(&(device->dev)); +#endif + pm_runtime_enable(&(device->dev)); +#endif + MALI_DEBUG_ASSERT(0 < num_pp_cores); + mali_core_scaling_init(num_pp_cores); + } + +#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL) + /* Get thermal zone */ + gpu_tz = thermal_zone_get_zone_by_name("soc_thermal"); + if (IS_ERR(gpu_tz)) { + MALI_DEBUG_PRINT(2, ("Error getting gpu thermal zone (%ld), not yet ready?\n", + PTR_ERR(gpu_tz))); + gpu_tz = NULL; + + err = -EPROBE_DEFER; + } +#endif + + return err; +} + +int mali_platform_device_deinit(struct platform_device *device) +{ + MALI_IGNORE(device); + + MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n")); + + mali_core_scaling_term(); + +#ifdef CONFIG_ARM_S5Pxx18_DEVFREQ + if (nexell_qos_added) { + pm_qos_remove_request(&nexell_gpu_qos); + nexell_qos_added = false; + } +#endif + + if (rst_mali) { +#ifdef CONFIG_MALI_PLATFORM_S5P6818 + s5p6818_mali_axibus_lpi_enter(); +#endif + reset_control_assert(rst_mali); + } + + if (clk_mali) + clk_disable_unprepare(clk_mali); + +#ifdef CONFIG_PM_RUNTIME + pm_runtime_disable(&(device->dev)); +#endif + + return 0; +} + +static int param_set_core_scaling(const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + + if (1 == mali_core_scaling_enable) { + mali_core_scaling_sync(mali_executor_get_num_cores_enabled()); + } + return ret; +} + +static struct kernel_param_ops param_ops_core_scaling = { + .set = param_set_core_scaling, + .get = 
param_get_int,
+};
+
+module_param_cb(mali_core_scaling_enable, &param_ops_core_scaling, &mali_core_scaling_enable, 0644);
+MODULE_PARM_DESC(mali_core_scaling_enable, "1 means to enable core scaling policy, 0 means to disable core scaling policy");
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data)
+{
+	if (1 == mali_core_scaling_enable) {
+		mali_core_scaling_update(data);
+	}
+}
diff -ENwbur a/drivers/gpu/arm/mali400/platform/nexell/s5pxx18_core_scaling.c b/drivers/gpu/arm/mali400/platform/nexell/s5pxx18_core_scaling.c
--- a/drivers/gpu/arm/mali400/platform/nexell/s5pxx18_core_scaling.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/arm/mali400/platform/nexell/s5pxx18_core_scaling.c	2018-05-06 08:49:49.182695581 +0200
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2013-2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file s5pxx18_core_scaling.c
+ * Example core scaling policy.
+ */
+
+#include "s5pxx18_core_scaling.h"
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/workqueue.h>
+
+static int num_cores_total;
+static int num_cores_enabled;
+
+static struct work_struct wq_work;
+
+static void set_num_cores(struct work_struct *work)
+{
+	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
+	MALI_DEBUG_ASSERT(0 == err);
+	MALI_IGNORE(err);
+}
+
+static void enable_one_core(void)
+{
+	if (num_cores_enabled < num_cores_total) {
+		++num_cores_enabled;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
+	}
+
+	MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+	MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+static void disable_one_core(void)
+{
+	if (1 < num_cores_enabled) {
+		--num_cores_enabled;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
+	}
+
+	MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+	MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+static void enable_max_num_cores(void)
+{
+	if (num_cores_enabled < num_cores_total) {
+		num_cores_enabled = num_cores_total;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
+	}
+
+	MALI_DEBUG_ASSERT(num_cores_total == num_cores_enabled);
+}
+
+void mali_core_scaling_init(int num_pp_cores)
+{
+	INIT_WORK(&wq_work, set_num_cores);
+
+	num_cores_total = num_pp_cores;
+	num_cores_enabled = num_pp_cores;
+
+	/* NOTE: Mali is not fully initialized at this point. */
+}
+
+void mali_core_scaling_sync(int num_cores)
+{
+	num_cores_enabled = num_cores;
+}
+
+void mali_core_scaling_term(void)
+{
+	flush_scheduled_work();
+}
+
+#define PERCENT_OF(percent, max) ((int) ((percent)*(max)/100.0 + 0.5))
+
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
+{
+	/*
+	 * This function implements a very trivial PP core scaling algorithm.
+	 *
+	 * It is _NOT_ of production quality.
+	 * The only intention behind this algorithm is to exercise and test the
+	 * core scaling functionality of the driver.
+	 * It is _NOT_ tuned for either power saving or performance!
+ * + * Other metrics than PP utilization need to be considered as well + * in order to make a good core scaling algorithm. + */ + + MALI_DEBUG_PRINT(3, ("Utilization: (%3d, %3d, %3d), cores enabled: %d/%d\n", + data->utilization_gpu, data->utilization_gp, + data->utilization_pp, num_cores_enabled, + num_cores_total)); + + /* NOTE: this function is normally called directly from the utilization + * callback which is in timer context. */ + + if (PERCENT_OF(90, 256) < data->utilization_pp) { + enable_max_num_cores(); + } else if (PERCENT_OF(50, 256) < data->utilization_pp) { + enable_one_core(); + } else if (PERCENT_OF(40, 256) < data->utilization_pp) { + /* do nothing */ + } else if (PERCENT_OF(0, 256) < data->utilization_pp) { + disable_one_core(); + } else { + /* do nothing */ + } +} diff -ENwbur a/drivers/gpu/arm/mali400/platform/nexell/s5pxx18_core_scaling.h b/drivers/gpu/arm/mali400/platform/nexell/s5pxx18_core_scaling.h --- a/drivers/gpu/arm/mali400/platform/nexell/s5pxx18_core_scaling.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/platform/nexell/s5pxx18_core_scaling.h 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +/** + * @file arm_core_scaling.h + * Example core scaling policy. + */ + +#ifndef __ARM_CORE_SCALING_H__ +#define __ARM_CORE_SCALING_H__ + +struct mali_gpu_utilization_data; + +/** + * Initialize core scaling policy. + * + * @note The core scaling policy will assume that all PP cores are on initially. + * + * @param num_pp_cores Total number of PP cores. + */ +void mali_core_scaling_init(int num_pp_cores); + +/** + * Terminate core scaling policy. + */ +void mali_core_scaling_term(void); + +/** + * Update core scaling policy with new utilization data. + * + * @param data Utilization data. + */ +void mali_core_scaling_update(struct mali_gpu_utilization_data *data); + +void mali_core_scaling_sync(int num_cores); + +#endif /* __ARM_CORE_SCALING_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/readme.txt b/drivers/gpu/arm/mali400/readme.txt --- a/drivers/gpu/arm/mali400/readme.txt 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/readme.txt 2018-05-06 08:49:49.182695581 +0200 @@ -0,0 +1,28 @@ +Building the Mali Device Driver for Linux +----------------------------------------- + +Build the Mali Device Driver for Linux by running the following make command: + +KDIR= USING_UMP= BUILD= make + +where + kdir_path: Path to your Linux Kernel directory + ump_option: 1 = Enable UMP support(*) + 0 = disable UMP support + build_option: debug = debug build of driver + release = release build of driver + +(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver + must be available. The UMP_SYMVERS_FILE variable in the Makefile should + point to this file. This file is generated when the UMP driver is built. + +The result will be a mali.ko file, which can be loaded into the Linux kernel +by using the insmod command. + +Use of UMP is not recommended. The dma-buf API in the Linux kernel has +replaced UMP. 
The Mali Device Driver will be built with dma-buf support if the +kernel config includes enabled dma-buf. + +The kernel needs to be provided with a platform_device struct for the Mali GPU +device. See the mali_utgard.h header file for how to set up the Mali GPU +resources. diff -ENwbur a/drivers/gpu/arm/mali400/regs/mali_200_regs.h b/drivers/gpu/arm/mali400/regs/mali_200_regs.h --- a/drivers/gpu/arm/mali400/regs/mali_200_regs.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/regs/mali_200_regs.h 2018-05-06 08:49:49.186695742 +0200 @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef _MALI200_REGS_H_ +#define _MALI200_REGS_H_ + +/** + * Enum for management register addresses. + */ +enum mali200_mgmt_reg { + MALI200_REG_ADDR_MGMT_VERSION = 0x1000, + MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004, + MALI200_REG_ADDR_MGMT_STATUS = 0x1008, + MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c, + + MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020, + MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024, + MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028, + MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c, + + MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050, + + MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080, + MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084, + MALI200_REG_ADDR_MGMT_PERF_CNT_0_LIMIT = 0x1088, + MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c, + + MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0, + MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4, + MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac, + + MALI200_REG_ADDR_MGMT_PERFMON_CONTR = 0x10b0, + MALI200_REG_ADDR_MGMT_PERFMON_BASE = 0x10b4, + + MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0 + +}; + +#define MALI200_REG_VAL_PERF_CNT_ENABLE 1 + +enum mali200_mgmt_ctrl_mgmt { + MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1 << 0), + MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1 << 3), + MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1 << 5), + MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1 << 6), + MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1 << 7), /* Only valid for Mali-300 and later */ +}; + +enum mali200_mgmt_irq { + MALI200_REG_VAL_IRQ_END_OF_FRAME = (1 << 0), + MALI200_REG_VAL_IRQ_END_OF_TILE = (1 << 1), + MALI200_REG_VAL_IRQ_HANG = (1 << 2), + MALI200_REG_VAL_IRQ_FORCE_HANG = (1 << 3), + MALI200_REG_VAL_IRQ_BUS_ERROR = (1 << 4), + MALI200_REG_VAL_IRQ_BUS_STOP = (1 << 5), + MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1 << 6), + MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1 << 7), + MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1 << 8), + MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1 << 9), + MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1 << 10), + MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1 << 11), + MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1 << 12), +}; + +#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\ + MALI200_REG_VAL_IRQ_END_OF_FRAME |\ + MALI200_REG_VAL_IRQ_END_OF_TILE |\ + MALI200_REG_VAL_IRQ_HANG |\ + MALI200_REG_VAL_IRQ_FORCE_HANG |\ + MALI200_REG_VAL_IRQ_BUS_ERROR |\ + MALI200_REG_VAL_IRQ_BUS_STOP |\ + MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\ + MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\ + 
MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\ + MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\ + MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\ + MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\ + MALI400PP_REG_VAL_IRQ_RESET_COMPLETED)) + +#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\ + MALI200_REG_VAL_IRQ_END_OF_FRAME |\ + MALI200_REG_VAL_IRQ_FORCE_HANG |\ + MALI200_REG_VAL_IRQ_BUS_ERROR |\ + MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\ + MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\ + MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\ + MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW)) + +#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0)) + +enum mali200_mgmt_status { + MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1 << 0), + MALI200_REG_VAL_STATUS_BUS_STOPPED = (1 << 4), +}; + +enum mali200_render_unit { + MALI200_REG_ADDR_FRAME = 0x0000, + MALI200_REG_ADDR_RSW = 0x0004, + MALI200_REG_ADDR_STACK = 0x0030, + MALI200_REG_ADDR_STACK_SIZE = 0x0034, + MALI200_REG_ADDR_ORIGIN_OFFSET_X = 0x0040 +}; + +enum mali200_wb_unit { + MALI200_REG_ADDR_WB0 = 0x0100, + MALI200_REG_ADDR_WB1 = 0x0200, + MALI200_REG_ADDR_WB2 = 0x0300 +}; + +enum mali200_wb_unit_regs { + MALI200_REG_ADDR_WB_SOURCE_SELECT = 0x0000, + MALI200_REG_ADDR_WB_SOURCE_ADDR = 0x0004, +}; + +/* This should be in the top 16 bit of the version register of Mali PP */ +#define MALI200_PP_PRODUCT_ID 0xC807 +#define MALI300_PP_PRODUCT_ID 0xCE07 +#define MALI400_PP_PRODUCT_ID 0xCD07 +#define MALI450_PP_PRODUCT_ID 0xCF07 +#define MALI470_PP_PRODUCT_ID 0xCF08 + + + +#endif /* _MALI200_REGS_H_ */ diff -ENwbur a/drivers/gpu/arm/mali400/regs/mali_gp_regs.h b/drivers/gpu/arm/mali400/regs/mali_gp_regs.h --- a/drivers/gpu/arm/mali400/regs/mali_gp_regs.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/regs/mali_gp_regs.h 2018-05-06 08:49:49.186695742 +0200 @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2010, 2012-2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef _MALIGP2_CONROL_REGS_H_ +#define _MALIGP2_CONROL_REGS_H_ + +/** + * These are the different geometry processor control registers. + * Their usage is to control and monitor the operation of the + * Vertex Shader and the Polygon List Builder in the geometry processor. + * Addresses are in 32-bit word relative sizes. 
+ * @see [P0081] "Geometry Processor Data Structures" for details + */ + +typedef enum { + MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00, + MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04, + MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08, + MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c, + MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10, + MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14, + MALIGP2_REG_ADDR_MGMT_CMD = 0x20, + MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24, + MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28, + MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C, + MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30, + MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C, + MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40, + MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44, + MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48, + MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C, + MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50, + MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_LIMIT = 0x54, + MALIGP2_REG_ADDR_MGMT_STATUS = 0x68, + MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C, + MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80, + MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84, + MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94, + MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98, +} maligp_reg_addr_mgmt_addr; + +#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1 + +/** + * Commands to geometry processor. + * @see MALIGP2_CTRL_REG_CMD + */ +typedef enum { + MALIGP2_REG_VAL_CMD_START_VS = (1 << 0), + MALIGP2_REG_VAL_CMD_START_PLBU = (1 << 1), + MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1 << 4), + MALIGP2_REG_VAL_CMD_RESET = (1 << 5), + MALIGP2_REG_VAL_CMD_FORCE_HANG = (1 << 6), + MALIGP2_REG_VAL_CMD_STOP_BUS = (1 << 9), + MALI400GP_REG_VAL_CMD_SOFT_RESET = (1 << 10), /* only valid for Mali-300 and later */ +} mgp_contr_reg_val_cmd; + + +/** @defgroup MALIGP2_IRQ + * Interrupt status of geometry processor. 
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, + * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT + * @{ + */ +#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0) +#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1) +#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2) +#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3) +#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4) +#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5) +#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6) +#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7) +#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8) +#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9) +#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10) +#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11) +#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12) +#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13) +#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14) +#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19) +#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20) +#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21) +#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22) + +/* Mask defining all IRQs in Mali GP */ +#define MALIGP2_REG_VAL_IRQ_MASK_ALL \ + (\ + MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \ + MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \ + MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \ + MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \ + MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \ + MALIGP2_REG_VAL_IRQ_HANG | \ + MALIGP2_REG_VAL_IRQ_FORCE_HANG | \ + MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \ + MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \ + MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \ + MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \ + MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \ + MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \ + MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \ + MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \ + MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \ + MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \ + MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \ + MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS) + +/* Mask defining the IRQs in Mali GP which we use */ +#define MALIGP2_REG_VAL_IRQ_MASK_USED \ + (\ + MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \ + MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \ + MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \ + MALIGP2_REG_VAL_IRQ_FORCE_HANG | \ + MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \ + MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \ + MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \ + MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \ + MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \ + MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \ + MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \ + MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS) + +/* Mask defining non IRQs on MaliGP2*/ +#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0 + +/** }@ defgroup MALIGP2_IRQ*/ + +/** @defgroup MALIGP2_STATUS + * The different Status values to the geometry processor. 
+ * @see MALIGP2_CTRL_REG_STATUS + * @{ + */ +#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002 +#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004 +#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008 +#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040 +#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100 +/** }@ defgroup MALIGP2_STATUS*/ + +#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\ + MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\ + MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) + + +#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\ + MALIGP2_REG_VAL_STATUS_BUS_ERROR |\ + MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR ) + +/* This should be in the top 16 bit of the version register of gp.*/ +#define MALI200_GP_PRODUCT_ID 0xA07 +#define MALI300_GP_PRODUCT_ID 0xC07 +#define MALI400_GP_PRODUCT_ID 0xB07 +#define MALI450_GP_PRODUCT_ID 0xD07 + +/** + * The different sources for instrumented on the geometry processor. + * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC + */ + +enum MALIGP2_cont_reg_perf_cnt_src { + MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a, +}; + +#endif diff -ENwbur a/drivers/gpu/arm/mali400/timestamp-arm11-cc/mali_timestamp.c b/drivers/gpu/arm/mali400/timestamp-arm11-cc/mali_timestamp.c --- a/drivers/gpu/arm/mali400/timestamp-arm11-cc/mali_timestamp.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/timestamp-arm11-cc/mali_timestamp.c 2018-05-06 08:49:49.186695742 +0200 @@ -0,0 +1,13 @@ +/* + * Copyright (C) 2010-2011, 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_timestamp.h" + +/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */ diff -ENwbur a/drivers/gpu/arm/mali400/timestamp-arm11-cc/mali_timestamp.h b/drivers/gpu/arm/mali400/timestamp-arm11-cc/mali_timestamp.h --- a/drivers/gpu/arm/mali400/timestamp-arm11-cc/mali_timestamp.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/timestamp-arm11-cc/mali_timestamp.h 2018-05-06 08:49:49.186695742 +0200 @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef __MALI_TIMESTAMP_H__ +#define __MALI_TIMESTAMP_H__ + +#include "mali_osk.h" + +MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void) +{ + /* + * reset counters and overflow flags + */ + + u32 mask = (1 << 0) | /* enable all three counters */ + (0 << 1) | /* reset both Count Registers to 0x0 */ + (1 << 2) | /* reset the Cycle Counter Register to 0x0 */ + (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */ + (0 << 4) | /* Count Register 0 interrupt enable */ + (0 << 5) | /* Count Register 1 interrupt enable */ + (0 << 6) | /* Cycle Counter interrupt enable */ + (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */ + (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */ + (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */ + + __asm__ __volatile__("MCR p15, 0, %0, c15, c12, 0" : : "r"(mask)); + + return _MALI_OSK_ERR_OK; +} + +MALI_STATIC_INLINE u64 _mali_timestamp_get(void) +{ + u32 result; + + /* this is for the clock cycles */ + __asm__ __volatile__("MRC p15, 0, %0, c15, c12, 1" : "=r"(result)); + + return (u64)result; +} + +#endif /* __MALI_TIMESTAMP_H__ */ diff -ENwbur a/drivers/gpu/arm/mali400/timestamp-default/mali_timestamp.c b/drivers/gpu/arm/mali400/timestamp-default/mali_timestamp.c --- a/drivers/gpu/arm/mali400/timestamp-default/mali_timestamp.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/timestamp-default/mali_timestamp.c 2018-05-06 08:49:49.186695742 +0200 @@ -0,0 +1,13 @@ +/* + * Copyright (C) 2010-2011, 2013, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include "mali_timestamp.h" + +/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */ diff -ENwbur a/drivers/gpu/arm/mali400/timestamp-default/mali_timestamp.h b/drivers/gpu/arm/mali400/timestamp-default/mali_timestamp.h --- a/drivers/gpu/arm/mali400/timestamp-default/mali_timestamp.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/arm/mali400/timestamp-default/mali_timestamp.h 2018-05-06 08:49:49.186695742 +0200 @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2010-2011, 2013-2014, 2016 ARM Limited. All rights reserved. + * + * This program is free software and is provided to you under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. + * + * A copy of the licence is included with the program, and can also be obtained from Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+	return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+	return _mali_osk_boot_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff -ENwbur a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
--- a/drivers/gpu/drm/Kconfig	2018-05-06 08:47:36.357304664 +0200
+++ b/drivers/gpu/drm/Kconfig	2018-05-06 08:49:49.186695742 +0200
@@ -278,6 +278,8 @@
 source "drivers/gpu/drm/pl111/Kconfig"
 
+source "drivers/gpu/drm/nexell/Kconfig"
+
 # Keep legacy drivers last
 
 menuconfig DRM_LEGACY
diff -ENwbur a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
--- a/drivers/gpu/drm/Makefile	2018-05-06 08:47:36.357304664 +0200
+++ b/drivers/gpu/drm/Makefile	2018-05-06 08:49:49.186695742 +0200
@@ -101,3 +101,4 @@
 obj-$(CONFIG_DRM_MXSFB) += mxsfb/
 obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
 obj-$(CONFIG_DRM_PL111) += pl111/
+obj-$(CONFIG_DRM_NX) += nexell/
diff -ENwbur a/drivers/gpu/drm/nexell/Kconfig b/drivers/gpu/drm/nexell/Kconfig
--- a/drivers/gpu/drm/nexell/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/drm/nexell/Kconfig	2018-05-06 08:49:49.566711163 +0200
@@ -0,0 +1,51 @@
+config DRM_NX
+	tristate "DRM Support for NEXELL Display Controller"
+	depends on DRM && (ARCH_S5P6818 || ARCH_S5P4418)
+	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+	select VIDEOMODE_HELPERS
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+	help
+	  Choose this option if you have a Nexell SoC chipset.
+	  This driver provides kernel mode setting and buffer
+	  management to userspace. If M is selected, the module
+	  will be called nx_drm.
+
+config DRM_NX_RGB
+	bool "RGB LCD support"
+	depends on DRM_NX
+	select DRM_PANEL
+	help
+	  This selects support for RGB LCD display output.
+	  If you want to enable RGB LCD display,
+	  you should select this option.
+
+config DRM_NX_LVDS
+	bool "LVDS LCD support"
+	depends on DRM_NX
+	select DRM_PANEL
+	help
+	  This selects support for LVDS LCD display output.
+	  If you want to enable LVDS LCD display,
+	  you should select this option.
+
+config DRM_NX_MIPI_DSI
+	bool "MIPI DSI support"
+	depends on DRM_NX
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	help
+	  This selects support for MIPI-DSI display devices.
+	  If you want to enable a MIPI-DSI display device,
+	  you should select this option.
+
+config DRM_NX_HDMI
+	bool "HDMI support"
+	depends on DRM_NX
+	help
+	  This selects support for HDMI display output.
+	  If you want to enable HDMI display,
+	  you should select this option.
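The connector implementation further below dispatches through a per-panel ops table (struct nx_drm_ops). A skeletal backend sketch with the three callbacks the connector code actually invokes; the signatures are inferred from the call sites in nx_drm_connector.c, and the fake_panel_* names are placeholders, not part of the patch:

static int fake_panel_get_modes(struct device *dev,
			struct drm_connector *connector)
{
	/* add drm_display_mode entries here; return the number added */
	return 0;
}

static int fake_panel_check_mode(struct device *dev,
			struct drm_display_mode *mode)
{
	return MODE_OK;	/* accept every mode in this sketch */
}

static bool fake_panel_is_connected(struct device *dev,
			struct drm_connector *connector)
{
	return true;	/* a fixed panel is always connected */
}

static struct nx_drm_ops fake_panel_ops = {
	.get_modes    = fake_panel_get_modes,
	.check_mode   = fake_panel_check_mode,
	.is_connected = fake_panel_is_connected,
};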
diff -ENwbur a/drivers/gpu/drm/nexell/Makefile b/drivers/gpu/drm/nexell/Makefile
--- a/drivers/gpu/drm/nexell/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/drm/nexell/Makefile	2018-05-06 08:49:49.566711163 +0200
@@ -0,0 +1,14 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/nexell
+nx_drm-y := nx_drm_drv.o nx_drm_connector.o nx_drm_encoder.o \
+	nx_drm_crtc.o nx_drm_plane.o nx_drm_fb.o nx_drm_gem.o
+
+obj-$(CONFIG_DRM_NX) += soc/
+obj-$(CONFIG_DRM_NX_RGB) += nx_drm_lcd.o
+obj-$(CONFIG_DRM_NX_LVDS) += nx_drm_lcd.o
+obj-$(CONFIG_DRM_NX_MIPI_DSI) += nx_drm_lcd.o
+obj-$(CONFIG_DRM_NX_HDMI) += nx_drm_hdmi.o
+obj-$(CONFIG_DRM_NX) += nx_drm.o
diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_connector.c b/drivers/gpu/drm/nexell/nx_drm_connector.c
--- a/drivers/gpu/drm/nexell/nx_drm_connector.c	1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/drm/nexell/nx_drm_connector.c	2018-05-06 08:49:49.566711163 +0200
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2016 Nexell Co., Ltd.
+ * Author: junghyun, kim
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+#include
+#include
+
+#include "nx_drm_drv.h"
+#include "nx_drm_connector.h"
+#include "nx_drm_encoder.h"
+#include "soc/s5pxx18_drm_dp.h"
+
+static int nx_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct nx_drm_device *display = to_nx_connector(connector)->display;
+	struct nx_drm_ops *ops = display->ops;
+
+	DRM_DEBUG_KMS("enter\n");
+
+	if (ops && ops->get_modes)
+		return ops->get_modes(display->dev, connector);
+
+	DRM_ERROR("fail : create a new display mode.\n");
+	return 0;
+}
+
+static int nx_drm_connector_mode_valid(struct drm_connector *connector,
+			struct drm_display_mode *mode)
+{
+	struct nx_drm_device *display = to_nx_connector(connector)->display;
+	struct nx_drm_ops *ops = display->ops;
+
+	DRM_DEBUG_KMS("enter\n");
+	DRM_DEBUG_KMS("bpp specified : %s, %d\n",
+		connector->cmdline_mode.bpp_specified ?
"yes" : "no", + connector->cmdline_mode.bpp); + + if (ops && ops->check_mode) + return ops->check_mode(display->dev, mode); + + return MODE_BAD; +} + +struct drm_encoder *nx_drm_best_encoder(struct drm_connector *connector) +{ + struct nx_drm_connector *nx_connector = to_nx_connector(connector); + struct drm_encoder *encoder = nx_connector->encoder; + + if (encoder) + DRM_DEBUG_KMS("encoodr id:%d (enc.%d) panel %s\n", + encoder->base.id, to_nx_encoder(encoder)->pipe, + dp_panel_type_name( + dp_panel_get_type(nx_connector->display))); + + DRM_DEBUG_KMS("connector id:%d\n", connector->base.id); + return encoder; +} + +static struct drm_connector_helper_funcs nx_drm_connector_helper_funcs = { + .get_modes = nx_drm_connector_get_modes, + .mode_valid = nx_drm_connector_mode_valid, + .best_encoder = nx_drm_best_encoder, +}; + +static enum drm_connector_status nx_drm_connector_detect( + struct drm_connector *connector, bool force) +{ + struct nx_drm_connector *nx_connector = to_nx_connector(connector); + struct nx_drm_device *display = nx_connector->display; + struct nx_drm_ops *ops = display->ops; + enum drm_connector_status status = connector_status_disconnected; + + DRM_DEBUG_KMS("enter connector id:%d\n", connector->base.id); + + if (ops && ops->is_connected) { + if (ops->is_connected(display->dev, connector)) + status = connector_status_connected; + else + status = connector_status_disconnected; + } + + DRM_DEBUG_KMS("status: %s\n", + status == connector_status_connected ? "connected" : + "disconnected"); + + return status; +} + +static void nx_drm_connector_destroy(struct drm_connector *connector) +{ + struct nx_drm_connector *nx_connector = to_nx_connector(connector); + struct drm_device *drm = connector->dev; + + DRM_DEBUG_KMS("enter\n"); + + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + devm_kfree(drm->dev, nx_connector); +} + +static int nx_drm_connector_dpms(struct drm_connector *connector, int mode) +{ + DRM_DEBUG_KMS("enter [CONNECTOR:%d] dpms:%d\n", + connector->base.id, mode); + + return drm_helper_connector_dpms(connector, mode); +} + +static struct drm_connector_funcs nx_drm_connector_funcs = { + .dpms = nx_drm_connector_dpms, + .detect = nx_drm_connector_detect, + .destroy = nx_drm_connector_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, +}; + +void nx_drm_connector_destroy_and_detach(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + + BUG_ON(!connector); + encoder = connector->encoder; + + if (encoder) + encoder->funcs->destroy(encoder); + + if (connector) + connector->funcs->destroy(connector); +} +EXPORT_SYMBOL(nx_drm_connector_destroy_and_detach); + +struct drm_connector *nx_drm_connector_create_and_attach( + struct drm_device *drm, + struct nx_drm_device *display, int pipe, + unsigned int possible_crtcs, + enum dp_panel_type panel_type, void *context) +{ + struct nx_drm_priv *priv = drm->dev_private; + struct nx_drm_connector *nx_connector; + struct drm_connector *connector; + struct drm_encoder *encoder; + + /* bitmask of potential CRTC bindings */ + int con_type = 0, enc_type = 0; + bool interlace_allowed = false; + uint8_t polled = 0; + int err; + + /* + * if no possible crtcs, you can connect all crtcs. 
+ */ + if (0 == possible_crtcs) + possible_crtcs = (1 << priv->num_crtcs) - 1; + + DRM_DEBUG_KMS("enter pipe.%d crtc mask:0x%x\n", pipe, possible_crtcs); + + BUG_ON(!display); + + switch (panel_type) { + case dp_panel_type_rgb: + con_type = DRM_MODE_CONNECTOR_VGA; + enc_type = DRM_MODE_ENCODER_TMDS; + break; + case dp_panel_type_lvds: + con_type = DRM_MODE_CONNECTOR_LVDS; + enc_type = DRM_MODE_ENCODER_LVDS; + break; + case dp_panel_type_mipi: /* MiPi DSI */ + con_type = DRM_MODE_CONNECTOR_DSI; + enc_type = DRM_MODE_ENCODER_DSI; + break; + case dp_panel_type_hdmi: + con_type = DRM_MODE_CONNECTOR_HDMIA; + enc_type = DRM_MODE_ENCODER_TMDS; + interlace_allowed = true; + break; + case dp_panel_type_vidi: + con_type = DRM_MODE_CONNECTOR_VIRTUAL; + enc_type = DRM_MODE_ENCODER_VIRTUAL; + break; + default: + con_type = DRM_MODE_CONNECTOR_Unknown; + DRM_ERROR("fail : unknown drm connector type(%d)\n", + panel_type); + return NULL; + } + polled = DRM_CONNECTOR_POLL_HPD; /* for hpd_irq_event */ + + nx_connector = kzalloc(sizeof(*nx_connector), GFP_KERNEL); + if (!nx_connector) + return NULL; + + connector = &nx_connector->connector; + connector->polled = polled; + connector->interlace_allowed = interlace_allowed; + + /* create encoder */ + encoder = nx_drm_encoder_create(drm, display, enc_type, + pipe, possible_crtcs, context); + if (IS_ERR(encoder)) + goto err_alloc; + + /* create connector and attach */ + drm_connector_helper_add(connector, &nx_drm_connector_helper_funcs); + drm_connector_init(drm, connector, &nx_drm_connector_funcs, con_type); + err = drm_connector_register(connector); + if (err) + goto err_encoder; + + //connector->encoder = encoder; + err = drm_mode_connector_attach_encoder(connector, encoder); + if (err) { + DRM_ERROR("fail : attach a connector to a encoder\n"); + goto err_connector; + } + + nx_connector->display = display; + nx_connector->context = context; + nx_connector->encoder = encoder; + + /* inititalize dpms status */ + connector->dpms = nx_drm_dp_encoder_get_dpms(encoder); + + DRM_DEBUG_KMS("done, encoder id:%d , connector id:%d, dpms %s\n", + encoder->base.id, connector->base.id, + connector->dpms == DRM_MODE_DPMS_ON ? "on" : "off"); + + return connector; + +err_connector: + drm_connector_unregister(connector); +err_encoder: + drm_connector_cleanup(connector); + if (encoder) + encoder->funcs->destroy(encoder); +err_alloc: + kfree(nx_connector); + + return NULL; +} +EXPORT_SYMBOL(nx_drm_connector_create_and_attach); + diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_connector.h b/drivers/gpu/drm/nexell/nx_drm_connector.h --- a/drivers/gpu/drm/nexell/nx_drm_connector.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/drm/nexell/nx_drm_connector.h 2018-05-06 08:49:49.566711163 +0200 @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: junghyun, kim + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#ifndef _NX_DRM_CONNECTOR_H_ +#define _NX_DRM_CONNECTOR_H_ + +#include "soc/s5pxx18_drm_dp.h" + +struct nx_drm_connector { + struct drm_connector connector; + struct drm_encoder *encoder; + struct nx_drm_device *display; + void *context; /* device context */ +}; + +#define to_nx_connector(c) \ + container_of(c, struct nx_drm_connector, connector) + +struct drm_connector *nx_drm_connector_create_and_attach( + struct drm_device *drm, + struct nx_drm_device *display, + int pipe, unsigned int possible_crtcs, + enum dp_panel_type panel_type, void *context); + +void nx_drm_connector_destroy_and_detach(struct drm_connector *connector); + +#endif diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_crtc.c b/drivers/gpu/drm/nexell/nx_drm_crtc.c --- a/drivers/gpu/drm/nexell/nx_drm_crtc.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/drm/nexell/nx_drm_crtc.c 2018-05-06 08:49:49.566711163 +0200 @@ -0,0 +1,674 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: junghyun, kim + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include +#include + +#include "nx_drm_drv.h" +#include "nx_drm_crtc.h" +#include "nx_drm_plane.h" +#include "nx_drm_gem.h" +#include "nx_drm_fb.h" +#include "soc/s5pxx18_drm_dp.h" + +/* + * for multiple framebuffers. + */ +static int fb_align_rgb = 1; +static bool fb_vblank_wait; +MODULE_PARM_DESC(fb_align, "frame buffer's align (0~4096)"); +MODULE_PARM_DESC(fb_vblank, "frame buffer wait vblank for pan display"); + +module_param_named(fb_align, fb_align_rgb, int, 0600); +module_param_named(fb_vblank, fb_vblank_wait, bool, 0600); + +static void nx_drm_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *drm = crtc->dev; + struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc); + + DRM_DEBUG_KMS("enter [CRTC:%d] dpms:%d\n", crtc->base.id, mode); + + if (nx_crtc->dpms_mode == mode) { + DRM_DEBUG_KMS("dpms %d same as previous one.\n", mode); + return; + } + + mutex_lock(&drm->struct_mutex); + + switch (mode) { + case DRM_MODE_DPMS_ON: + nx_crtc->dpms_mode = mode; + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + nx_crtc->dpms_mode = mode; + break; + default: + DRM_ERROR("fail : unspecified mode %d\n", mode); + goto err_dpms; + } + + nx_drm_dp_crtc_dpms(crtc, mode); + +err_dpms: + mutex_unlock(&drm->struct_mutex); +} + +static void nx_drm_crtc_prepare(struct drm_crtc *crtc) +{ + DRM_DEBUG_KMS("enter\n"); +} + +static void nx_drm_crtc_commit(struct drm_crtc *crtc) +{ + struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc); + + DRM_DEBUG_KMS("enter current [CRTC:%d] dpms:%d\n", + crtc->base.id, nx_crtc->dpms_mode); + + /* + * when set_crtc is requested from user or at booting time, + * crtc->commit would be called without dpms call so if dpms is + * no power on then crtc->dpms should be called + * with DRM_MODE_DPMS_ON for the hardware power to be on. 
+ */ + if (nx_crtc->dpms_mode != DRM_MODE_DPMS_ON) + nx_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + + nx_drm_dp_crtc_commit(crtc); +} + +static bool nx_drm_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int nx_drm_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_framebuffer *fb = crtc->primary->fb; + struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc); + struct drm_plane *plane = crtc->primary; + struct videomode vm; + unsigned int crtc_w, crtc_h, src_w, src_h; + int ret = 0; + + DRM_DEBUG_KMS("enter\n"); + + drm_display_mode_to_videomode(mode, &vm); + drm_mode_copy(&nx_crtc->current_mode, mode); + + /* + * copy the mode data adjusted by mode_fixup() into crtc->mode + * so that hardware can be seet to proper mode. + */ + memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); + + crtc_w = vm.hactive; + crtc_h = vm.vactive; + src_w = fb->width - x; + src_h = fb->height - y; + + ret = nx_drm_dp_plane_mode_set(crtc, + crtc->primary, fb, + 0, 0, crtc_w, crtc_h, x, y, src_w, src_h); + if (0 > ret) + return ret; + + plane->crtc = crtc; + to_nx_plane(plane)->enabled = true; + + return ret; +} + +static int nx_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct drm_device *drm = crtc->dev; + struct drm_framebuffer *fb = crtc->primary->fb; + struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc); + struct nx_drm_priv *priv = drm->dev_private; + struct nx_drm_fbdev *fbdev = priv->framebuffer_dev->fbdev; + struct videomode vm; + unsigned int crtc_w, crtc_h, src_w, src_h; + int align = fb_align_rgb; + bool doublefb = fbdev->fb_buffers > 1 ? 
+
+static void nx_drm_crtc_disable(struct drm_crtc *crtc)
+{
+    struct drm_plane *plane;
+    int ret;
+
+    DRM_DEBUG_KMS("enter\n");
+
+    nx_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+    drm_for_each_plane(plane, crtc->dev) {
+        if (plane->crtc != crtc)
+            continue;
+        ret = plane->funcs->disable_plane(plane, NULL);
+        if (ret)
+            DRM_ERROR("fail : disable plane %d\n", ret);
+    }
+}
+
+static struct drm_crtc_helper_funcs nx_crtc_helper_funcs = {
+    .dpms = nx_drm_crtc_dpms,
+    .prepare = nx_drm_crtc_prepare,
+    .commit = nx_drm_crtc_commit,
+    .mode_fixup = nx_drm_crtc_mode_fixup,
+    .mode_set = nx_drm_crtc_mode_set,
+    .mode_set_base = nx_drm_crtc_mode_set_base,
+    .disable = nx_drm_crtc_disable,
+};
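The page-flip hook that follows refuses requests without an event (-EINVAL) and completes the queued event from the vblank interrupt handler further below. A minimal libdrm consumer sketch, illustrative only (flip_and_wait and flip_done are made-up names; it assumes an open fd, a crtc id, and a framebuffer id):

#include <stdint.h>
#include <poll.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void flip_done(int fd, unsigned int seq, unsigned int sec,
                      unsigned int usec, void *data)
{
    /* the pending vblank event queued by the driver is delivered here */
    *(int *)data = 1;
}

static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
    int done = 0;
    drmEventContext ev = {
        .version = DRM_EVENT_CONTEXT_VERSION,
        .page_flip_handler = flip_done,
    };
    struct pollfd pfd = { .fd = fd, .events = POLLIN };

    if (drmModePageFlip(fd, crtc_id, fb_id,
                        DRM_MODE_PAGE_FLIP_EVENT, &done))
        return -1;
    while (!done && poll(&pfd, 1, -1) >= 0)
        drmHandleEvent(fd, &ev); /* reads the event, calls flip_done() */
    return 0;
}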
+
+static int nx_drm_crtc_page_flip(struct drm_crtc *crtc,
+            struct drm_framebuffer *fb,
+            struct drm_pending_vblank_event *event,
+            uint32_t flags,
+            struct drm_modeset_acquire_ctx *ctx)
+{
+    struct drm_device *drm = crtc->dev;
+    struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc);
+    struct drm_framebuffer *old_fb = crtc->primary->fb;
+    unsigned int crtc_w, crtc_h;
+    int pipe = nx_crtc->pipe;
+    int ret;
+
+    DRM_DEBUG_KMS("page flip crtc.%d\n", nx_crtc->pipe);
+
+    /* when the page flip is requested, crtc's dpms should be on */
+    if (nx_crtc->dpms_mode > DRM_MODE_DPMS_ON) {
+        DRM_ERROR("fail : page flip request.\n");
+        return -EINVAL;
+    }
+
+    if (!event)
+        return -EINVAL;
+
+    spin_lock_irq(&drm->event_lock);
+
+    if (nx_crtc->event) {
+        ret = -EBUSY;
+        goto out;
+    }
+
+    ret = drm_crtc_vblank_get(crtc);
+    if (ret) {
+        DRM_ERROR("fail : to acquire vblank counter\n");
+        goto out;
+    }
+
+    nx_crtc->event = event;
+    spin_unlock_irq(&drm->event_lock);
+
+    /*
+     * The pipe from user space is always 0, so set the event's pipe
+     * to the pipe of the current owner.
+     */
+    event->pipe = pipe;
+
+    crtc->primary->fb = fb;
+    crtc_w = fb->width - crtc->x;
+    crtc_h = fb->height - crtc->y;
+
+    ret = nx_drm_dp_plane_update(crtc->primary, fb, 0, 0,
+        crtc_w, crtc_h, crtc->x, crtc->y, crtc_w, crtc_h, 0);
+
+    if (ret) {
+        DRM_DEBUG("fail : plane update for page flip %d\n", ret);
+        crtc->primary->fb = old_fb;
+        spin_lock_irq(&drm->event_lock);
+        nx_crtc->event = NULL;
+        drm_crtc_vblank_put(crtc);
+        spin_unlock_irq(&drm->event_lock);
+        return ret;
+    }
+
+    return 0;
+
+out:
+    spin_unlock_irq(&drm->event_lock);
+    return ret;
+}
+
+static void nx_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+    struct nx_drm_priv *priv = crtc->dev->dev_private;
+    struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc);
+    int pipe = nx_crtc->pipe;
+
+    DRM_DEBUG_KMS("enter crtc.%d\n", nx_crtc->pipe);
+
+    priv->crtcs[pipe] = NULL;
+
+    drm_crtc_cleanup(crtc);
+    kfree(nx_crtc);
+}
+
+static struct drm_crtc_funcs nx_crtc_funcs = {
+    .set_config = drm_crtc_helper_set_config,
+    .page_flip = nx_drm_crtc_page_flip,
+    .destroy = nx_drm_crtc_destroy,
+};
+
+int nx_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+    struct nx_drm_priv *priv = drm->dev_private;
+    struct nx_drm_crtc *nx_crtc = to_nx_crtc(priv->crtcs[pipe]);
+
+    DRM_DEBUG_KMS("enter pipe.%d\n", pipe);
+
+    if (nx_crtc->dpms_mode != DRM_MODE_DPMS_ON)
+        return -EPERM;
+
+    nx_drm_dp_crtc_irq_on(&nx_crtc->crtc, pipe);
+
+    return 0;
+}
+
+void nx_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+    struct nx_drm_priv *priv = drm->dev_private;
+    struct nx_drm_crtc *nx_crtc = to_nx_crtc(priv->crtcs[pipe]);
+
+    DRM_DEBUG_KMS("enter pipe.%d\n", pipe);
+
+    nx_drm_dp_crtc_irq_off(&nx_crtc->crtc, pipe);
+}
+
+#ifdef DEBUG_FPS_TIME
+#define SHOW_PERIOD_SEC 1
+#define FPS_HZ (60)
+#define DUMP_FPS_TIME(p) { \
+    static long ts[2] = { 0, }, vb_count; \
+    long new = ktime_to_ms(ktime_get()); \
+    if (0 == (vb_count++ % (FPS_HZ * SHOW_PERIOD_SEC))) \
+        pr_info("[dp.%d] %ld ms\n", p, new - ts[p]); \
+    ts[p] = new; \
+    }
+#else
+#define DUMP_FPS_TIME(p)
+#endif
+
+static irqreturn_t nx_drm_crtc_interrupt(int irq, void *arg)
+{
+    struct drm_device *drm = arg;
+    struct nx_drm_priv *priv = drm->dev_private;
+    int i;
+
+    for (i = 0; i < priv->num_crtcs; i++) {
+        struct drm_crtc *crtc = priv->crtcs[i];
+        struct nx_drm_crtc *nx_crtc;
+
+        if (!crtc)
+            continue;
+
+        nx_crtc = to_nx_crtc(crtc);
+
+        if (irq == nx_crtc->pipe_irq) {
+            struct drm_pending_vblank_event *event = NULL;
+            int pipe = nx_crtc->pipe;
+
+            drm_crtc_handle_vblank(crtc);
+
+            spin_lock(&drm->event_lock);
+
+            event = nx_crtc->event;
+            if (event) {
+                if (nx_crtc->post_closed) {
+                    drm_crtc_vblank_put(crtc);
+                    drm_event_cancel_free(crtc->dev, &event->base);
+                } else {
+                    drm_crtc_send_vblank_event(crtc, event);
+                    drm_crtc_vblank_put(crtc);
+                }
+                nx_crtc->event = NULL;
+                nx_crtc->post_closed = false;
+            }
+
+            spin_unlock(&drm->event_lock);
+
+            DUMP_FPS_TIME(pipe);
+
+            /* clear irq */
+            nx_drm_dp_crtc_irq_done(crtc, pipe);
+            break;
+        }
+    }
+
+    return IRQ_HANDLED;
+}
+
+static int nx_drm_crtc_irq_install(struct drm_device *drm,
+            struct drm_crtc *crtc)
+{
+    struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc);
+    struct drm_driver *drv = drm->driver;
+    int irq = nx_crtc->pipe_irq;
+    int ret = 0;
+
+    if (NULL == drv->irq_handler)
+        drv->irq_handler = nx_drm_crtc_interrupt;
+
+    if (drm->irq_enabled)
+        drm->irq_enabled = false;
+
+    ret = drm_irq_install(drm, irq);
+    if (0 > ret)
+        DRM_ERROR("fail : crtc.%d irq %d !!!\n", nx_crtc->pipe, irq);
+
+
+
DRM_INFO("irq %d install for crtc.%d\n", irq, nx_crtc->pipe); + + return ret; +} + +static int __of_graph_get_port_num_index(struct drm_device *drm, + int *pipe, int pipe_size) +{ + struct device *dev = drm->dev; + struct device_node *parent = dev->of_node; + struct device_node *node, *port; + int num = 0; + + node = of_get_child_by_name(parent, "ports"); + if (node) + parent = node; + + for_each_child_of_node(parent, port) { + u32 port_id = 0; + + if (of_node_cmp(port->name, "port") != 0) + continue; + if (of_property_read_u32(port, "reg", &port_id)) + continue; + + pipe[num] = port_id; + num++; + + if (num > (pipe_size - 1)) + break; + } + of_node_put(node); + + return num; +} + +#define parse_read_prop(n, s, v) { \ + u32 _v; \ + if (!of_property_read_u32(n, s, &_v)) \ + v = _v; \ + } + +static int nx_drm_crtc_parse_dt_setup(struct drm_device *drm, + struct drm_crtc *crtc, int pipe) +{ + struct device_node *np; + struct device *dev = drm->dev; + struct device_node *node = dev->of_node; + struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc); + struct dp_plane_top *top = &nx_crtc->top; + const char *strings[10]; + int i, size = 0, err; + int irq = INVALID_IRQ; + + DRM_DEBUG_KMS("crtc.%d for %s\n", pipe, dev_name(dev)); + + /* + * parse base address + */ + err = nx_drm_dp_crtc_res_parse(to_platform_device(drm->dev), pipe, &irq, + nx_crtc->resets, &nx_crtc->num_resets); + if (0 > err) + return -EINVAL; + + nx_crtc->pipe_irq = irq; + + if (INVALID_IRQ != nx_crtc->pipe_irq) { + err = nx_drm_crtc_irq_install(drm, crtc); + if (0 > err) + return -EINVAL; + } + + /* + * parse port properties. + */ + np = of_graph_get_port_by_id(node, pipe); + if (!np) + return -EINVAL; + + parse_read_prop(np, "back_color", top->back_color); + parse_read_prop(np, "color_key", top->color_key); + + size = of_property_read_string_array(np, "plane-names", strings, 10); + top->num_planes = size; + + for (i = 0; size > i; i++) { + if (!strcmp("primary", strings[i])) { + top->plane_type[i] = DRM_PLANE_TYPE_PRIMARY; + top->plane_flag[i] = PLANE_FLAG_RGB; + } else if (!strcmp("cursor", strings[i])) { + top->plane_type[i] = DRM_PLANE_TYPE_CURSOR; + top->plane_flag[i] = PLANE_FLAG_RGB; + } else if (!strcmp("rgb", strings[i])) { + top->plane_type[i] = DRM_PLANE_TYPE_OVERLAY; + top->plane_flag[i] = PLANE_FLAG_RGB; + } else if (!strcmp("video", strings[i])) { + top->plane_type[i] = DRM_PLANE_TYPE_OVERLAY; + top->plane_flag[i] = PLANE_FLAG_VIDEO; /* video */ + top->video_prior = i; /* priority */ + } else { + top->plane_flag[i] = PLANE_FLAG_UNKNOWN; + DRM_ERROR("fail : unknown plane name [%d] %s\n", + i, strings[i]); + } + DRM_DEBUG_KMS("crtc.%d planes[%d]: %s, bg:0x%08x, key:0x%08x\n", + pipe, i, strings[i], top->back_color, top->color_key); + } + + return 0; +} + +static int nx_drm_crtc_create_planes(struct drm_device *drm, + struct drm_crtc *crtc) +{ + struct drm_plane **planes; + struct drm_plane *plane, *plane_primary = NULL, *plane_cursor = NULL; + struct nx_drm_crtc *nx_crtc = to_nx_crtc(crtc); + struct dp_plane_top *top = &nx_crtc->top; + int i = 0, ret = 0; + int num = 0, plane_num = 0; + + /* setup crtc's planes */ + planes = kzalloc(sizeof(struct drm_plane *) * top->num_planes, + GFP_KERNEL); + if (!planes) + return -ENOMEM; + + for (i = 0; top->num_planes > i; i++) { + enum drm_plane_type drm_type = top->plane_type[i]; + bool video = top->plane_flag[i] == PLANE_FLAG_VIDEO ? + true : false; + + if (PLANE_FLAG_UNKNOWN == top->plane_flag[i]) + continue; + + plane_num = video ? 
PLANE_VIDEO_NUM : num++;
+
+        plane = nx_drm_plane_init(
+            drm, crtc, drm_crtc_mask(crtc), drm_type, plane_num);
+        if (IS_ERR(plane)) {
+            ret = PTR_ERR(plane);
+            goto err_plane;
+        }
+
+        switch (drm_type) {
+        case DRM_PLANE_TYPE_PRIMARY:
+            top->primary_plane = num - 1;
+            plane_primary = plane;
+            break;
+        case DRM_PLANE_TYPE_CURSOR:
+            plane_cursor = plane;
+            break;
+        default:
+            break;
+        }
+        planes[i] = plane;
+    }
+    ret = drm_crtc_init_with_planes(drm,
+        crtc, plane_primary, plane_cursor, &nx_crtc_funcs, NULL);
+    if (0 > ret)
+        goto err_plane;
+    drm_crtc_helper_add(crtc, &nx_crtc_helper_funcs);
+    kfree(planes);
+
+    return 0;
+
+err_plane:
+    for (i = 0; top->num_planes > i; i++) {
+        plane = planes[i];
+        if (plane)
+            plane->funcs->destroy(plane);
+    }
+
+    kfree(planes);
+
+    return ret;
+}
+
+int nx_drm_crtc_init(struct drm_device *drm)
+{
+    struct nx_drm_crtc **nx_crtcs;
+    int pipes[10], num_crtcs = 0;
+    int size = ARRAY_SIZE(pipes);
+    int i = 0, ret = 0;
+    int align = fb_align_rgb;
+
+    /* get ports 'reg' property value */
+    num_crtcs = __of_graph_get_port_num_index(drm, pipes, size);
+
+    if (PAGE_SIZE >= align && align > 0)
+        fb_align_rgb = align;
+    else
+        fb_align_rgb = 1;
+
+    DRM_INFO("num of crtcs %d, FB %d align, FB vblank %s\n",
+        num_crtcs, fb_align_rgb, fb_vblank_wait ? "Wait" : "Pass");
+
+    /* setup crtc's planes */
+    nx_crtcs = kzalloc(sizeof(struct nx_drm_crtc *) * num_crtcs,
+            GFP_KERNEL);
+    if (!nx_crtcs)
+        return -ENOMEM;
+
+    for (i = 0; num_crtcs > i; i++) {
+        struct nx_drm_priv *priv;
+        struct nx_drm_crtc *nx_crtc;
+        int pipe = pipes[i]; /* reg property */
+
+        nx_crtc = kzalloc(sizeof(struct nx_drm_crtc), GFP_KERNEL);
+        if (!nx_crtc) {
+            ret = -ENOMEM;
+            goto err_crtc;
+        }
+
+        priv = drm->dev_private;
+        priv->crtcs[i] = &nx_crtc->crtc; /* sequentially link */
+        priv->num_crtcs++;
+        priv->possible_pipes |= (1 << pipe);
+
+        nx_crtc->pipe = pipe;
+        nx_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+        nx_crtc->pipe_irq = INVALID_IRQ;
+
+        ret = nx_drm_crtc_parse_dt_setup(drm, &nx_crtc->crtc, pipe);
+        if (0 > ret)
+            goto err_crtc;
+
+        nx_drm_dp_crtc_init(drm, &nx_crtc->crtc, pipe);
+        ret = nx_drm_crtc_create_planes(drm, &nx_crtc->crtc);
+        if (0 > ret)
+            goto err_crtc;
+
+        nx_crtcs[i] = nx_crtc;
+        DRM_INFO("crtc[%d]: pipe.%d (irq.%d)\n",
+            i, pipe, nx_crtc->pipe_irq);
+    }
+
+    kfree(nx_crtcs);
+
+    DRM_DEBUG_KMS("done\n");
+    return 0;
+
+err_crtc:
+    for (i = 0; num_crtcs > i; i++)
+        kfree(nx_crtcs[i]);
+
+    kfree(nx_crtcs);
+
+    return ret;
+}
+
diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_crtc.h b/drivers/gpu/drm/nexell/nx_drm_crtc.h
--- a/drivers/gpu/drm/nexell/nx_drm_crtc.h 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/drm/nexell/nx_drm_crtc.h 2018-05-06 08:49:49.566711163 +0200
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Nexell Co., Ltd.
+ * Author: junghyun, kim
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _NX_DRM_CRTC_H_
+#define _NX_DRM_CRTC_H_
+
+#include "soc/s5pxx18_drm_dp.h"
+
+struct nx_drm_crtc {
+    struct drm_crtc crtc;
+    struct drm_display_mode current_mode;
+    int pipe; /* hw crtc index */
+    int pipe_irq;
+    struct dp_plane_top top;
+    struct drm_pending_vblank_event *event;
+    unsigned int dpms_mode;
+    struct reset_control *resets[2];
+    int num_resets;
+    bool post_closed;
+    bool suspended;
+};
+
+#define to_nx_crtc(x) \
+    container_of(x, struct nx_drm_crtc, crtc)
+
+int nx_drm_crtc_init(struct drm_device *dev);
+int nx_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void nx_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
+
+#endif
diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_drv.c b/drivers/gpu/drm/nexell/nx_drm_drv.c
--- a/drivers/gpu/drm/nexell/nx_drm_drv.c 1970-01-01 01:00:00.000000000 +0100
+++ b/drivers/gpu/drm/nexell/nx_drm_drv.c 2018-05-06 08:49:49.566711163 +0200
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) 2016 Nexell Co., Ltd.
+ * Author: junghyun, kim
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "nx_drm_drv.h"
+#include "nx_drm_crtc.h"
+#include "nx_drm_connector.h"
+#include "nx_drm_encoder.h"
+#include "nx_drm_fb.h"
+#include "nx_drm_plane.h"
+#include "nx_drm_gem.h"
+
+/*
+ * DRM Configuration
+ *
+ * CRTC : MLC top control (and display interrupt, reset, clock, ...)
+ * Plane : MLC layer control
+ * Encoder : DPC control
+ * Connector : DRM connector for LCD, LVDS, MiPi, HDMI, ...
+ * Panel : Display device control (LCD, LVDS, MiPi, HDMI, ...)
+ *
+ */
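The to_nx_crtc()/to_nx_connector()/to_nx_encoder() macros in these headers are all container_of() wrappers: they recover the driver's wrapper object from a pointer to the embedded DRM base object. A self-contained user-space illustration of the pattern (struct names are made up):

#include <stddef.h>
#include <stdio.h>

#define my_container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };
struct wrapper { int pipe; struct base base; };

int main(void)
{
    struct wrapper w = { .pipe = 1, .base = { .id = 42 } };
    struct base *b = &w.base; /* what the DRM core hands back */
    struct wrapper *back = my_container_of(b, struct wrapper, base);

    /* prints "pipe=1 id=42": the wrapper is recovered from the member */
    printf("pipe=%d id=%d\n", back->pipe, back->base.id);
    return 0;
}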
+ */ + drm->mode_config.max_width = MAX_FB_MODE_WIDTH; + drm->mode_config.max_height = MAX_FB_MODE_HEIGHT; + drm->mode_config.funcs = &nx_mode_config_funcs; + + DRM_DEBUG_KMS("min %d*%d, max %d*%d\n", + drm->mode_config.min_width, drm->mode_config.min_height, + drm->mode_config.max_width, drm->mode_config.max_height); +} + +static struct drm_ioctl_desc nx_drm_ioctls[] = { + DRM_IOCTL_DEF_DRV(NX_GEM_CREATE, nx_drm_gem_create_ioctl, + DRM_UNLOCKED | DRM_AUTH), + DRM_IOCTL_DEF_DRV(NX_GEM_SYNC, nx_drm_gem_sync_ioctl, + DRM_UNLOCKED | DRM_AUTH), + DRM_IOCTL_DEF_DRV(NX_GEM_GET, nx_drm_gem_get_ioctl, + DRM_UNLOCKED), +}; + +static const struct file_operations nx_drm_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = nx_drm_gem_fops_mmap, +}; + +static void nx_drm_lastclose(struct drm_device *drm) +{ + struct nx_drm_priv *priv = drm->dev_private; + struct nx_drm_fbdev *fbdev; + + if (!priv || !priv->framebuffer_dev) + return; + + fbdev = priv->framebuffer_dev->fbdev; + if (fbdev) + drm_fb_helper_restore_fbdev_mode_unlocked( + (struct drm_fb_helper *)fbdev); +} + +static void nx_drm_postclose(struct drm_device *drm, struct drm_file *file) +{ + struct drm_pending_vblank_event *event; + struct nx_drm_crtc *nx_crtc; + struct nx_drm_priv *priv = drm->dev_private; + unsigned long flags; + int i; + + for (i = 0; i < priv->num_crtcs; i++) { + nx_crtc = to_nx_crtc(priv->crtcs[i]); + event = nx_crtc->event; + if (event && event->base.file_priv == file) { + spin_lock_irqsave(&drm->event_lock, flags); + nx_crtc->post_closed = true; + spin_unlock_irqrestore(&drm->event_lock, flags); + } + } +} + +static struct drm_driver nx_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | + DRIVER_GEM | DRIVER_PRIME | DRIVER_IRQ_SHARED, + .fops = &nx_drm_fops, /* replace fops */ + .lastclose = nx_drm_lastclose, + .postclose = nx_drm_postclose, + + .enable_vblank = nx_drm_crtc_enable_vblank, + .disable_vblank = nx_drm_crtc_disable_vblank, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + + .gem_free_object = nx_drm_gem_free_object, + + .gem_prime_export = nx_drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = nx_drm_gem_prime_get_sg_table, + + .gem_prime_import_sg_table = nx_drm_gem_prime_import_sg_table, + + .dumb_create = nx_drm_gem_dumb_create, + .dumb_map_offset = nx_drm_gem_dumb_map_offset, + .dumb_destroy = drm_gem_dumb_destroy, + + .ioctls = nx_drm_ioctls, + .num_ioctls = ARRAY_SIZE(nx_drm_ioctls), + + .name = "nexell", + .desc = "nexell SoC DRM", + .date = "20160219", + .major = 2, + .minor = 0, +}; + +static int nx_drm_bind(struct device *dev) +{ + struct drm_device *drm; + struct nx_drm_priv *priv; + int ret; + + drm = drm_dev_alloc(&nx_drm_driver, dev); + if (IS_ERR(drm)) + return PTR_ERR(drm); + + priv = kzalloc(sizeof(struct nx_drm_priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err_free_drm; + } + + mutex_init(&priv->lock); + drm->dev_private = (void *)priv; + dev_set_drvdata(dev, drm); + + /* drm->mode_config initialization */ + drm_mode_config_init(drm); + nx_drm_mode_config_init(drm); + + /* Try to nexell crtcs. 
+
+static int nx_drm_bind(struct device *dev)
+{
+    struct drm_device *drm;
+    struct nx_drm_priv *priv;
+    int ret;
+
+    drm = drm_dev_alloc(&nx_drm_driver, dev);
+    if (IS_ERR(drm))
+        return PTR_ERR(drm);
+
+    priv = kzalloc(sizeof(struct nx_drm_priv), GFP_KERNEL);
+    if (!priv) {
+        ret = -ENOMEM;
+        goto err_free_drm;
+    }
+
+    mutex_init(&priv->lock);
+    drm->dev_private = (void *)priv;
+    dev_set_drvdata(dev, drm);
+
+    /* drm->mode_config initialization */
+    drm_mode_config_init(drm);
+    nx_drm_mode_config_init(drm);
+
+    /* Try to initialize nexell crtcs. */
+    ret = nx_drm_crtc_init(drm);
+    if (ret)
+        goto err_mode_config_cleanup;
+
+    /* Try to bind all sub drivers. */
+    ret = component_bind_all(drm->dev, drm);
+    if (ret)
+        goto err_mode_config_cleanup;
+
+    ret = drm_vblank_init(drm, priv->num_crtcs);
+    if (ret)
+        goto err_unbind_all;
+
+    /* init kms poll for handling hpd */
+    drm_kms_helper_poll_init(drm);
+
+    /* register the DRM device */
+    ret = drm_dev_register(drm, 0);
+    if (ret < 0)
+        goto err_cleanup_poll;
+
+    /* force connectors detection for LCD */
+    if (priv->force_detect)
+        drm_helper_hpd_irq_event(drm);
+    return 0;
+
+err_cleanup_poll:
+    drm_kms_helper_poll_fini(drm);
+err_unbind_all:
+    component_unbind_all(drm->dev, drm);
+err_mode_config_cleanup:
+    drm_mode_config_cleanup(drm);
+    kfree(priv);
+err_free_drm:
+    drm_dev_unref(drm);
+
+    return ret;
+}
+
+static void nx_drm_unbind(struct device *dev)
+{
+    struct drm_device *drm = dev_get_drvdata(dev);
+
+    DRM_DEBUG_DRIVER("enter\n");
+
+    nx_drm_framebuffer_fini(drm);
+
+    drm_kms_helper_poll_fini(drm);
+    drm_mode_config_cleanup(drm);
+    kfree(drm->dev_private);
+
+    drm->dev_private = NULL;
+}
+
+static const struct component_master_ops nx_drm_ops = {
+    .bind = nx_drm_bind,
+    .unbind = nx_drm_unbind,
+};
+
+static int match_dev(struct device *dev, void *data)
+{
+    return dev == (struct device *)data;
+}
+
+#ifdef CHECK_DRIVER_NAME
+static int match_drv(struct device_driver *drv, void *data)
+{
+    const char *t = data, *f = drv->name;
+
+    return strstr(f, t) ? 1 : 0;
+}
+#endif
+
+static int match_component(struct device *dev, void *data)
+{
+    const char *name = data;
+    const char *t = name, *f = dev_name(dev);
+
+    return strstr(f, t) ? 1 : 0;
+}
+
+static int nx_drm_probe(struct platform_device *pdev)
+{
+    struct component_match *match = NULL;
+    const char *const dev_names[] = {
+        /* node name (x:name) */
+#ifdef CONFIG_DRM_NX_RGB
+        "display_drm_rgb",
+#endif
+#ifdef CONFIG_DRM_NX_LVDS
+        "display_drm_lvds",
+#endif
+#ifdef CONFIG_DRM_NX_MIPI_DSI
+        "display_drm_mipi",
+#endif
+#ifdef CONFIG_DRM_NX_HDMI
+        "display_drm_hdmi",
+#endif
+    };
+    int found = 0;
+    int i;
+
+    DRM_DEBUG_DRIVER("enter %s\n", dev_name(&pdev->dev));
+
+    for (i = 0; i < ARRAY_SIZE(dev_names); i++) {
+        struct device *dev;
+
+        dev = bus_find_device(&platform_bus_type, NULL,
+            (void *)dev_names[i], match_component);
+        if (!dev) {
+            DRM_INFO("not found device name: %s\n", dev_names[i]);
+            continue;
+        }
+
+    #ifdef CHECK_DRIVER_NAME
+        if (!bus_for_each_drv(dev->bus, NULL,
+            (void *)dev_names[i], match_drv)) {
+            DRM_INFO("not found driver: %s\n", dev_names[i]);
+            continue;
+        }
+    #endif
+
+        component_match_add(&pdev->dev, &match, match_dev, dev);
+        found++;
+    }
+
+    if (!found)
+        return -EINVAL;
+
+    pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+    /* call master bind */
+    return component_master_add_with_match(&pdev->dev, &nx_drm_ops, match);
+}
+
+static int nx_drm_remove(struct platform_device *pdev)
+{
+    component_master_del(&pdev->dev, &nx_drm_ops);
+    return 0;
+}
+
+static const struct of_device_id dt_of_match[] = {
+    {.compatible = "nexell,s5pxx18-drm"},
+    {}
+};
+MODULE_DEVICE_TABLE(of, dt_of_match);
+
+static int nx_drm_pm_suspend(struct device *dev)
+{
+    struct drm_connector *connector;
+    struct drm_device *drm = dev_get_drvdata(dev);
+    struct nx_drm_priv *priv = drm->dev_private;
+    int i;
+
+    DRM_DEBUG_DRIVER("enter %s\n", dev_name(dev));
+
+    drm_modeset_lock_all(drm);
+
+    for (i = 0; i < priv->num_crtcs; i++)
+        to_nx_crtc(priv->crtcs[i])->suspended = true;
+
+    list_for_each_entry(connector,
&drm->mode_config.connector_list, head) { + int old_dpms = connector->dpms; + struct nx_drm_device *display = + to_nx_connector(connector)->display; + + if (display) + display->suspended = true; + + if (connector->funcs->dpms) + connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); + + /* Set the old mode back to the connector for resume */ + connector->dpms = old_dpms; + } + + drm_modeset_unlock_all(drm); + + return 0; +} + +static int nx_drm_pm_resume(struct device *dev) +{ + struct drm_connector *connector; + struct drm_device *drm = dev_get_drvdata(dev); + struct nx_drm_priv *priv = drm->dev_private; + int i; + + DRM_DEBUG_DRIVER("enter %s\n", dev_name(dev)); + + drm_modeset_lock_all(drm); + + for (i = 0; i < priv->num_crtcs; i++) + nx_drm_dp_crtc_reset(priv->crtcs[i]); + + list_for_each_entry(connector, &drm->mode_config.connector_list, head) { + if (connector->funcs->dpms) { + int dpms = connector->dpms; + struct nx_drm_device *display = + to_nx_connector(connector)->display; + + connector->dpms = DRM_MODE_DPMS_OFF; + connector->funcs->dpms(connector, dpms); + if (display) + display->suspended = false; + } + } + + for (i = 0; i < priv->num_crtcs; i++) + to_nx_crtc(priv->crtcs[i])->suspended = false; + + drm_modeset_unlock_all(drm); + + return 0; +} + +static const struct dev_pm_ops nx_drm_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(nx_drm_pm_suspend, nx_drm_pm_resume) +}; + +static struct platform_driver nx_drm_platform_drv = { + .probe = nx_drm_probe, + .remove = nx_drm_remove, + .driver = { + .owner = THIS_MODULE, + .name = "nexell,display_drm", + .of_match_table = dt_of_match, + .pm = &nx_drm_pm_ops, + }, +}; +module_platform_driver(nx_drm_platform_drv); + +MODULE_AUTHOR("jhkim "); +MODULE_DESCRIPTION("Nexell DRM Driver"); +MODULE_LICENSE("GPL"); diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_drv.h b/drivers/gpu/drm/nexell/nx_drm_drv.h --- a/drivers/gpu/drm/nexell/nx_drm_drv.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/drm/nexell/nx_drm_drv.h 2018-05-06 08:49:49.566711163 +0200 @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: junghyun, kim + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _NX_DRM_DRV_H_ +#define _NX_DRM_DRV_H_ + +#include +#include + +#define MAX_CRTCS 2 /* Multi Layer Controller(MLC) nums (0, 1) */ +#define MAX_CONNECTOR 4 /* RGB, LVDS, MiPi, HDMI */ + +#define MAX_FB_MODE_WIDTH 4096 +#define MAX_FB_MODE_HEIGHT 4096 + +struct nx_drm_priv { + struct nx_framebuffer_dev *framebuffer_dev; + unsigned int possible_pipes; + bool force_detect; + struct drm_crtc *crtcs[MAX_CRTCS]; + int num_crtcs; + struct mutex lock; +}; + +#endif diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_encoder.c b/drivers/gpu/drm/nexell/nx_drm_encoder.c --- a/drivers/gpu/drm/nexell/nx_drm_encoder.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/drm/nexell/nx_drm_encoder.c 2018-05-06 08:49:49.566711163 +0200 @@ -0,0 +1,266 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. 
+ * Author: junghyun, kim + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include + +#ifdef CONFIG_ARM_S5Pxx18_DEVFREQ +#include +#include +#endif + +#include "nx_drm_drv.h" +#include "nx_drm_crtc.h" +#include "nx_drm_encoder.h" +#include "nx_drm_connector.h" +#include "soc/s5pxx18_drm_dp.h" + +#define nx_drm_dp_encoder_set_pipe(d, p) { \ + struct dp_control_dev *dpc = drm_dev_get_dpc(d); \ + dpc->module = p; \ + } + +#ifdef CONFIG_ARM_S5Pxx18_DEVFREQ +static struct pm_qos_request dev_qos_req; + +static inline void dev_qos_update(int khz) +{ + int active = pm_qos_request_active(&dev_qos_req); + + if (!active) + pm_qos_add_request(&dev_qos_req, PM_QOS_BUS_THROUGHPUT, khz); + else + pm_qos_update_request(&dev_qos_req, khz); +} + +static void nx_drm_qos_up(struct drm_encoder *encoder) +{ + int khz = NX_BUS_CLK_DISP_KHZ; + + DRM_DEBUG_KMS("[ENCODER:%d] qos up to %d khz\n", + encoder->base.id, khz); + dev_qos_update(khz); +} + +static void nx_drm_qos_down(struct drm_encoder *encoder) +{ + struct drm_encoder *enc; + struct drm_device *drm = encoder->dev; + int khz = NX_BUS_CLK_IDLE_KHZ; + bool power = false; + + list_for_each_entry(enc, &drm->mode_config.encoder_list, head) { + power = to_nx_encoder(enc)->enabled; + if (power) + break; + } + + if (!power) { + DRM_DEBUG_KMS("[ENCODER:%d] qos idle to %d khz\n", + encoder->base.id, khz); + dev_qos_update(khz); + } +} +#else +#define nx_drm_qos_up(encoder) +#define nx_drm_qos_down(encoder) +#endif + +static void nx_drm_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct nx_drm_encoder *nx_encoder = to_nx_encoder(encoder); + struct nx_drm_device *display = nx_encoder->display; + struct nx_drm_panel *panel = &display->panel; + struct nx_drm_ops *ops = display->ops; + + DRM_DEBUG_KMS("enter [ENCODER:%d] %s dpms:%d, %s, power %s\n", + encoder->base.id, + dp_panel_type_name(dp_panel_get_type(display)), + mode, panel->is_connected ? "connected" : "disconnected", + nx_encoder->enabled ? 
"on" : "off"); + + if (nx_encoder->dpms == mode) { + DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); + return; + } + + switch (mode) { + case DRM_MODE_DPMS_ON: + if (panel->is_connected) { + nx_drm_qos_up(encoder); + nx_drm_dp_encoder_dpms(encoder, true); + if (ops && ops->dpms) + ops->dpms(display->dev, mode); + nx_encoder->enabled = true; + } + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + if (nx_encoder->enabled) { + if (ops && ops->dpms) + ops->dpms(display->dev, mode); + nx_drm_dp_encoder_dpms(encoder, false); + nx_encoder->enabled = false; + nx_drm_qos_down(encoder); + } + break; + + default: + DRM_ERROR("fail : unspecified mode %d\n", mode); + break; + } + + nx_encoder->dpms = mode; + DRM_DEBUG_KMS("done\n"); +} + +static bool nx_drm_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_connector *connector; + struct drm_device *drm = encoder->dev; + struct nx_drm_encoder *nx_encoder = to_nx_encoder(encoder); + struct nx_drm_crtc *nx_crtc = to_nx_crtc(encoder->crtc); + struct nx_drm_device *display = to_nx_encoder(encoder)->display; + struct nx_drm_ops *ops = display->ops; + int pipe = nx_crtc->pipe; + + DRM_DEBUG_KMS("enter, encoder id:%d crtc pipe.%d\n", + encoder->base.id, pipe); + + /* + * set display controllor pipe. + */ + nx_encoder->pipe = pipe; + nx_drm_dp_encoder_set_pipe(display, pipe); + nx_drm_dp_encoder_prepare(encoder, pipe, true); + + list_for_each_entry(connector, &drm->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + if (ops && ops->mode_fixup) + return ops->mode_fixup(display->dev, + connector, mode, adjusted_mode); + } + } + + return true; +} + +static void nx_drm_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *drm = encoder->dev; + struct drm_connector *connector; + struct nx_drm_device *display = to_nx_encoder(encoder)->display; + struct nx_drm_ops *ops = display->ops; + + DRM_DEBUG_KMS("enter\n"); + + list_for_each_entry(connector, &drm->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + if (ops && ops->mode_set) + ops->mode_set(display->dev, adjusted_mode); + } + } + + nx_drm_dp_display_mode_to_sync(adjusted_mode, display); +} + +static void nx_drm_encoder_prepare(struct drm_encoder *encoder) +{ + DRM_DEBUG_KMS("enter\n"); +} + +static void nx_drm_encoder_commit(struct drm_encoder *encoder) +{ + struct nx_drm_device *display = to_nx_encoder(encoder)->display; + struct nx_drm_ops *ops = display->ops; + struct nx_drm_panel *panel = &display->panel; + + DRM_DEBUG_KMS("enter\n"); + + if (!panel->is_connected) + return; + + if (ops && ops->commit) + ops->commit(display->dev); + + nx_drm_dp_encoder_commit(encoder); + + /* display output device */ + if (ops && ops->dpms) + ops->dpms(display->dev, DRM_MODE_DPMS_ON); +} + +static struct drm_encoder_helper_funcs nx_encoder_helper_funcs = { + .dpms = nx_drm_encoder_dpms, + .mode_fixup = nx_drm_encoder_mode_fixup, + .mode_set = nx_drm_encoder_mode_set, + .prepare = nx_drm_encoder_prepare, + .commit = nx_drm_encoder_commit, +}; + +static void nx_drm_encoder_destroy(struct drm_encoder *encoder) +{ + struct nx_drm_encoder *nx_encoder = to_nx_encoder(encoder); + + nx_drm_dp_encoder_unprepare(encoder); + + drm_encoder_cleanup(encoder); + kfree(nx_encoder); +} + +static struct drm_encoder_funcs nx_encoder_funcs = { + .destroy 
= nx_drm_encoder_destroy, +}; + +struct drm_encoder *nx_drm_encoder_create(struct drm_device *drm, + struct nx_drm_device *display, int enc_type, + int pipe, int possible_crtcs, void *context) +{ + struct nx_drm_encoder *nx_encoder; + struct drm_encoder *encoder; + + DRM_DEBUG_KMS("enter pipe.%d crtc mask:0x%x\n", pipe, possible_crtcs); + + BUG_ON(!display || 0 == possible_crtcs); + + nx_encoder = kzalloc(sizeof(*nx_encoder), GFP_KERNEL); + if (!nx_encoder) + return ERR_PTR(-ENOMEM); + + nx_encoder->dpms = DRM_MODE_DPMS_OFF; + nx_encoder->pipe = pipe; + nx_encoder->display = display; + nx_encoder->context = context; + + encoder = &nx_encoder->encoder; + encoder->possible_crtcs = possible_crtcs; + + drm_encoder_init(drm, encoder, &nx_encoder_funcs, enc_type, NULL); + drm_encoder_helper_add(encoder, &nx_encoder_helper_funcs); + + DRM_DEBUG_KMS("exit, encoder id:%d\n", encoder->base.id); + + return encoder; +} +EXPORT_SYMBOL(nx_drm_encoder_create); + diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_encoder.h b/drivers/gpu/drm/nexell/nx_drm_encoder.h --- a/drivers/gpu/drm/nexell/nx_drm_encoder.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/drm/nexell/nx_drm_encoder.h 2018-05-06 08:49:49.566711163 +0200 @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: junghyun, kim + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _NX_DRM_ENCODER_H_ +#define _NX_DRM_ENCODER_H_ + +#include "soc/s5pxx18_drm_dp.h" + +struct nx_drm_encoder { + struct drm_encoder encoder; + int pipe; + struct nx_drm_device *display; + int dpms; + bool enabled; + void *context; /* device context */ +}; + +#define to_nx_encoder(e) \ + container_of(e, struct nx_drm_encoder, encoder) + +struct drm_encoder *nx_drm_encoder_create(struct drm_device *drm, + struct nx_drm_device *display, int enc_type, + int pipe, int possible_crtcs, void *context); + +#endif diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_fb.c b/drivers/gpu/drm/nexell/nx_drm_fb.c --- a/drivers/gpu/drm/nexell/nx_drm_fb.c 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/drm/nexell/nx_drm_fb.c 2018-05-06 08:49:49.566711163 +0200 @@ -0,0 +1,468 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: junghyun, kim + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include +#include +#include + +#include "nx_drm_drv.h" +#include "nx_drm_fb.h" +#include "nx_drm_gem.h" + +#define PREFERRED_BPP 32 + +static int fb_buffer_count = 1; +static bool fb_format_bgr; + +MODULE_PARM_DESC(fb_buffers, "frame buffer count"); +module_param_named(fb_buffers, fb_buffer_count, int, 0600); + +MODULE_PARM_DESC(fb_bgr, "frame buffer BGR pixel format"); +module_param_named(fb_bgr, fb_format_bgr, bool, 0600); + +static void nx_drm_fb_destroy(struct drm_framebuffer *fb) +{ + struct nx_drm_fb *nx_fb = to_nx_drm_fb(fb); + int i; + + for (i = 0; i < 4; i++) { + if (nx_fb->obj[i]) + drm_gem_object_unreference_unlocked( + &nx_fb->obj[i]->base); + } + + drm_framebuffer_cleanup(fb); + kfree(nx_fb); +} + +static int nx_drm_fb_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int *handle) +{ + struct nx_drm_fb *nx_fb = to_nx_drm_fb(fb); + + return drm_gem_handle_create(file_priv, &nx_fb->obj[0]->base, handle); +} + +static int nx_drm_fb_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned flags, + unsigned color, struct drm_clip_rect *clips, + unsigned num_clips) +{ + /* TODO */ + return 0; +} + +static struct drm_framebuffer_funcs nx_drm_framebuffer_funcs = { + .destroy = nx_drm_fb_destroy, + .create_handle = nx_drm_fb_create_handle, + .dirty = nx_drm_fb_dirty, +}; + +static struct fb_ops nx_fb_ops = { + .owner = THIS_MODULE, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_blank = drm_fb_helper_blank, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_setcmap = drm_fb_helper_setcmap, +}; + +static struct nx_drm_fb *nx_drm_fb_alloc(struct drm_device *drm, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct nx_gem_object **nx_obj, + unsigned int num_planes) +{ + struct nx_drm_fb *nx_fb; + int ret; + int i; + + nx_fb = kzalloc(sizeof(*nx_fb), GFP_KERNEL); + if (!nx_fb) + return ERR_PTR(-ENOMEM); + + drm_helper_mode_fill_fb_struct(drm, &nx_fb->fb, mode_cmd); + + for (i = 0; i < num_planes; i++) + nx_fb->obj[i] = nx_obj[i]; + + ret = drm_framebuffer_init(drm, &nx_fb->fb, &nx_drm_framebuffer_funcs); + if (ret) { + dev_err(drm->dev, "failed to initialize framebuffer:%d\n", ret); + kfree(nx_fb); + return ERR_PTR(ret); + } + + return nx_fb; +} + +static struct drm_framebuffer *nx_drm_fb_create(struct drm_device *drm, + struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct nx_drm_fb *nx_fb; + struct nx_gem_object *nx_objs[4]; + struct drm_gem_object *obj; + unsigned int hsub; + unsigned int vsub; + int ret; + int i; + + hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); + vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); + + for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { + unsigned int width = mode_cmd->width / (i ? hsub : 1); + unsigned int height = mode_cmd->height / (i ? 
vsub : 1); + unsigned int min_size; + + obj = drm_gem_object_lookup(file_priv, + mode_cmd->handles[i]); + if (!obj) { + dev_err(drm->dev, "Failed to lookup GEM object\n"); + ret = -ENXIO; + goto err_gem_object_unreference; + } + + min_size = (height - 1) * mode_cmd->pitches[i] + + width + * drm_format_plane_cpp(mode_cmd->pixel_format, i) + + mode_cmd->offsets[i]; + + if (obj->size < min_size) { + drm_gem_object_unreference_unlocked(obj); + ret = -EINVAL; + goto err_gem_object_unreference; + } + nx_objs[i] = to_nx_gem_obj(obj); + } + + nx_fb = nx_drm_fb_alloc(drm, mode_cmd, nx_objs, i); + if (IS_ERR(nx_fb)) { + ret = PTR_ERR(nx_fb); + goto err_gem_object_unreference; + } + + return &nx_fb->fb; + +err_gem_object_unreference: + for (i--; i >= 0; i--) + drm_gem_object_unreference_unlocked(&nx_objs[i]->base); + + return ERR_PTR(ret); +} + +struct drm_framebuffer *nx_drm_fb_mode_create(struct drm_device *drm, + struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + DRM_DEBUG_KMS("enter\n"); + + return nx_drm_fb_create(drm, file_priv, mode_cmd); +} + +static uint32_t nx_drm_mode_fb_format(uint32_t bpp, uint32_t depth, bool bgr) +{ + uint32_t fmt; + + switch (bpp) { + case 8: + fmt = DRM_FORMAT_C8; + break; + case 16: + if (depth == 15) + fmt = bgr ? DRM_FORMAT_XBGR1555 : DRM_FORMAT_XRGB1555; + else + fmt = bgr ? DRM_FORMAT_BGR565 : DRM_FORMAT_RGB565; + break; + case 24: + fmt = bgr ? DRM_FORMAT_BGR888 : DRM_FORMAT_RGB888; + break; + case 32: + if (depth == 24) + fmt = bgr ? DRM_FORMAT_XBGR8888 : DRM_FORMAT_XRGB8888; + else + fmt = bgr ? DRM_FORMAT_ABGR8888 : DRM_FORMAT_ARGB8888; + break; + default: + DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n"); + fmt = DRM_FORMAT_XRGB8888; + break; + } + + return fmt; +} + +static int nx_drm_fb_helper_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct nx_drm_fbdev *fbdev = to_nx_drm_fbdev(fb_helper); + struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + struct drm_device *drm = fb_helper->dev; + struct nx_gem_object *nx_obj; + struct drm_framebuffer *fb; + unsigned int bytes_per_pixel; + unsigned long offset; + struct fb_info *info; + size_t size; + unsigned int flags = 0; + int buffers = fbdev->fb_buffers; + int ret; + + DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d) buffers(%d)\n", + sizes->surface_width, sizes->surface_height, + sizes->surface_bpp, buffers); + + bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; + mode_cmd.pixel_format = nx_drm_mode_fb_format(sizes->surface_bpp, + sizes->surface_depth, fb_format_bgr); + + /* for double buffer */ + size = mode_cmd.pitches[0] * (mode_cmd.height * buffers); + nx_obj = nx_drm_gem_create(drm, size, flags); + if (IS_ERR(nx_obj)) + return -ENOMEM; + + info = framebuffer_alloc(0, drm->dev); + if (!info) { + dev_err(drm->dev, "Failed to allocate framebuffer info.\n"); + ret = -ENOMEM; + goto err_drm_gem_free_object; + } + + fbdev->fb = nx_drm_fb_alloc(drm, &mode_cmd, &nx_obj, 1); + if (IS_ERR(fbdev->fb)) { + dev_err(drm->dev, "Failed to allocate DRM framebuffer.\n"); + ret = PTR_ERR(fbdev->fb); + goto err_framebuffer_release; + } + + fb = &fbdev->fb->fb; + fb_helper->fb = fb; + fb_helper->fbdev = info; + + info->par = fb_helper; + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &nx_fb_ops; + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) { + dev_err(drm->dev, "Failed to allocate 
color map.\n"); + goto err_drm_fb_destroy; + } + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(info, fb_helper, + sizes->fb_width, sizes->fb_height); + + /* for double buffer */ + info->var.yres_virtual = fb->height * buffers; + + offset = info->var.xoffset * bytes_per_pixel; + offset += info->var.yoffset * fb->pitches[0]; + + drm->mode_config.fb_base = (resource_size_t)nx_obj->dma_addr; + info->screen_base = nx_obj->cpu_addr + offset; + info->fix.smem_start = (unsigned long)(nx_obj->dma_addr + offset); + info->screen_size = size; + info->fix.smem_len = size; + + if (fb_helper->crtc_info && + fb_helper->crtc_info->desired_mode) { + struct videomode vm; + struct drm_display_mode *mode = + fb_helper->crtc_info->desired_mode; + + drm_display_mode_to_videomode(mode, &vm); + info->var.left_margin = vm.hsync_len + vm.hback_porch; + info->var.right_margin = vm.hfront_porch; + info->var.upper_margin = vm.vsync_len + vm.vback_porch; + info->var.lower_margin = vm.vfront_porch; + /* pico second */ + info->var.pixclock = KHZ2PICOS(vm.pixelclock/1000); + } + + return 0; + +err_drm_fb_destroy: + drm_framebuffer_unregister_private(fb); + nx_drm_fb_destroy(fb); + +err_framebuffer_release: + framebuffer_release(info); + +err_drm_gem_free_object: + nx_drm_gem_destroy(nx_obj); + + return ret; +} + +static const struct drm_fb_helper_funcs nx_drm_fb_helper = { + .fb_probe = nx_drm_fb_helper_probe, +}; + +static struct nx_drm_fbdev *nx_drm_fbdev_init(struct drm_device *drm, + unsigned int preferred_bpp, unsigned int num_crtc, + unsigned int max_conn_count) +{ + struct nx_drm_fbdev *fbdev; + struct drm_fb_helper *fb_helper; + int ret; + + fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); + if (!fbdev) + return ERR_PTR(-ENOMEM); + + fb_helper = &fbdev->fb_helper; + fbdev->fb_buffers = 1; + + if (fb_buffer_count > 0) + fbdev->fb_buffers = fb_buffer_count; + + DRM_INFO("FB counts = %d\n", fbdev->fb_buffers); + + drm_fb_helper_prepare(drm, fb_helper, &nx_drm_fb_helper); + + ret = drm_fb_helper_init(drm, fb_helper, max_conn_count); + if (ret < 0) { + dev_err(drm->dev, "Failed to initialize drm fb fb_helper.\n"); + goto err_free; + } + + ret = drm_fb_helper_single_add_all_connectors(fb_helper); + if (ret < 0) { + dev_err(drm->dev, "Failed to add connectors.\n"); + goto err_drm_fb_helper_fini; + + } + + /* disable all the possible outputs/crtcs before entering KMS mode */ + drm_helper_disable_unused_functions(drm); + + ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp); + if (ret < 0) { + dev_err(drm->dev, "Failed to set initial hw configuration.\n"); + goto err_drm_fb_helper_fini; + } + + return fbdev; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(fb_helper); +err_free: + kfree(fbdev); + + return ERR_PTR(ret); +} + +static void nx_drm_fbdev_fini(struct nx_drm_fbdev *fbdev) +{ + if (fbdev->fb_helper.fbdev) { + struct fb_info *info; + int ret; + + info = fbdev->fb_helper.fbdev; + ret = unregister_framebuffer(info); + if (ret < 0) + DRM_DEBUG_KMS("failed unregister_framebuffer()\n"); + + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + + framebuffer_release(info); + } + + if (fbdev->fb) { + drm_framebuffer_unregister_private(&fbdev->fb->fb); + nx_drm_fb_destroy(&fbdev->fb->fb); + } + + drm_fb_helper_fini(&fbdev->fb_helper); + kfree(fbdev); +} + +int nx_drm_framebuffer_init(struct drm_device *drm) +{ + struct nx_drm_fbdev *fbdev; + struct nx_drm_priv *priv = drm->dev_private; + struct nx_framebuffer_dev *nx_framebuffer; + unsigned int num_crtc; + int bpp; + int ret 
= 0; + + if (!drm->mode_config.num_crtc || + !drm->mode_config.num_connector) + return 0; + + DRM_DEBUG_KMS("enter crtc num:%d, connector num:%d\n", + drm->mode_config.num_crtc, + drm->mode_config.num_connector); + + nx_framebuffer = kzalloc(sizeof(*nx_framebuffer), GFP_KERNEL); + if (!nx_framebuffer) + return -ENOMEM; + + priv->framebuffer_dev = nx_framebuffer; + num_crtc = drm->mode_config.num_crtc; + bpp = PREFERRED_BPP; + + fbdev = nx_drm_fbdev_init(drm, bpp, num_crtc, MAX_CONNECTOR); + if (IS_ERR(fbdev)) { + ret = PTR_ERR(fbdev); + goto err_drm_fb_dev_free; + } + + nx_framebuffer->fbdev = fbdev; + + return 0; + +err_drm_fb_dev_free: + kfree(nx_framebuffer); + return ret; +} + +void nx_drm_framebuffer_fini(struct drm_device *drm) +{ + struct nx_drm_priv *priv = drm->dev_private; + struct nx_framebuffer_dev *nx_framebuffer = priv->framebuffer_dev; + struct nx_drm_fbdev *fbdev = nx_framebuffer->fbdev; + + nx_drm_fbdev_fini(fbdev); + kfree(nx_framebuffer); + priv->framebuffer_dev = NULL; +} + +/* + * fb with gem + */ +struct nx_gem_object *nx_drm_fb_get_gem_obj(struct drm_framebuffer *fb, + unsigned int plane) +{ + struct nx_drm_fb *nx_fb = to_nx_drm_fb(fb); + + if (plane >= 4) + return NULL; + + return nx_fb->obj[plane]; +} +EXPORT_SYMBOL_GPL(nx_drm_fb_get_gem_obj); diff -ENwbur a/drivers/gpu/drm/nexell/nx_drm_fb.h b/drivers/gpu/drm/nexell/nx_drm_fb.h --- a/drivers/gpu/drm/nexell/nx_drm_fb.h 1970-01-01 01:00:00.000000000 +0100 +++ b/drivers/gpu/drm/nexell/nx_drm_fb.h 2018-05-06 08:49:49.566711163 +0200 @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2016 Nexell Co., Ltd. + * Author: junghyun, kim + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _NX_DRM_FB_H_ +#define _NX_DRM_FB_H_ + +#include +#include +#include